from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"
}


class OpenAIGPTConfig(PretrainedConfig):
    """Configuration for an OpenAI GPT (GPT-1) model."""

    model_type = "openai-gpt"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=40478,
        n_positions=512,
        n_embd=768,
        n_layer=12,
        n_head=12,
        afn="gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        summary_type="cls_index",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs)
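
# Hedged usage sketch (added, not part of the original file): assuming the class
# above is the OpenAIGPTConfig shipped with `transformers`, attribute_map lets
# the canonical names read through to the GPT-1 ones:
#
#     from transformers import OpenAIGPTConfig
#     config = OpenAIGPTConfig(n_embd=1024, n_layer=24)
#     assert config.hidden_size == 1024      # resolved to n_embd
#     assert config.num_hidden_layers == 24  # resolved to n_layer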
def bfs(graph, source, sink, parent):
    """Return True if sink is reachable from source in the residual graph, filling `parent`."""
    visited = [False] * len(graph)
    queue = []
    queue.append(source)
    visited[source] = True

    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u

    return visited[sink]


def ford_fulkerson(graph, source, sink):
    """Maximum flow via BFS-chosen augmenting paths (the Edmonds-Karp variant)."""
    parent = [-1] * (len(graph))
    max_flow = 0
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink

        while s != source:
            # Find the minimum residual capacity along the augmenting path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]

        max_flow += path_flow
        v = sink

        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]

    return max_flow


graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]

source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
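
# Added sanity check: the capacity matrix above is the classic CLRS flow
# network, whose maximum flow is 23. Note that ford_fulkerson mutates its
# argument into the residual network, so run it on a copy if the original
# capacities are still needed.
fresh_graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]
assert ford_fulkerson(fresh_graph, source, sink) == 23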
from manim import *


class CheckpointLoadingScene(Scene):
    """Animates loading the weights of a single checkpoint shard into CPU memory."""

    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)

        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        gpu_base = [mem.copy() for i in range(4)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.move_to([-1, -1, 0])
        self.add(gpu)

        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])
        self.add(model)

        cpu_targs = []
        for i, rect in enumerate(model_base):
            rect.set_stroke(YELLOW)
            cpu_target = (
                Rectangle(height=0.46 / 4, width=0.46 / 3).set_stroke(width=0.0).set_fill(YELLOW, opacity=0.7)
            )
            if i == 0:
                cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=UP)
                cpu_target.set_x(cpu_target.get_x() + 0.1)
            elif i == 3:
                cpu_target.next_to(cpu_targs[0], direction=UP, buff=0.0)
            else:
                cpu_target.next_to(cpu_targs[i - 1], direction=RIGHT, buff=0.0)
            self.add(cpu_target)
            cpu_targs.append(cpu_target)

        checkpoint_base = [mem.copy() for i in range(6)]
        checkpoint_rect = VGroup(*checkpoint_base).arrange(RIGHT, buff=0)
        checkpoint_text = Text("Loaded Checkpoint", font_size=24)
        checkpoint = Group(checkpoint_rect, checkpoint_text).arrange(DOWN, aligned_edge=DOWN, buff=0.4)
        checkpoint.move_to([3, 0.5, 0])

        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])
        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model",
            font_size=18,
        )
        key_text.move_to([-5, 2.4, 0])
        self.add(key_text, key)

        blue_text = MarkupText(
            f"<span fgcolor='{BLUE}'>●</span> Checkpoint",
            font_size=18,
        )
        blue_text.next_to(key_text, DOWN * 2.4, aligned_edge=key_text.get_left())

        step_2 = MarkupText(
            f"Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>.",
            font_size=24,
        )
        step_2.move_to([2, 2, 0])
        self.play(Write(step_2), Write(blue_text))
        self.play(Write(checkpoint_text, run_time=1), Create(checkpoint_rect, run_time=1))

        first_animations = []
        second_animations = []
        for i, rect in enumerate(checkpoint_base):
            target = fill.copy().set_fill(BLUE, opacity=0.7)
            target.move_to(rect)
            first_animations.append(GrowFromCenter(target, run_time=1))

            cpu_target = target.copy()
            cpu_target.generate_target()
            if i < 5:
                cpu_target.target.move_to(cpu_left_col_base[i + 1])
            else:
                cpu_target.target.move_to(cpu_right_col_base[i - 5])
            second_animations.append(MoveToTarget(cpu_target, run_time=1.5))

        self.play(*first_animations)
        self.play(*second_animations)
        self.wait()
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json",
}


class GPTNeoXJapaneseConfig(PretrainedConfig):
    """Configuration for a GPT-NeoX-Japanese model."""

    model_type = "gpt_neox_japanese"

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=2560,
        num_hidden_layers=32,
        num_attention_heads=32,
        intermediate_multiple_size=4,
        hidden_act="gelu",
        rotary_pct=1.00,
        rotary_emb_base=10000,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=31996,
        eos_token_id=31999,
        attention_dropout=0.1,
        hidden_dropout=0.0,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
class SubArray:
    def __init__(self, arr):
        # Turn the comma-separated input string into a list of number strings
        self.array = arr.split(",")

    def solve_sub_array(self):
        """Maximum contiguous-subarray sum via the Kadane-style recurrence below."""
        rear = [int(self.array[0])] * len(self.array)
        sum_value = [int(self.array[0])] * len(self.array)
        for i in range(1, len(self.array)):
            # Best sum of a subarray ending at i: extend the previous run or restart
            sum_value[i] = max(int(self.array[i]) + sum_value[i - 1], int(self.array[i]))
            # Best sum seen anywhere up to i
            rear[i] = max(sum_value[i], rear[i - 1])
        return rear[len(self.array) - 1]


if __name__ == "__main__":
    whole_array = input("please input some numbers:")
    array = SubArray(whole_array)
    re = array.solve_sub_array()
    print(("the results is:", re))
from typing import List, Optional, Union

import numpy as np
import PIL
import torch
from PIL import Image

from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
    is_accelerate_available,
    is_accelerate_version,
    logging,
    randn_tensor,
    replace_example_docstring,
)


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = """
    Examples:
        ```py
        >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline
        >>> from diffusers.utils import load_image
        >>> import torch

        >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
        ...     "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        ... )
        >>> pipe_prior.to("cuda")

        >>> prompt = "A red cartoon frog, 4k"
        >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)

        >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(
        ...     "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
        ... )
        >>> pipe.to("cuda")

        >>> init_image = load_image(
        ...     "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
        ...     "/kandinsky/frog.png"
        ... )

        >>> image = pipe(
        ...     image=init_image,
        ...     image_embeds=image_emb,
        ...     negative_image_embeds=zero_image_emb,
        ...     height=768,
        ...     width=768,
        ...     num_inference_steps=100,
        ...     strength=0.2,
        ... ).images

        >>> image[0].save("red_frog.png")
        ```
"""


def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor


def prepare_image(pil_image, w=512, h=512):
    pil_image = pil_image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1)
    arr = np.array(pil_image.convert("RGB"))
    arr = arr.astype(np.float32) / 127.5 - 1
    arr = np.transpose(arr, [2, 0, 1])
    image = torch.from_numpy(arr).unsqueeze(0)
    return image


class KandinskyV22Img2ImgPipeline(DiffusionPipeline):
    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: DDPMScheduler,
        movq: VQModel,
    ):
        super().__init__()
        self.register_modules(
            unet=unet,
            scheduler=scheduler,
            movq=movq,
        )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start

    def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
            )

        image = image.to(device=device, dtype=dtype)
        batch_size = batch_size * num_images_per_prompt

        if image.shape[1] == 4:
            init_latents = image
        else:
            if isinstance(generator, list) and len(generator) != batch_size:
                raise ValueError(
                    f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                    f" size of {batch_size}. Make sure the batch size matches the length of the generators."
                )
            elif isinstance(generator, list):
                init_latents = [
                    self.movq.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
                ]
                init_latents = torch.cat(init_latents, dim=0)
            else:
                init_latents = self.movq.encode(image).latent_dist.sample(generator)

            init_latents = self.movq.config.scaling_factor * init_latents

        init_latents = torch.cat([init_latents], dim=0)
        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        # get latents
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents
        return latents

    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")
        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")
        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds,
        image,
        negative_image_embeds,
        height=512,
        width=512,
        num_inference_steps=100,
        guidance_scale=4.0,
        strength=0.3,
        num_images_per_prompt=1,
        generator=None,
        output_type="pil",
        return_dict=True,
    ):
        device = self._execution_device
        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        batch_size = image_embeds.shape[0]
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=self.unet.dtype, device=device
            )

        if not isinstance(image, list):
            image = [image]
        if not all(isinstance(i, (PIL.Image.Image, torch.Tensor)) for i in image):
            raise ValueError(
                f"Input is in incorrect format: {[type(i) for i in image]}. Currently, we only support PIL image and pytorch tensor"
            )

        image = torch.cat([prepare_image(i, width, height) for i in image], dim=0)
        image = image.to(dtype=image_embeds.dtype, device=device)

        latents = self.movq.encode(image)["latents"]
        latents = latents.repeat_interleave(num_images_per_prompt, dim=0)
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)
        latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)
        latents = self.prepare_latents(
            latents, latent_timestep, batch_size, num_images_per_prompt, image_embeds.dtype, device, generator
        )
        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            added_cond_kwargs = {"image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]
            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, generator=generator)[0]

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")
        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
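
# Added note: in get_timesteps above, `strength` controls how much of the
# denoising schedule actually runs. E.g. num_inference_steps=100 and
# strength=0.3 give init_timestep=30 and t_start=70, so only the final 30
# scheduler timesteps are applied to the noised image latents. Likewise,
# downscale_height_and_width(768, 768) returns (96, 96) with the default
# scale_factor=8 (768 // 8**2 = 12, then times 8), rounding up for inputs
# that are not multiples of scale_factor**2.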
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
SCREAMING_SNAKE_CASE : Optional[Any] = {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/config.json""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/config.json""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/config.json""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/config.json""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/config.json""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/config.json""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json""",
}
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ ='albert'
def __init__(self , a_=3_00_00 , a_=1_28 , a_=40_96 , a_=12 , a_=1 , a_=64 , a_=1_63_84 , a_=1 , a_="gelu_new" , a_=0 , a_=0 , a_=5_12 , a_=2 , a_=0.02 , a_=1E-12 , a_=0.1 , a_="absolute" , a_=0 , a_=2 , a_=3 , **a_ , ):
'''simple docstring'''
super().__init__(pad_token_id=a_ , bos_token_id=a_ , eos_token_id=a_ , **a_ )
__snake_case : List[Any] = vocab_size
__snake_case : Optional[int] = embedding_size
__snake_case : List[str] = hidden_size
__snake_case : Optional[int] = num_hidden_layers
__snake_case : int = num_hidden_groups
__snake_case : Optional[Any] = num_attention_heads
__snake_case : str = inner_group_num
__snake_case : List[Any] = hidden_act
__snake_case : List[str] = intermediate_size
__snake_case : List[Any] = hidden_dropout_prob
__snake_case : str = attention_probs_dropout_prob
__snake_case : int = max_position_embeddings
__snake_case : str = type_vocab_size
__snake_case : List[Any] = initializer_range
__snake_case : Optional[Any] = layer_norm_eps
__snake_case : Optional[int] = classifier_dropout_prob
__snake_case : Any = position_embedding_type
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
@property
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
if self.task == "multiple-choice":
__snake_case : Optional[Any] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
__snake_case : Tuple = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''token_type_ids''', dynamic_axis),
] )
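
# Hedged usage sketch (added): assuming these are the AlbertConfig /
# AlbertOnnxConfig classes shipped with transformers, the ONNX export axes
# can be inspected like this:
#
#     from transformers import AlbertConfig
#     onnx_config = AlbertOnnxConfig(AlbertConfig())
#     print(onnx_config.inputs)
#     # OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}), ...])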
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__(self , a_ , a_=7 , a_=3 , a_=18 , a_=30 , a_=4_00 , a_=True , a_=None , a_=True , a_=None , a_=True , ):
'''simple docstring'''
__snake_case : List[Any] = size if size is not None else {'''shortest_edge''': 20}
__snake_case : int = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
__snake_case : Tuple = parent
__snake_case : Tuple = batch_size
__snake_case : Tuple = num_channels
__snake_case : List[str] = image_size
__snake_case : Optional[Any] = min_resolution
__snake_case : List[Any] = max_resolution
__snake_case : List[Any] = do_resize
__snake_case : Dict = size
__snake_case : Dict = do_center_crop
__snake_case : Dict = crop_size
__snake_case : str = do_flip_channel_order
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
@require_torch
@require_vision
class _UpperCAmelCase ( __snake_case, unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ =MobileViTImageProcessor if is_vision_available() else None
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Tuple = MobileViTImageProcessingTester(self )
@property
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : int = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a_ , '''do_resize''' ) )
self.assertTrue(hasattr(a_ , '''size''' ) )
self.assertTrue(hasattr(a_ , '''do_center_crop''' ) )
self.assertTrue(hasattr(a_ , '''center_crop''' ) )
self.assertTrue(hasattr(a_ , '''do_flip_channel_order''' ) )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : int = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 20} )
self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )
__snake_case : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42} )
self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__snake_case : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ )
for image in image_inputs:
self.assertIsInstance(a_ , Image.Image )
# Test not batched input
__snake_case : Optional[int] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
__snake_case : str = image_processing(a_ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Dict = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__snake_case : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ , numpify=a_ )
for image in image_inputs:
self.assertIsInstance(a_ , np.ndarray )
# Test not batched input
__snake_case : Union[str, Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
__snake_case : Union[str, Any] = image_processing(a_ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__snake_case : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ , torchify=a_ )
for image in image_inputs:
self.assertIsInstance(a_ , torch.Tensor )
# Test not batched input
__snake_case : Any = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
__snake_case : Tuple = image_processing(a_ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
import os
import unittest

from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer

from ...test_tokenization_common import TokenizerTesterMixin


class TransfoXLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = TransfoXLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "<unk>",
            "[CLS]",
            "[SEP]",
            "want",
            "unwanted",
            "wa",
            "un",
            "running",
            ",",
            "low",
            "l",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        kwargs["lower_case"] = True
        return TransfoXLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "<unk> UNwanted , running"
        output_text = "<unk> unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = TransfoXLTokenizer(vocab_file=self.vocab_file, lower_case=True)
        tokens = tokenizer.tokenize("<unk> UNwanted , running")
        self.assertListEqual(tokens, ["<unk>", "unwanted", ",", "running"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [0, 4, 8, 7])

    def test_full_tokenizer_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how  \n Are yoU ?  "), ["hello", "!", "how", "are", "you", "?"]
        )

    def test_full_tokenizer_no_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how  \n Are yoU ?  "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_full_tokenizer_moses_numbers(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)
        text_in = "Hello (bracket) and side-scrolled [and] Henry's $5,000 with 3.34 m. What's up!?"
        tokens_out = [
            "Hello",
            "(",
            "bracket",
            ")",
            "and",
            "side",
            "@-@",
            "scrolled",
            "[",
            "and",
            "]",
            "Henry",
            "'s",
            "$",
            "5",
            "@,@",
            "000",
            "with",
            "3",
            "@.@",
            "34",
            "m",
            ".",
            "What",
            "'s",
            "up",
            "!",
            "?",
        ]
        self.assertListEqual(tokenizer.tokenize(text_in), tokens_out)
        self.assertEqual(tokenizer.convert_tokens_to_string(tokens_out), text_in)

    def test_move_added_token(self):
        tokenizer = self.get_tokenizer()
        original_len = len(tokenizer)

        tokenizer.add_tokens(["new1", "new2"])
        tokenizer.move_added_token("new1", 1)

        # Check that moved token is not copied (duplicate)
        self.assertEqual(len(tokenizer), original_len + 2)
        # Check that token is moved to specified id
        self.assertEqual(tokenizer.encode("new1"), [1])
        self.assertEqual(tokenizer.decode([1]), "new1")
from ..utils import DummyObject, requires_backends


class TFGPT2Tokenizer(metaclass=DummyObject):
    _backends = ["keras_nlp"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["keras_nlp"])
import argparse
from collections import defaultdict

import yaml


PATH_TO_TOC = "docs/source/en/_toctree.yml"


def clean_model_doc_toc(model_doc):
    """Deduplicate entries in the model doc ToC and sort them alphabetically by title."""
    counts = defaultdict(int)
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in model_doc if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add none duplicate-keys
    new_doc.extend([doc for doc in model_doc if counts[doc["local"]] == 1])

    # Sort
    return sorted(new_doc, key=lambda s: s["title"].lower())


def check_model_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1
    model_doc = api_doc[model_idx]["sections"]

    # Clean each modality section in turn
    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc) if "sections" in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc["sections"]
        new_modality_doc = clean_model_doc_toc(old_modality_doc)
        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                model_doc[idx]["sections"] = new_modality_doc

    if diff:
        if overwrite:
            api_doc[model_idx]["sections"] = model_doc
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_model_doc(args.fix_and_overwrite)
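
# Added doctest-style note: clean_model_doc_toc collapses duplicate "local"
# keys (when their titles agree) and returns the entries sorted by title, e.g.
#   clean_model_doc_toc([
#       {"local": "bert", "title": "BERT"},
#       {"local": "albert", "title": "ALBERT"},
#       {"local": "bert", "title": "BERT"},
#   ]) == [{"local": "albert", "title": "ALBERT"}, {"local": "bert", "title": "BERT"}]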
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional

import pyarrow as pa
import pyarrow.json as paj

import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline


logger = datasets.utils.logging.get_logger(__name__)


@dataclass
class JsonConfig(datasets.BuilderConfig):
    """BuilderConfig for JSON."""

    features: Optional[datasets.Features] = None
    encoding: str = "utf-8"
    encoding_errors: Optional[str] = None
    field: Optional[str] = None
    use_threads: bool = True  # deprecated
    block_size: Optional[int] = None  # deprecated
    chunksize: int = 10 << 20  # 10MB
    newlines_in_values: Optional[bool] = None


class Json(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = JsonConfig

    def _info(self):
        if self.config.block_size is not None:
            logger.warning("The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead")
            self.config.chunksize = self.config.block_size
        if self.config.use_threads is not True:
            logger.warning(
                "The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore."
            )
        if self.config.newlines_in_values is not None:
            raise ValueError("The JSON loader parameter `newlines_in_values` is no longer supported")
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            # adding missing columns
            for column_name in set(self.config.features) - set(pa_table.column_names):
                col_type = self.config.features.arrow_schema.field(column_name).type
                pa_table = pa_table.append_column(column_name, pa.array([None] * len(pa_table), type=col_type))
            # more expensive cast to support nested structures with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            # If the file is one json object and if we need to look at the list of items in one specific field
            if self.config.field is not None:
                with open(file, encoding=self.config.encoding, errors=self.config.encoding_errors) as f:
                    dataset = json.load(f)
                # We keep only the field we are interested in
                dataset = dataset[self.config.field]
                # We accept two formats: a list of dicts or a dict of lists
                if isinstance(dataset, (list, tuple)):
                    keys = set().union(*[row.keys() for row in dataset])
                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                else:
                    mapping = dataset
                pa_table = pa.Table.from_pydict(mapping)
                yield file_idx, self._cast_table(pa_table)
            # If the file has one json object per line
            else:
                with open(file, "rb") as f:
                    batch_idx = 0
                    # Use block_size equal to the chunk size divided by 32 to leverage multithreading
                    # Set a default minimum value of 16kB if the chunk size is really small
                    block_size = max(self.config.chunksize // 32, 16 << 10)
                    encoding_errors = (
                        self.config.encoding_errors if self.config.encoding_errors is not None else "strict"
                    )
                    while True:
                        batch = f.read(self.config.chunksize)
                        if not batch:
                            break
                        # Finish current line
                        try:
                            batch += f.readline()
                        except (AttributeError, io.UnsupportedOperation):
                            batch += readline(f)
                        # PyArrow only accepts utf-8 encoded bytes
                        if self.config.encoding != "utf-8":
                            batch = batch.decode(self.config.encoding, errors=encoding_errors).encode("utf-8")
                        try:
                            while True:
                                try:
                                    pa_table = paj.read_json(
                                        io.BytesIO(batch), read_options=paj.ReadOptions(block_size=block_size)
                                    )
                                    break
                                except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
                                    if (
                                        isinstance(e, pa.ArrowInvalid)
                                        and "straddling" not in str(e)
                                        or block_size > len(batch)
                                    ):
                                        raise
                                    else:
                                        # Increase the block size in case it was too small.
                                        # The block size will be reset for the next file.
                                        logger.debug(
                                            f"Batch of {len(batch)} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}."
                                        )
                                        block_size *= 2
                        except pa.ArrowInvalid as e:
                            try:
                                with open(
                                    file, encoding=self.config.encoding, errors=self.config.encoding_errors
                                ) as f:
                                    dataset = json.load(f)
                            except json.JSONDecodeError:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise e
                            # If possible, parse the file as a list of json objects and exit the loop
                            if isinstance(dataset, list):  # list is the only sequence type supported in JSON
                                try:
                                    keys = set().union(*[row.keys() for row in dataset])
                                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                                    pa_table = pa.Table.from_pydict(mapping)
                                except (pa.ArrowInvalid, AttributeError) as e:
                                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                    raise ValueError(f"Not able to read records in the JSON file at {file}.") from None
                                yield file_idx, self._cast_table(pa_table)
                                break
                            else:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise ValueError(
                                    f"Not able to read records in the JSON file at {file}. "
                                    f"You should probably indicate the field of the JSON file containing your records. "
                                    f"This JSON file contain the following fields: {str(list(dataset.keys()))}. "
                                    f"Select the correct one and provide it as `field='XXX'` to the dataset loading method. "
                                ) from None
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield (file_idx, batch_idx), self._cast_table(pa_table)
                        batch_idx += 1
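
# Hedged usage sketch (added): a builder like this backs `load_dataset("json", ...)`
# in the datasets library, e.g.
#
#     from datasets import load_dataset
#     ds = load_dataset("json", data_files="data.jsonl")                 # one object per line
#     ds = load_dataset("json", data_files="data.json", field="rows")    # list nested under a field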
import os
import shutil
from pathlib import Path
from typing import Optional, Union

import numpy as np
from huggingface_hub import hf_hub_download

from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging


if is_onnx_available():
    import onnxruntime as ort


logger = logging.get_logger(__name__)

ORT_TO_NP_TYPE = {
    "tensor(bool)": np.bool_,
    "tensor(int8)": np.int8,
    "tensor(uint8)": np.uint8,
    "tensor(int16)": np.int16,
    "tensor(uint16)": np.uint16,
    "tensor(int32)": np.int32,
    "tensor(uint32)": np.uint32,
    "tensor(int64)": np.int64,
    "tensor(uint64)": np.uint64,
    "tensor(float16)": np.float16,
    "tensor(float)": np.float32,
    "tensor(double)": np.float64,
}


class OnnxRuntimeModel:
    def __init__(self, model=None, **kwargs):
        logger.info("`diffusers.OnnxRuntimeModel` is experimental and might change in the future.")
        self.model = model
        self.model_save_dir = kwargs.get("model_save_dir", None)
        self.latest_model_name = kwargs.get("latest_model_name", ONNX_WEIGHTS_NAME)

    def __call__(self, **kwargs):
        inputs = {k: np.array(v) for k, v in kwargs.items()}
        return self.model.run(None, inputs)

    @staticmethod
    def load_model(path: Union[str, Path], provider=None, sess_options=None):
        """Load an ONNX inference session with the given provider (CPU by default)."""
        if provider is None:
            logger.info("No onnxruntime provider specified, using CPUExecutionProvider")
            provider = "CPUExecutionProvider"
        return ort.InferenceSession(path, providers=[provider], sess_options=sess_options)

    def _save_pretrained(self, save_directory: Union[str, Path], file_name: Optional[str] = None, **kwargs):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        src_path = self.model_save_dir.joinpath(self.latest_model_name)
        dst_path = Path(save_directory).joinpath(model_file_name)
        try:
            shutil.copyfile(src_path, dst_path)
        except shutil.SameFileError:
            pass

        # copy external weights (for models >2GB)
        src_path = self.model_save_dir.joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
        if src_path.exists():
            dst_path = Path(save_directory).joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
            try:
                shutil.copyfile(src_path, dst_path)
            except shutil.SameFileError:
                pass

    def save_pretrained(self, save_directory: Union[str, os.PathLike], **kwargs):
        if os.path.isfile(save_directory):
            logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
            return
        os.makedirs(save_directory, exist_ok=True)
        # saving model weights/files
        self._save_pretrained(save_directory, **kwargs)

    @classmethod
    def _from_pretrained(
        cls,
        model_id: Union[str, Path],
        use_auth_token: Optional[Union[bool, str]] = None,
        revision: Optional[str] = None,
        force_download: bool = False,
        cache_dir: Optional[str] = None,
        file_name: Optional[str] = None,
        provider: Optional[str] = None,
        sess_options: Optional["ort.SessionOptions"] = None,
        **kwargs,
    ):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        # load model from local directory
        if os.path.isdir(model_id):
            model = OnnxRuntimeModel.load_model(
                os.path.join(model_id, model_file_name), provider=provider, sess_options=sess_options
            )
            kwargs["model_save_dir"] = Path(model_id)
        # load model from hub
        else:
            # download model
            model_cache_path = hf_hub_download(
                repo_id=model_id,
                filename=model_file_name,
                use_auth_token=use_auth_token,
                revision=revision,
                cache_dir=cache_dir,
                force_download=force_download,
            )
            kwargs["model_save_dir"] = Path(model_cache_path).parent
            kwargs["latest_model_name"] = Path(model_cache_path).name
            model = OnnxRuntimeModel.load_model(model_cache_path, provider=provider, sess_options=sess_options)
        return cls(model=model, **kwargs)

    @classmethod
    def from_pretrained(
        cls,
        model_id: Union[str, Path],
        force_download: bool = True,
        use_auth_token: Optional[str] = None,
        cache_dir: Optional[str] = None,
        **model_kwargs,
    ):
        revision = None
        if len(str(model_id).split("@")) == 2:
            model_id, revision = model_id.split("@")
        return cls._from_pretrained(
            model_id=model_id,
            revision=revision,
            cache_dir=cache_dir,
            force_download=force_download,
            use_auth_token=use_auth_token,
            **model_kwargs,
        )
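
# Hedged usage sketch (added): assuming this mirrors diffusers' OnnxRuntimeModel,
# a session can be pulled from the Hub and called with numpy inputs; the repo id
# below is a hypothetical placeholder:
#
#     model = OnnxRuntimeModel.from_pretrained("some-org/some-onnx-repo")
#     outputs = model(sample=np.zeros((1, 4, 64, 64), dtype=np.float32))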
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
_snake_case = logging.getLogger(__name__)
_snake_case = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
_snake_case = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class _snake_case :
lowerCamelCase__: Optional[str] = field(
default=_lowercase , metadata={
"help": (
"The model checkpoint for weights initialization.Don't set if you want to train a model from scratch."
)
} , )
lowerCamelCase__: Optional[str] = field(
default=_lowercase , metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(_lowercase )} , )
lowerCamelCase__: Optional[str] = field(
default=_lowercase , metadata={
"help": (
"Override some existing default config settings when a model is trained from scratch. Example: "
"n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
)
} , )
lowerCamelCase__: Optional[str] = field(
default=_lowercase , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
lowerCamelCase__: Optional[str] = field(
default=_lowercase , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
lowerCamelCase__: Optional[str] = field(
default=_lowercase , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
lowerCamelCase__: bool = field(
default=_lowercase , metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} , )
lowerCamelCase__: str = field(
default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
lowerCamelCase__: bool = field(
default=_lowercase , metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
def _lowerCamelCase ( self: str ) -> Tuple:
if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
raise ValueError(
"--config_overrides can't be used in combination with --config_name or --model_name_or_path" )
@dataclass
class _snake_case :
lowerCamelCase__: Optional[str] = field(
default=_lowercase , metadata={"help": "The name of the dataset to use (via the datasets library)."} )
lowerCamelCase__: Optional[str] = field(
default=_lowercase , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
lowerCamelCase__: Optional[str] = field(default=_lowercase , metadata={"help": "The input training data file (a text file)."} )
lowerCamelCase__: Optional[str] = field(
default=_lowercase , metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."} , )
lowerCamelCase__: Optional[str] = field(
default=_lowercase , metadata={"help": "An optional input train ref data file for whole word masking in Chinese."} , )
lowerCamelCase__: Optional[str] = field(
default=_lowercase , metadata={"help": "An optional input validation ref data file for whole word masking in Chinese."} , )
lowerCamelCase__: bool = field(
default=_lowercase , metadata={"help": "Overwrite the cached training and evaluation sets"} )
lowerCamelCase__: Optional[int] = field(
default=5 , metadata={
"help": "The percentage of the train set used as validation set in case there's no validation split"
} , )
lowerCamelCase__: Optional[int] = field(
default=_lowercase , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated. Default to the max input length of the model."
)
} , )
lowerCamelCase__: Optional[int] = field(
default=_lowercase , metadata={"help": "The number of processes to use for the preprocessing."} , )
lowerCamelCase__: float = field(
default=0.15 , metadata={"help": "Ratio of tokens to mask for masked language modeling loss"} )
lowerCamelCase__: bool = field(
default=_lowercase , metadata={
"help": (
"Whether to pad all samples to `max_seq_length`. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch."
)
} , )
def _lowerCamelCase ( self: Any ) -> Tuple:
if self.train_file is not None:
__UpperCAmelCase : Optional[int] = self.train_file.split("." )[-1]
assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
if self.validation_file is not None:
__UpperCAmelCase : str = self.validation_file.split("." )[-1]
assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def _UpperCamelCase ( snake_case__, snake_case__ ) -> Optional[int]:
with open(snake_case__, "r", encoding="utf-8" ) as f:
__UpperCAmelCase : List[str] = [json.loads(snake_case__ ) for line in f.read().splitlines() if (len(snake_case__ ) > 0 and not line.isspace())]
assert len(snake_case__ ) == len(snake_case__ )
__UpperCAmelCase : Optional[int] = {c: dataset[c] for c in dataset.column_names}
__UpperCAmelCase : Any = refs
return Dataset.from_dict(snake_case__ )
def _UpperCamelCase ( ) -> str:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
__UpperCAmelCase : Optional[Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : str = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Any = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
__UpperCAmelCase : int = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
__UpperCAmelCase : List[Any] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None:
logger.info(
f'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout )], )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
+ f'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("Training/evaluation parameters %s", snake_case__ )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
__UpperCAmelCase : Optional[Any] = load_dataset(data_args.dataset_name, data_args.dataset_config_name )
if "validation" not in datasets.keys():
__UpperCAmelCase : Dict = load_dataset(
data_args.dataset_name, data_args.dataset_config_name, split=f'''train[:{data_args.validation_split_percentage}%]''', )
__UpperCAmelCase : List[str] = load_dataset(
data_args.dataset_name, data_args.dataset_config_name, split=f'''train[{data_args.validation_split_percentage}%:]''', )
else:
__UpperCAmelCase : List[Any] = {}
if data_args.train_file is not None:
__UpperCAmelCase : Optional[int] = data_args.train_file
if data_args.validation_file is not None:
__UpperCAmelCase : List[str] = data_args.validation_file
__UpperCAmelCase : Tuple = data_args.train_file.split("." )[-1]
if extension == "txt":
__UpperCAmelCase : str = "text"
__UpperCAmelCase : List[Any] = load_dataset(snake_case__, data_files=snake_case__ )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__UpperCAmelCase : Tuple = {
"cache_dir": model_args.cache_dir,
"revision": model_args.model_revision,
"use_auth_token": True if model_args.use_auth_token else None,
}
if model_args.config_name:
__UpperCAmelCase : Any = AutoConfig.from_pretrained(model_args.config_name, **snake_case__ )
elif model_args.model_name_or_path:
__UpperCAmelCase : int = AutoConfig.from_pretrained(model_args.model_name_or_path, **snake_case__ )
else:
__UpperCAmelCase : str = CONFIG_MAPPING[model_args.model_type]()
logger.warning("You are instantiating a new config instance from scratch." )
if model_args.config_overrides is not None:
logger.info(f'''Overriding config: {model_args.config_overrides}''' )
config.update_from_string(model_args.config_overrides )
logger.info(f'''New config: {config}''' )
__UpperCAmelCase : List[Any] = {
"cache_dir": model_args.cache_dir,
"use_fast": model_args.use_fast_tokenizer,
"revision": model_args.model_revision,
"use_auth_token": True if model_args.use_auth_token else None,
}
if model_args.tokenizer_name:
__UpperCAmelCase : List[str] = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **snake_case__ )
elif model_args.model_name_or_path:
__UpperCAmelCase : Optional[int] = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **snake_case__ )
else:
raise ValueError(
"You are instantiating a new tokenizer from scratch. This is not supported by this script."
"You can do it from another script, save it, and load it from here, using --tokenizer_name." )
if model_args.model_name_or_path:
__UpperCAmelCase : int = AutoModelForMaskedLM.from_pretrained(
model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path ), config=snake_case__, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
else:
logger.info("Training new model from scratch" )
__UpperCAmelCase : Any = AutoModelForMaskedLM.from_config(snake_case__ )
model.resize_token_embeddings(len(snake_case__ ) )
# Preprocessing the datasets.
# First we tokenize all the texts.
if training_args.do_train:
__UpperCAmelCase : List[str] = datasets["train"].column_names
else:
__UpperCAmelCase : Union[str, Any] = datasets["validation"].column_names
__UpperCAmelCase : Union[str, Any] = "text" if "text" in column_names else column_names[0]
__UpperCAmelCase : Any = "max_length" if data_args.pad_to_max_length else False
def tokenize_function(snake_case__ ):
# Remove empty lines
__UpperCAmelCase : Any = [line for line in examples["text"] if len(line ) > 0 and not line.isspace()]
return tokenizer(examples["text"], padding=snake_case__, truncation=snake_case__, max_length=data_args.max_seq_length )
__UpperCAmelCase : List[str] = datasets.map(
snake_case__, batched=snake_case__, num_proc=data_args.preprocessing_num_workers, remove_columns=[text_column_name], load_from_cache_file=not data_args.overwrite_cache, )
# Add the Chinese references if provided
if data_args.train_ref_file is not None:
__UpperCAmelCase : str = add_chinese_references(tokenized_datasets["train"], data_args.train_ref_file )
if data_args.validation_ref_file is not None:
__UpperCAmelCase : List[str] = add_chinese_references(
tokenized_datasets["validation"], data_args.validation_ref_file )
# If we have ref files, we must stop the Trainer from removing the ref columns
__UpperCAmelCase : List[str] = data_args.train_ref_file or data_args.validation_ref_file
if has_ref:
__UpperCAmelCase : Tuple = False
# Data collator
# This one will take care of randomly masking the tokens.
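# With whole word masking, all WordPiece sub-tokens that belong to one word
# are masked together rather than independently, which is the behaviour
# DataCollatorForWholeWordMask implements (and what the Chinese reference
# files above make possible for Chinese text).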
__UpperCAmelCase : Optional[Any] = DataCollatorForWholeWordMask(tokenizer=snake_case__, mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
__UpperCAmelCase : str = Trainer(
model=snake_case__, args=snake_case__, train_dataset=tokenized_datasets["train"] if training_args.do_train else None, eval_dataset=tokenized_datasets["validation"] if training_args.do_eval else None, tokenizer=snake_case__, data_collator=snake_case__, )
# Training
if training_args.do_train:
if last_checkpoint is not None:
__UpperCAmelCase : int = last_checkpoint
elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path ):
__UpperCAmelCase : Any = model_args.model_name_or_path
else:
__UpperCAmelCase : Tuple = None
__UpperCAmelCase : str = trainer.train(resume_from_checkpoint=snake_case__ )
trainer.save_model() # Saves the tokenizer too for easy upload
__UpperCAmelCase : str = os.path.join(training_args.output_dir, "train_results.txt" )
if trainer.is_world_process_zero():
with open(snake_case__, "w" ) as writer:
logger.info("***** Train results *****" )
for key, value in sorted(train_result.metrics.items() ):
logger.info(f''' {key} = {value}''' )
writer.write(f'''{key} = {value}\n''' )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json" ) )
# Evaluation
__UpperCAmelCase : Dict = {}
if training_args.do_eval:
logger.info("*** Evaluate ***" )
__UpperCAmelCase : List[Any] = trainer.evaluate()
__UpperCAmelCase : int = math.exp(eval_output["eval_loss"] )
__UpperCAmelCase : Union[str, Any] = perplexity
__UpperCAmelCase : List[Any] = os.path.join(training_args.output_dir, "eval_results_mlm_wwm.txt" )
if trainer.is_world_process_zero():
with open(snake_case__, "w" ) as writer:
logger.info("***** Eval results *****" )
for key, value in sorted(results.items() ):
logger.info(f''' {key} = {value}''' )
writer.write(f'''{key} = {value}\n''' )
return results
def _UpperCamelCase ( snake_case__ ) -> Dict:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 157
| 0
|
"""simple docstring"""
import logging
from transformers import PretrainedConfig
__lowercase = logging.getLogger(__name__)
__lowercase = {
'''bertabs-finetuned-cnndm''': '''https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json''',
}
class lowerCamelCase_ ( UpperCAmelCase_ ):
'''simple docstring'''
a__ : Optional[int] = """bertabs"""
def __init__( self , __lowercase=30_522 , __lowercase=512 , __lowercase=6 , __lowercase=512 , __lowercase=8 , __lowercase=512 , __lowercase=0.2 , __lowercase=6 , __lowercase=768 , __lowercase=8 , __lowercase=2_048 , __lowercase=0.2 , **__lowercase , ) -> Tuple:
super().__init__(**__lowercase)
__UpperCamelCase :int = vocab_size
__UpperCamelCase :List[Any] = max_pos
__UpperCamelCase :List[Any] = enc_layers
__UpperCamelCase :Optional[int] = enc_hidden_size
__UpperCamelCase :Dict = enc_heads
__UpperCamelCase :int = enc_ff_size
__UpperCamelCase :Tuple = enc_dropout
__UpperCamelCase :List[Any] = dec_layers
__UpperCamelCase :Tuple = dec_hidden_size
__UpperCamelCase :Tuple = dec_heads
__UpperCamelCase :Optional[Any] = dec_ff_size
__UpperCamelCase :Optional[int] = dec_dropout
| 357
|
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
__lowercase = logging.get_logger(__name__)
__lowercase = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
__lowercase = {
'''vocab_file''': {
'''allegro/herbert-base-cased''': '''https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json'''
},
'''merges_file''': {
'''allegro/herbert-base-cased''': '''https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt'''
},
}
__lowercase = {'''allegro/herbert-base-cased''': 514}
__lowercase = {}
class lowerCamelCase_ ( UpperCAmelCase_ ):
'''simple docstring'''
a__ : Tuple = VOCAB_FILES_NAMES
a__ : Dict = PRETRAINED_VOCAB_FILES_MAP
a__ : Tuple = PRETRAINED_INIT_CONFIGURATION
a__ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a__ : Union[str, Any] = HerbertTokenizer
def __init__( self , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase="<s>" , __lowercase="<unk>" , __lowercase="<pad>" , __lowercase="<mask>" , __lowercase="</s>" , **__lowercase , ) -> Optional[Any]:
super().__init__(
__lowercase , __lowercase , tokenizer_file=__lowercase , cls_token=__lowercase , unk_token=__lowercase , pad_token=__lowercase , mask_token=__lowercase , sep_token=__lowercase , **__lowercase , )
def UpperCamelCase__ ( self , __lowercase , __lowercase = None) -> List[int]:
__UpperCamelCase :List[str] = [self.cls_token_id]
__UpperCamelCase :Tuple = [self.sep_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
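# A single sequence is thus rendered as <s> A </s> and a pair as
# <s> A </s> B </s>: HerBERT reuses <s> and </s> as its CLS/SEP tokens.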
def UpperCamelCase__ ( self , __lowercase , __lowercase = None , __lowercase = False) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__lowercase , token_ids_a=__lowercase , already_has_special_tokens=__lowercase)
if token_ids_a is None:
return [1] + ([0] * len(__lowercase)) + [1]
return [1] + ([0] * len(__lowercase)) + [1] + ([0] * len(__lowercase)) + [1]
def UpperCamelCase__ ( self , __lowercase , __lowercase = None) -> List[int]:
__UpperCamelCase :Optional[Any] = [self.sep_token_id]
__UpperCamelCase :int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1]
def UpperCamelCase__ ( self , __lowercase , __lowercase = None) -> Tuple[str]:
__UpperCamelCase :Optional[int] = self._tokenizer.model.save(__lowercase , name=__lowercase)
return tuple(__lowercase)
| 105
| 0
|
from random import randint, random
def A ( lowercase , lowercase , lowercase , lowercase = False , lowercase = False , lowercase = 5 , ) -> list:
'''simple docstring'''
UpperCamelCase = [[-1] * number_of_cells] # Create a highway without any car
UpperCamelCase = 0
UpperCamelCase = max(lowercase , 0 )
while i < number_of_cells:
UpperCamelCase = (
randint(0 , lowercase ) if random_speed else initial_speed
) # Place the cars
i += (
randint(1 , max_speed * 2 ) if random_frequency else frequency
) # Arbitrary number, may need tuning
return highway
def A ( lowercase , lowercase ) -> int:
'''simple docstring'''
UpperCamelCase = 0
UpperCamelCase = highway_now[car_index + 1 :]
for cell in range(len(lowercase ) ):  # Scan the cells in front of the car
if cells[cell] != -1: # If the cell is not empty then
return distance # we have the distance we wanted
distance += 1
# If the car is near the end of the highway, wrap around (the road is circular)
return distance + get_distance(lowercase , -1 )
def A ( lowercase , lowercase , lowercase ) -> list:
'''simple docstring'''
UpperCamelCase = len(lowercase )
# Before the calculations, the highway is empty
UpperCamelCase = [-1] * number_of_cells
for car_index in range(lowercase ):
if highway_now[car_index] != -1:
# Add 1 to the current speed of the car and cap the speed
UpperCamelCase = min(highway_now[car_index] + 1 , lowercase )
# Number of empty cells before the next car
UpperCamelCase = get_distance(lowercase , lowercase ) - 1
# We can't have the car causing an accident
UpperCamelCase = min(next_highway[car_index] , lowercase )
if random() < probability:
# Randomly, a driver will slow down
UpperCamelCase = max(next_highway[car_index] - 1 , 0 )
return next_highway
def A ( lowercase , lowercase , lowercase , lowercase ) -> list:
'''simple docstring'''
UpperCamelCase = len(highway[0] )
for i in range(lowercase ):
UpperCamelCase = update(highway[i] , lowercase , lowercase )
UpperCamelCase = [-1] * number_of_cells
for car_index in range(lowercase ):
UpperCamelCase = next_speeds_calculated[car_index]
if speed != -1:
# Change the position based on the speed (with % to create the loop)
UpperCamelCase = (car_index + speed) % number_of_cells
# Commit the change of position
UpperCamelCase = speed
highway.append(lowercase )
return highway
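# The functions above implement the classic Nagel-Schreckenberg
# cellular-automaton rules for each car on every step:
#   1. acceleration: v <- min(v + 1, max_speed)
#   2. braking: v is capped by the measured headway to the car in front,
#      so no collision can occur
#   3. randomization: with the given probability, v <- max(v - 1, 0)
# and finally each car advances v cells, modulo the highway length
# (i.e. the road is treated as a ring).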
if __name__ == "__main__":
import doctest
doctest.testmod()
| 222
|
from decimal import Decimal, getcontext
from math import ceil, factorial
def A ( lowercase ) -> str:
'''simple docstring'''
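# Chudnovsky series (the formula implemented below):
#   1/pi = 12 * sum_{k>=0} (-1)^k * (6k)! * (13591409 + 545140134*k)
#                          / ((3k)! * (k!)**3 * 640320**(3k + 3/2))
# where 426880 * sqrt(10005) equals 640320**(3/2) / 12. Each term adds
# roughly 14 correct digits, hence the ceil(precision / 14) iterations.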
if not isinstance(lowercase , lowercase ):
raise TypeError('Undefined for non-integers' )
elif precision < 1:
raise ValueError('Undefined for non-natural numbers' )
getcontext().prec = precision
UpperCamelCase = ceil(precision / 14 )
UpperCamelCase = 426_880 * Decimal(10_005 ).sqrt()
UpperCamelCase = 1
UpperCamelCase = 13_591_409
UpperCamelCase = Decimal(lowercase )
for k in range(1 , lowercase ):
UpperCamelCase = factorial(6 * k ) // (factorial(3 * k ) * factorial(k ) ** 3)
linear_term += 545_140_134
exponential_term *= -262_537_412_640_768_000
partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term
return str(constant_term / partial_sum )[:-1]
if __name__ == "__main__":
_UpperCAmelCase : Dict = 50
print(F'''The first {n} digits of pi is: {pi(n)}''')
| 222
| 1
|
import math
def snake_case ( snake_case__ :int) -> bool:
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(snake_case__) + 1) , 6):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
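# Project Euler 58: in a square spiral of odd side length j, the four new
# corner values of the next ring are j*j + (j+1), j*j + 2*(j+1),
# j*j + 3*(j+1) and (j+2)**2; the last is a perfect square and never prime,
# so the loop below only tests the other three, stopping once the fraction
# of primes among the 2*j - 1 diagonal values drops below `ratio`.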
def snake_case ( snake_case__ :float = 0.1) -> int:
_A = 3
_A = 3
while primes / (2 * j - 1) >= ratio:
for i in range(j * j + j + 1 , (j + 2) * (j + 2) , j + 1):
primes += is_prime(i)
j += 2
return j
if __name__ == "__main__":
import doctest
doctest.testmod()
| 81
|
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
_SCREAMING_SNAKE_CASE = TableFormat(
lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow('', '|', '|'),
datarow=DataRow('', '|', '|'),
padding=1,
with_header_hide=None,
)
_SCREAMING_SNAKE_CASE = []
_SCREAMING_SNAKE_CASE = []
_SCREAMING_SNAKE_CASE = {'type': 'section', 'text': {'type': 'plain_text', 'text': 'No failed tests! 🤗', 'emoji': True}}
_SCREAMING_SNAKE_CASE = [
{
'type': 'header',
'text': {
'type': 'plain_text',
'text': F'''🤗 Accelerate nightly {os.environ.get("TEST_TYPE", "")} test results''',
'emoji': True,
},
}
]
_SCREAMING_SNAKE_CASE = 0
for log in Path().glob('*.log'):
_SCREAMING_SNAKE_CASE = 0
with open(log, 'r') as f:
for line in f:
_SCREAMING_SNAKE_CASE = json.loads(line)
if line.get('nodeid', '') != "":
_SCREAMING_SNAKE_CASE = line['nodeid']
if line.get('duration', None) is not None:
_SCREAMING_SNAKE_CASE = F'''{line["duration"]:.4f}'''
if line.get('outcome', '') == "failed":
section_num_failed += 1
failed.append([test, duration, log.name.split('_')[0]])
total_num_failed += 1
group_info.append([str(log), section_num_failed, failed])
_SCREAMING_SNAKE_CASE = []
log.unlink()
_SCREAMING_SNAKE_CASE = ''
_SCREAMING_SNAKE_CASE = []
if total_num_failed > 0:
for name, num_failed, failed_tests in group_info:
if num_failed > 0:
if num_failed == 1:
message += F"*{name[1:]}: {num_failed} failed test*\n"
else:
message += F"*{name[1:]}: {num_failed} failed tests*\n"
_SCREAMING_SNAKE_CASE = []
_SCREAMING_SNAKE_CASE = {}
for test in failed_tests:
_SCREAMING_SNAKE_CASE = test[0].split('::')
_SCREAMING_SNAKE_CASE = data[0].split('/')[-1]
if data[0] not in filesafailed:
_SCREAMING_SNAKE_CASE = [data[1:]]
else:
filesafailed[data[0]] += [data[1:]]
failed_table.append(data)
_SCREAMING_SNAKE_CASE = [test[0] for test in failed_table]
_SCREAMING_SNAKE_CASE = list(set(files))
# Count number of instances in failed_tests
_SCREAMING_SNAKE_CASE = []
for file in individual_files:
table.append([file, len(filesafailed[file])])
_SCREAMING_SNAKE_CASE = tabulate(
table,
headers=['Test Location', 'Num Failed'],
tablefmt=hf_table_format,
stralign='right',
)
message += F"\n```\n{failed_table}\n```"
all_filesafailed.append(filesafailed)
if len(message) > 3_000:
_SCREAMING_SNAKE_CASE = 'Too many failed tests, please see the full report in the Action results.'
_SCREAMING_SNAKE_CASE = len(err) + 10
_SCREAMING_SNAKE_CASE = message[: 3_000 - offset] + F'''\n...\n```\n{err}'''
print(F'''### {message}''')
else:
_SCREAMING_SNAKE_CASE = 'No failed tests! 🤗'
print(F'''## {message}''')
payload.append(no_error_payload)
if os.environ.get('TEST_TYPE', '') != "":
from slack_sdk import WebClient
_SCREAMING_SNAKE_CASE = WebClient(token=os.environ['SLACK_API_TOKEN'])
if message != "No failed tests! 🤗":
_SCREAMING_SNAKE_CASE = {
'type': 'section',
'text': {
'type': 'mrkdwn',
'text': message,
},
}
payload.append(md_report)
_SCREAMING_SNAKE_CASE = {
'type': 'section',
'text': {
'type': 'mrkdwn',
'text': '*For more details:*',
},
'accessory': {
'type': 'button',
'text': {
'type': 'plain_text',
'text': 'Check Action results',
'emoji': True,
},
'url': F'''https://github.com/{os.environ["GITHUB_REPOSITORY"]}/actions/runs/{os.environ["GITHUB_RUN_ID"]}''',
},
}
payload.append(action_button)
_SCREAMING_SNAKE_CASE = {
'type': 'context',
'elements': [
{
'type': 'plain_text',
'text': F'''Nightly {os.environ.get("TEST_TYPE")} test results for {date.today()}''',
}
],
}
payload.append(date_report)
_SCREAMING_SNAKE_CASE = client.chat_postMessage(channel='#accelerate-ci-daily', text=message, blocks=payload)
_SCREAMING_SNAKE_CASE = response.data['ts']
for failed_file in all_filesafailed:
for test_location, test_failures in failed_file.items():
# Keep only the first instance of the test name
_SCREAMING_SNAKE_CASE = ''
for i, row in enumerate(test_failures):
if row[0] != test_class:
_SCREAMING_SNAKE_CASE = row[0]
else:
_SCREAMING_SNAKE_CASE = ''
_SCREAMING_SNAKE_CASE = {
'type': 'section',
'text': {
'type': 'mrkdwn',
'text': F'''Test location: {test_location}\n```\n{tabulate(test_failures, headers=["Class", "Test"], tablefmt=hf_table_format, stralign="right")}\n```''',
},
}
client.chat_postMessage(
channel='#accelerate-ci-daily',
thread_ts=ts,
blocks=[payload],
)
| 81
| 1
|
"""simple docstring"""
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def UpperCamelCase ( UpperCAmelCase ) ->List[Any]:
"""simple docstring"""
if not is_accelerate_available():
return method
a_ = version.parse(accelerate.__version__ ).base_version
if version.parse(UpperCAmelCase ) < version.parse("0.17.0" ):
return method
def wrapper(self , *UpperCAmelCase , **UpperCAmelCase ):
if hasattr(self , "_hf_hook" ) and hasattr(self._hf_hook , "pre_forward" ):
self._hf_hook.pre_forward(self )
return method(self , *UpperCAmelCase , **UpperCAmelCase )
return wrapper
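# Hypothetical usage sketch (the class below is illustrative, not part of
# this file): in diffusers this decorator, apply_forward_hook, is applied to
# methods such as AutoencoderKL.encode so that, when accelerate >= 0.17.0
# has attached an offloading hook, the module's weights are moved onto the
# right device before the method body runs.
#
# class MyAutoencoder(nn.Module):
#     @apply_forward_hook
#     def encode(self, x):
#         return self.encoder(x)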
| 243
|
import unittest
from transformers import GPTSwaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
__lowerCAmelCase : List[Any] = get_tests_dir('fixtures/test_sentencepiece_with_bytefallback.model')
@require_sentencepiece
@require_tokenizers
class snake_case__ (_UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = GPTSwaTokenizer
SCREAMING_SNAKE_CASE_ : int = False
SCREAMING_SNAKE_CASE_ : Union[str, Any] = True
SCREAMING_SNAKE_CASE_ : Any = False
def __UpperCAmelCase ( self : Tuple ) -> Any:
super().setUp()
# We have a SentencePiece fixture for testing
a = GPTSwaTokenizer(__lowerCamelCase , eos_token="<unk>" , bos_token="<unk>" , pad_token="<unk>" )
tokenizer.save_pretrained(self.tmpdirname )
def __UpperCAmelCase ( self : Union[str, Any] , __lowerCamelCase : Optional[Any] ) -> Any:
a = "This is a test"
a = "This is a test"
return input_text, output_text
def __UpperCAmelCase ( self : List[Any] ) -> List[str]:
a = "<s>"
a = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowerCamelCase ) , __lowerCamelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowerCamelCase ) , __lowerCamelCase )
def __UpperCAmelCase ( self : List[Any] ) -> int:
a = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<unk>" )
self.assertEqual(vocab_keys[1] , "<s>" )
self.assertEqual(vocab_keys[-1] , "j" )
self.assertEqual(len(__lowerCamelCase ) , 20_00 )
def __UpperCAmelCase ( self : Optional[int] ) -> Dict:
self.assertEqual(self.get_tokenizer().vocab_size , 20_00 )
def __UpperCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
a = GPTSwaTokenizer(__lowerCamelCase )
a = tokenizer.tokenize("This is a test" )
self.assertListEqual(__lowerCamelCase , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCamelCase ) , [4_65, 2_87, 2_65, 6_31, 8_42] )
a = tokenizer.tokenize("I was born in 92000, and this is falsé." )
# fmt: off
self.assertListEqual(
__lowerCamelCase , ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."] , )
# fmt: on
a = tokenizer.convert_tokens_to_ids(__lowerCamelCase )
self.assertListEqual(
__lowerCamelCase , [2_62, 2_72, 15_25, 2_86, 2_71, 2_68, 60, 9_16, 6_33, 6_33, 6_33, 2_59, 2_66, 3_01, 2_87, 3_84, 3_67, 2_63, 1_98, 1_72, 2_60] , )
a = tokenizer.convert_ids_to_tokens(__lowerCamelCase )
# fmt: off
self.assertListEqual(
__lowerCamelCase , ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."] )
# fmt: on
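# The <0x39> piece is SentencePiece's byte-fallback encoding of the
# character "9", and <0xC3><0xA9> are the two UTF-8 bytes of "é": pieces
# missing from the vocabulary fall back to raw bytes instead of <unk>.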
def __UpperCAmelCase ( self : List[Any] ) -> str:
a = GPTSwaTokenizer(__lowerCamelCase )
a = ["This is a test", "I was born in 92000, and this is falsé."]
a = [
[4_65, 2_87, 2_65, 6_31, 8_42],
[2_62, 2_72, 15_25, 2_86, 2_71, 2_68, 60, 9_16, 6_33, 6_33, 6_33, 2_59, 2_66, 3_01, 2_87, 3_84, 3_67, 2_63, 1_98, 1_72, 2_60],
]
# Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
for text, expected_ids in zip(__lowerCamelCase , __lowerCamelCase ):
self.assertListEqual(tokenizer.encode_fast(__lowerCamelCase ) , __lowerCamelCase )
# Test that decode_fast returns the input text
for text, token_ids in zip(__lowerCamelCase , __lowerCamelCase ):
self.assertEqual(tokenizer.decode_fast(__lowerCamelCase ) , __lowerCamelCase )
@slow
def __UpperCAmelCase ( self : Optional[int] ) -> List[Any]:
a = [
"<|python|>def fibonacci(n)\n if n < 0:\n print('Incorrect input')",
"Hey there, how are you doing this fine day?",
"This is a text with a trailing spaces followed by a dot .",
"Häj sväjs lillebrör! =)",
"Det är inget fel på Mr. Cool",
]
# fmt: off
a = {"input_ids": [[6_34_23, 5, 68_11, 1_49_54, 2_82, 8_16, 38_21, 6_34_66, 6_34_25, 6_34_62, 18, 6_39_78, 6_78, 3_01, 13_20, 6_34_23, 6_34_55, 6_34_58, 18, 6_39_82, 42_46, 39_40, 19_01, 4_77_89, 55_47, 1_89_94], [1_96_30, 11_00, 6_34_46, 13_42, 6_33, 5_44, 44_88, 5_93, 51_02, 24_16, 6_34_95, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [16_52, 4_28, 2_68, 19_36, 5_15, 2_68, 5_85_93, 2_24_13, 91_06, 5_46, 2_68, 3_32_13, 6_39_79, 6_98, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_51_30, 6_34_50, 9_24, 6_34_49, 22_49, 40_62, 15_58, 3_18, 6_35_04, 2_14_98, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_09, 3_77, 28_27, 25_59, 3_32, 65_75, 6_34_43, 2_68_01, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__lowerCamelCase , model_name="AI-Sweden/gpt-sw3-126m" , sequences=__lowerCamelCase , )
| 107
| 0
|
'''simple docstring'''
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
UpperCAmelCase = '▁'
UpperCAmelCase = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class __snake_case( _lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase : str = BigBirdTokenizer
UpperCAmelCase : Tuple = BigBirdTokenizerFast
UpperCAmelCase : List[Any] = True
UpperCAmelCase : int = True
def __snake_case ( self ) -> List[str]:
super().setUp()
lowerCAmelCase = self.tokenizer_class(A_ , keep_accents=A_ )
tokenizer.save_pretrained(self.tmpdirname )
def __snake_case ( self ) -> List[str]:
lowerCAmelCase = """<s>"""
lowerCAmelCase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(A_ ) , A_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(A_ ) , A_ )
def __snake_case ( self ) -> int:
lowerCAmelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<unk>""" )
self.assertEqual(vocab_keys[1] , """<s>""" )
self.assertEqual(vocab_keys[-1] , """[MASK]""" )
self.assertEqual(len(A_ ) , 1004 )
def __snake_case ( self ) -> str:
self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
def __snake_case ( self ) -> int:
if not self.test_rust_tokenizer:
return
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = self.get_rust_tokenizer()
lowerCAmelCase = """I was born in 92000, and this is falsé."""
lowerCAmelCase = tokenizer.tokenize(A_ )
lowerCAmelCase = rust_tokenizer.tokenize(A_ )
self.assertListEqual(A_ , A_ )
lowerCAmelCase = tokenizer.encode(A_ , add_special_tokens=A_ )
lowerCAmelCase = rust_tokenizer.encode(A_ , add_special_tokens=A_ )
self.assertListEqual(A_ , A_ )
lowerCAmelCase = self.get_rust_tokenizer()
lowerCAmelCase = tokenizer.encode(A_ )
lowerCAmelCase = rust_tokenizer.encode(A_ )
self.assertListEqual(A_ , A_ )
def __snake_case ( self ) -> Optional[int]:
lowerCAmelCase = BigBirdTokenizer(A_ , keep_accents=A_ )
lowerCAmelCase = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(A_ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(A_ ) , [285, 46, 10, 170, 382] , )
lowerCAmelCase = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
A_ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
lowerCAmelCase = tokenizer.convert_tokens_to_ids(A_ )
self.assertListEqual(
A_ , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
lowerCAmelCase = tokenizer.convert_ids_to_tokens(A_ )
self.assertListEqual(
A_ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
@cached_property
def __snake_case ( self ) -> List[Any]:
return BigBirdTokenizer.from_pretrained("""google/bigbird-roberta-base""" )
@slow
def __snake_case ( self ) -> List[Any]:
lowerCAmelCase = """Hello World!"""
lowerCAmelCase = [65, 1_8536, 2260, 101, 66]
self.assertListEqual(A_ , self.big_tokenizer.encode(A_ ) )
@slow
def __snake_case ( self ) -> int:
lowerCAmelCase = (
"""This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
""" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"""
)
# fmt: off
lowerCAmelCase = [65, 871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405, 3_4324, 497, 391, 408, 1_1342, 1244, 385, 100, 938, 985, 456, 574, 362, 1_2597, 3200, 3129, 1172, 66] # noqa: E231
# fmt: on
self.assertListEqual(A_ , self.big_tokenizer.encode(A_ ) )
@require_torch
@slow
def __snake_case ( self ) -> Optional[int]:
import torch
from transformers import BigBirdConfig, BigBirdModel
# Build sequence
lowerCAmelCase = list(self.big_tokenizer.get_vocab().keys() )[:10]
lowerCAmelCase = """ """.join(A_ )
lowerCAmelCase = self.big_tokenizer.encode_plus(A_ , return_tensors="""pt""" , return_token_type_ids=A_ )
lowerCAmelCase = self.big_tokenizer.batch_encode_plus(
[sequence + """ """ + sequence] , return_tensors="""pt""" , return_token_type_ids=A_ )
lowerCAmelCase = BigBirdConfig(attention_type="""original_full""" )
lowerCAmelCase = BigBirdModel(A_ )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**A_ )
model(**A_ )
@slow
def __snake_case ( self ) -> Union[str, Any]:
lowerCAmelCase = BigBirdTokenizer.from_pretrained("""google/bigbird-roberta-base""" )
lowerCAmelCase = tokenizer.decode(tokenizer("""Paris is the [MASK].""" ).input_ids )
self.assertTrue(decoded_text == """[CLS] Paris is the[MASK].[SEP]""" )
@slow
def __snake_case ( self ) -> Optional[Any]:
# fmt: off
lowerCAmelCase = {"""input_ids""": [[65, 3_9286, 458, 3_6335, 2001, 456, 1_3073, 1_3266, 455, 113, 7746, 1741, 1_1157, 391, 1_3073, 1_3266, 455, 113, 3967, 3_5412, 113, 4936, 109, 3870, 2377, 113, 3_0084, 4_5720, 458, 134, 1_7496, 112, 503, 1_1672, 113, 118, 112, 5665, 1_3347, 3_8687, 112, 1496, 3_1389, 112, 3268, 4_7264, 134, 962, 112, 1_6377, 8035, 2_3130, 430, 1_2169, 1_5518, 2_8592, 458, 146, 4_1697, 109, 391, 1_2169, 1_5518, 1_6689, 458, 146, 4_1358, 109, 452, 726, 4034, 111, 763, 3_5412, 5082, 388, 1903, 111, 9051, 391, 2870, 4_8918, 1900, 1123, 550, 998, 112, 9586, 1_5985, 455, 391, 410, 2_2955, 3_7636, 114, 66], [65, 448, 1_7496, 419, 3663, 385, 763, 113, 2_7533, 2870, 3283, 1_3043, 1639, 2_4713, 523, 656, 2_4013, 1_8550, 2521, 517, 2_7014, 2_1244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 1_1786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2169, 7687, 2_1932, 1_8146, 726, 363, 1_7032, 3391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=A_ , model_name="""google/bigbird-roberta-base""" , revision="""215c99f1600e06f83acce68422f2035b2b5c3510""" , )
| 354
|
'''simple docstring'''
from __future__ import annotations
import pandas as pd
def _snake_case ( _SCREAMING_SNAKE_CASE : list[int] , _SCREAMING_SNAKE_CASE : list[int] , _SCREAMING_SNAKE_CASE : int ) -> list[int]:
"""simple docstring"""
lowerCAmelCase = [0] * no_of_processes
lowerCAmelCase = [0] * no_of_processes
# Copy the burst time into remaining_time[]
for i in range(_SCREAMING_SNAKE_CASE ):
lowerCAmelCase = burst_time[i]
lowerCAmelCase = 0
lowerCAmelCase = 0
lowerCAmelCase = 999_999_999
lowerCAmelCase = 0
lowerCAmelCase = False
# Process until all processes are completed
while complete != no_of_processes:
for j in range(_SCREAMING_SNAKE_CASE ):
if arrival_time[j] <= increment_time and remaining_time[j] > 0:
if remaining_time[j] < minm:
lowerCAmelCase = remaining_time[j]
lowerCAmelCase = j
lowerCAmelCase = True
if not check:
increment_time += 1
continue
remaining_time[short] -= 1
lowerCAmelCase = remaining_time[short]
if minm == 0:
lowerCAmelCase = 999_999_999
if remaining_time[short] == 0:
complete += 1
lowerCAmelCase = False
# Find finish time of current process
lowerCAmelCase = increment_time + 1
# Calculate waiting time
lowerCAmelCase = finish_time - arrival_time[short]
lowerCAmelCase = finar - burst_time[short]
if waiting_time[short] < 0:
lowerCAmelCase = 0
# Increment time
increment_time += 1
return waiting_time
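# Worked example of the SRTF logic above: arrival_time = [0, 1],
# burst_time = [3, 1]. P1 runs during t=0..1 (remaining 3 -> 2); at t=1,
# P2 arrives with remaining time 1 < 2, preempts, and finishes at t=2;
# P1 then resumes and finishes at t=4. Waiting times: P1 = 4 - 0 - 3 = 1,
# P2 = 2 - 1 - 1 = 0.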
def _snake_case ( _SCREAMING_SNAKE_CASE : list[int] , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : list[int] ) -> list[int]:
"""simple docstring"""
lowerCAmelCase = [0] * no_of_processes
for i in range(_SCREAMING_SNAKE_CASE ):
lowerCAmelCase = burst_time[i] + waiting_time[i]
return turn_around_time
def _snake_case ( _SCREAMING_SNAKE_CASE : list[int] , _SCREAMING_SNAKE_CASE : list[int] , _SCREAMING_SNAKE_CASE : int ) -> None:
"""simple docstring"""
lowerCAmelCase = 0
lowerCAmelCase = 0
for i in range(_SCREAMING_SNAKE_CASE ):
lowerCAmelCase = total_waiting_time + waiting_time[i]
lowerCAmelCase = total_turn_around_time + turn_around_time[i]
print(f'Average waiting time = {total_waiting_time / no_of_processes:.5f}' )
print("""Average turn around time =""" , total_turn_around_time / no_of_processes )
if __name__ == "__main__":
print('Enter how many processes you want to analyze')
UpperCAmelCase = int(input())
UpperCAmelCase = [0] * no_of_processes
UpperCAmelCase = [0] * no_of_processes
UpperCAmelCase = list(range(1, no_of_processes + 1))
for i in range(no_of_processes):
print('Enter the arrival time and burst time for process:--' + str(i + 1))
UpperCAmelCase , UpperCAmelCase = map(int, input().split())
UpperCAmelCase = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
UpperCAmelCase = burst_time
UpperCAmelCase = no_of_processes
UpperCAmelCase = waiting_time
UpperCAmelCase = calculate_turnaroundtime(bt, n, wt)
calculate_average_times(waiting_time, turn_around_time, no_of_processes)
UpperCAmelCase = pd.DataFrame(
list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
columns=[
'Process',
'BurstTime',
'ArrivalTime',
'WaitingTime',
'TurnAroundTime',
],
)
# Printing the dataFrame
pd.set_option('display.max_rows', fcfs.shape[0] + 1)
print(fcfs)
| 187
| 0
|
from math import factorial
def __lowerCamelCase ( __a :int , __a :int , __a :float ) -> float:
"""simple docstring"""
if successes > trials:
raise ValueError("""successes must be lower or equal to trials""" )
if trials < 0 or successes < 0:
raise ValueError("""the function is defined for non-negative integers""" )
if not isinstance(__a , __a ) or not isinstance(__a , __a ):
raise ValueError("""the function is defined for non-negative integers""" )
if not 0 < prob < 1:
raise ValueError("""prob has to be in range of 1 - 0""" )
A__ = (prob**successes) * ((1 - prob) ** (trials - successes))
# Calculate the binomial coefficient: n! / k!(n-k)!
A__ = float(factorial(__a ) )
coefficient /= factorial(__a ) * factorial(trials - successes )
return probability * coefficient
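# A standalone sanity check of the same formula (illustrative sketch using
# only the standard library): P(k; n, p) = C(n, k) * p**k * (1 - p)**(n - k),
# so P(2; 4, 0.75) = 6 * 0.5625 * 0.0625 = 0.2109375.
from math import comb
assert abs(comb(4, 2) * 0.75**2 * 0.25**2 - 0.2109375) < 1e-12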
if __name__ == "__main__":
from doctest import testmod
testmod()
print('''Probability of 2 successes out of 4 trials''')
print('''with probability of 0.75 is:''', end=''' ''')
print(binomial_distribution(2, 4, 0.75))
| 274
|
import argparse
from collections import defaultdict
import yaml
A : str = '''docs/source/en/_toctree.yml'''
def __lowerCamelCase ( __a :str ) -> List[Any]:
"""simple docstring"""
A__ = defaultdict(__a )
A__ = []
A__ = []
for doc in doc_list:
if "local" in doc:
counts[doc["local"]] += 1
if doc["title"].lower() == "overview":
overview_doc.append({"""local""": doc["""local"""], """title""": doc["""title"""]} )
else:
new_doc_list.append(__a )
A__ = new_doc_list
A__ = [key for key, value in counts.items() if value > 1]
A__ = []
for duplicate_key in duplicates:
A__ = list({doc["""title"""] for doc in doc_list if doc["""local"""] == duplicate_key} )
if len(__a ) > 1:
raise ValueError(
F'{duplicate_key} is present several times in the documentation table of content at '
"""`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the """
"""others.""" )
# Only add this once
new_doc.append({"""local""": duplicate_key, """title""": titles[0]} )
# Add non-duplicate keys
new_doc.extend([doc for doc in doc_list if """local""" not in counts or counts[doc["""local"""]] == 1] )
A__ = sorted(__a , key=lambda s : s["title"].lower() )
# "overview" gets special treatment and is always first
if len(__a ) > 1:
raise ValueError("""{doc_list} has two 'overview' docs which is not allowed.""" )
overview_doc.extend(__a )
# Sort
return overview_doc
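# Example of the cleanup above: given
#   [{"local": "overview", "title": "Overview"},
#    {"local": "ddim", "title": "DDIM"}, {"local": "ddim", "title": "DDIM"}]
# the duplicated "ddim" entry is collapsed into one, the remaining entries
# are sorted by title, and the "Overview" entry is moved to the front.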
def __lowerCamelCase ( __a :Any=False ) -> List[str]:
"""simple docstring"""
with open(__a , encoding="""utf-8""" ) as f:
A__ = yaml.safe_load(f.read() )
# Get to the API doc
A__ = 0
while content[api_idx]["title"] != "API":
api_idx += 1
A__ = content[api_idx]["""sections"""]
# Then to the scheduler doc
A__ = 0
while api_doc[scheduler_idx]["title"] != "Schedulers":
scheduler_idx += 1
A__ = api_doc[scheduler_idx]["""sections"""]
A__ = clean_doc_toc(__a )
A__ = False
if new_scheduler_doc != scheduler_doc:
A__ = True
if overwrite:
A__ = new_scheduler_doc
if diff:
if overwrite:
A__ = api_doc
with open(__a , """w""" , encoding="""utf-8""" ) as f:
f.write(yaml.dump(__a , allow_unicode=__a ) )
else:
raise ValueError(
"""The model doc part of the table of content is not properly sorted, run `make style` to fix this.""" )
def __lowerCamelCase ( __a :Optional[int]=False ) -> Dict:
"""simple docstring"""
with open(__a , encoding="""utf-8""" ) as f:
A__ = yaml.safe_load(f.read() )
# Get to the API doc
A__ = 0
while content[api_idx]["title"] != "API":
api_idx += 1
A__ = content[api_idx]["""sections"""]
# Then to the pipeline doc
A__ = 0
while api_doc[pipeline_idx]["title"] != "Pipelines":
pipeline_idx += 1
A__ = False
A__ = api_doc[pipeline_idx]["""sections"""]
A__ = []
# sort sub pipeline docs
for pipeline_doc in pipeline_docs:
if "section" in pipeline_doc:
A__ = pipeline_doc["""section"""]
A__ = clean_doc_toc(__a )
if overwrite:
A__ = new_sub_pipeline_doc
new_pipeline_docs.append(__a )
# sort overall pipeline doc
A__ = clean_doc_toc(__a )
if new_pipeline_docs != pipeline_docs:
A__ = True
if overwrite:
A__ = new_pipeline_docs
if diff:
if overwrite:
A__ = api_doc
with open(__a , """w""" , encoding="""utf-8""" ) as f:
f.write(yaml.dump(__a , allow_unicode=__a ) )
else:
raise ValueError(
"""The model doc part of the table of content is not properly sorted, run `make style` to fix this.""" )
if __name__ == "__main__":
A : Tuple = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
A : Optional[Any] = parser.parse_args()
check_scheduler_doc(args.fix_and_overwrite)
check_pipeline_doc(args.fix_and_overwrite)
| 274
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
__A = {
'''configuration_audio_spectrogram_transformer''': [
'''AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''ASTConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
'''AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ASTForAudioClassification''',
'''ASTModel''',
'''ASTPreTrainedModel''',
]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ['''ASTFeatureExtractor''']
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
__A = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 354
|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class _snake_case ( a__ , unittest.TestCase ):
snake_case__ = DiTPipeline
snake_case__ = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
snake_case__ = PipelineTesterMixin.required_optional_params - {
"latents",
"num_images_per_prompt",
"callback",
"callback_steps",
}
snake_case__ = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
snake_case__ = False
def lowerCamelCase__ ( self : Tuple ):
torch.manual_seed(0 )
__lowerCamelCase : Optional[Any] = TransformeraDModel(
sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=UpperCAmelCase , activation_fn="gelu-approximate" , num_embeds_ada_norm=1000 , norm_type="ada_norm_zero" , norm_elementwise_affine=UpperCAmelCase , )
__lowerCamelCase : List[str] = AutoencoderKL()
__lowerCamelCase : List[Any] = DDIMScheduler()
__lowerCamelCase : Optional[Any] = {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler}
return components
def lowerCamelCase__ ( self : List[str] , UpperCAmelCase : Any , UpperCAmelCase : Union[str, Any]=0 ):
if str(UpperCAmelCase ).startswith("mps" ):
__lowerCamelCase : List[str] = torch.manual_seed(UpperCAmelCase )
else:
__lowerCamelCase : List[str] = torch.Generator(device=UpperCAmelCase ).manual_seed(UpperCAmelCase )
__lowerCamelCase : str = {
"class_labels": [1],
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def lowerCamelCase__ ( self : Optional[Any] ):
__lowerCamelCase : Dict = "cpu"
__lowerCamelCase : int = self.get_dummy_components()
__lowerCamelCase : Optional[Any] = self.pipeline_class(**UpperCAmelCase )
pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
__lowerCamelCase : Optional[int] = self.get_dummy_inputs(UpperCAmelCase )
__lowerCamelCase : List[Any] = pipe(**UpperCAmelCase ).images
__lowerCamelCase : Optional[int] = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 16, 16, 3) )
__lowerCamelCase : Optional[int] = np.array([0.2_9_4_6, 0.6_6_0_1, 0.4_3_2_9, 0.3_2_9_6, 0.4_1_4_4, 0.5_3_1_9, 0.7_2_7_3, 0.5_0_1_3, 0.4_4_5_7] )
__lowerCamelCase : Optional[int] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(UpperCAmelCase , 1E-3 )
def lowerCamelCase__ ( self : Any ):
self._test_inference_batch_single_identical(relax_max_difference=UpperCAmelCase , expected_max_diff=1E-3 )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def lowerCamelCase__ ( self : List[str] ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@require_torch_gpu
@slow
class _snake_case ( unittest.TestCase ):
def lowerCamelCase__ ( self : Any ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase__ ( self : List[str] ):
__lowerCamelCase : Optional[int] = torch.manual_seed(0 )
__lowerCamelCase : str = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256" )
pipe.to("cuda" )
__lowerCamelCase : Tuple = ["vase", "umbrella", "white shark", "white wolf"]
__lowerCamelCase : Optional[int] = pipe.get_label_ids(UpperCAmelCase )
__lowerCamelCase : Optional[Any] = pipe(UpperCAmelCase , generator=UpperCAmelCase , num_inference_steps=40 , output_type="np" ).images
for word, image in zip(UpperCAmelCase , UpperCAmelCase ):
__lowerCamelCase : Dict = load_numpy(
F"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy""" )
assert np.abs((expected_image - image).max() ) < 1E-2
def lowerCamelCase__ ( self : str ):
__lowerCamelCase : Tuple = DiTPipeline.from_pretrained("facebook/DiT-XL-2-512" )
__lowerCamelCase : Any = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.to("cuda" )
__lowerCamelCase : Union[str, Any] = ["vase", "umbrella"]
__lowerCamelCase : int = pipe.get_label_ids(UpperCAmelCase )
__lowerCamelCase : Dict = torch.manual_seed(0 )
__lowerCamelCase : Dict = pipe(UpperCAmelCase , generator=UpperCAmelCase , num_inference_steps=25 , output_type="np" ).images
for word, image in zip(UpperCAmelCase , UpperCAmelCase ):
__lowerCamelCase : Union[str, Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
F"""/dit/{word}_512.npy""" )
assert np.abs((expected_image - image).max() ) < 1E-1
| 64
| 0
|
"""simple docstring"""
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class __UpperCamelCase ( a__ , a__ ):
@register_to_config
def __init__( self , *,
lowerCAmelCase__ = 4 , lowerCAmelCase__ = 768 , lowerCAmelCase__ , lowerCAmelCase__ , ) -> Optional[Any]:
super().__init__()
a : Tuple = nn.Parameter(torch.zeros(lowerCAmelCase__ ) )
# parameters for additional clip time embeddings
a : str = nn.Linear(lowerCAmelCase__ , lowerCAmelCase__ )
a : Any = nn.Linear(lowerCAmelCase__ , lowerCAmelCase__ )
# parameters for encoder hidden states
a : int = clip_extra_context_tokens
a : int = nn.Linear(
lowerCAmelCase__ , self.clip_extra_context_tokens * cross_attention_dim )
a : Any = nn.Linear(lowerCAmelCase__ , lowerCAmelCase__ )
a : str = nn.LayerNorm(lowerCAmelCase__ )
def __a ( self , *, lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Union[str, Any]:
if do_classifier_free_guidance:
# Add the classifier free guidance embeddings to the image embeddings
a : str = image_embeddings.shape[0]
a : Optional[int] = self.learned_classifier_free_guidance_embeddings.unsqueeze(0 )
a : Any = classifier_free_guidance_embeddings.expand(
lowerCAmelCase__ , -1 )
a : Any = torch.cat([classifier_free_guidance_embeddings, image_embeddings] , dim=0 )
# The image embeddings batch size and the text embeddings batch size are equal
assert image_embeddings.shape[0] == prompt_embeds.shape[0]
a : List[str] = prompt_embeds.shape[0]
# "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
# adding CLIP embeddings to the existing timestep embedding, ...
a : Dict = self.embedding_proj(lowerCAmelCase__ )
a : List[str] = self.clip_image_embeddings_project_to_time_embeddings(lowerCAmelCase__ )
a : Dict = time_projected_image_embeddings + time_projected_prompt_embeds
# ... and by projecting CLIP embeddings into four
# extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
a : Union[str, Any] = self.clip_extra_context_tokens_proj(lowerCAmelCase__ )
a : List[str] = clip_extra_context_tokens.reshape(lowerCAmelCase__ , -1 , self.clip_extra_context_tokens )
a : Optional[Any] = clip_extra_context_tokens.permute(0 , 2 , 1 )
a : Optional[int] = self.encoder_hidden_states_proj(lowerCAmelCase__ )
a : str = self.text_encoder_hidden_states_norm(lowerCAmelCase__ )
a : List[str] = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states] , dim=1 )
return text_encoder_hidden_states, additive_clip_time_embeddings
| 105
|
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
__snake_case :Any = logging.get_logger(__name__)
__snake_case :Optional[Any] = {
'''b0''': efficientnet.EfficientNetBa,
'''b1''': efficientnet.EfficientNetBa,
'''b2''': efficientnet.EfficientNetBa,
'''b3''': efficientnet.EfficientNetBa,
'''b4''': efficientnet.EfficientNetBa,
'''b5''': efficientnet.EfficientNetBa,
'''b6''': efficientnet.EfficientNetBa,
'''b7''': efficientnet.EfficientNetBa,
}
__snake_case :List[Any] = {
'''b0''': {
'''hidden_dim''': 1280,
'''width_coef''': 1.0,
'''depth_coef''': 1.0,
'''image_size''': 224,
'''dropout_rate''': 0.2,
'''dw_padding''': [],
},
'''b1''': {
'''hidden_dim''': 1280,
'''width_coef''': 1.0,
'''depth_coef''': 1.1,
'''image_size''': 240,
'''dropout_rate''': 0.2,
'''dw_padding''': [16],
},
'''b2''': {
'''hidden_dim''': 1408,
'''width_coef''': 1.1,
'''depth_coef''': 1.2,
'''image_size''': 260,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 8, 16],
},
'''b3''': {
'''hidden_dim''': 1536,
'''width_coef''': 1.2,
'''depth_coef''': 1.4,
'''image_size''': 300,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 18],
},
'''b4''': {
'''hidden_dim''': 1792,
'''width_coef''': 1.4,
'''depth_coef''': 1.8,
'''image_size''': 380,
'''dropout_rate''': 0.4,
'''dw_padding''': [6],
},
'''b5''': {
'''hidden_dim''': 2048,
'''width_coef''': 1.6,
'''depth_coef''': 2.2,
'''image_size''': 456,
'''dropout_rate''': 0.4,
'''dw_padding''': [13, 27],
},
'''b6''': {
'''hidden_dim''': 2304,
'''width_coef''': 1.8,
'''depth_coef''': 2.6,
'''image_size''': 528,
'''dropout_rate''': 0.5,
'''dw_padding''': [31],
},
'''b7''': {
'''hidden_dim''': 2560,
'''width_coef''': 2.0,
'''depth_coef''': 3.1,
'''image_size''': 600,
'''dropout_rate''': 0.5,
'''dw_padding''': [18],
},
}
def __snake_case ( _UpperCAmelCase ):
__a = EfficientNetConfig()
__a = CONFIG_MAP[model_name]['''hidden_dim''']
__a = CONFIG_MAP[model_name]['''width_coef''']
__a = CONFIG_MAP[model_name]['''depth_coef''']
__a = CONFIG_MAP[model_name]['''image_size''']
__a = CONFIG_MAP[model_name]['''dropout_rate''']
__a = CONFIG_MAP[model_name]['''dw_padding''']
__a = '''huggingface/label-files'''
__a = '''imagenet-1k-id2label.json'''
__a = 1000
__a = json.load(open(hf_hub_download(_UpperCAmelCase , _UpperCAmelCase , repo_type='''dataset''' ) , '''r''' ) )
__a = {int(k ): v for k, v in idalabel.items()}
__a = idalabel
__a = {v: k for k, v in idalabel.items()}
return config
def __snake_case ( ):
__a = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
__a = Image.open(requests.get(_UpperCAmelCase , stream=_UpperCAmelCase ).raw )
return im
def __snake_case ( _UpperCAmelCase ):
__a = CONFIG_MAP[model_name]['''image_size''']
__a = EfficientNetImageProcessor(
size={'''height''': size, '''width''': size} , image_mean=[0.4_85, 0.4_56, 0.4_06] , image_std=[0.47_85_39_44, 0.4_73_28_64, 0.47_43_41_63] , do_center_crop=_UpperCAmelCase , )
return preprocessor
def __snake_case ( _UpperCAmelCase ):
__a = [v.split('''_''' )[0].split('''block''' )[1] for v in original_param_names if v.startswith('''block''' )]
__a = sorted(set(_UpperCAmelCase ) )
__a = len(_UpperCAmelCase )
__a = {b: str(i ) for b, i in zip(_UpperCAmelCase , range(_UpperCAmelCase ) )}
__a = []
rename_keys.append(('''stem_conv/kernel:0''', '''embeddings.convolution.weight''') )
rename_keys.append(('''stem_bn/gamma:0''', '''embeddings.batchnorm.weight''') )
rename_keys.append(('''stem_bn/beta:0''', '''embeddings.batchnorm.bias''') )
rename_keys.append(('''stem_bn/moving_mean:0''', '''embeddings.batchnorm.running_mean''') )
rename_keys.append(('''stem_bn/moving_variance:0''', '''embeddings.batchnorm.running_var''') )
for b in block_names:
__a = block_name_mapping[b]
rename_keys.append((f'block{b}_expand_conv/kernel:0', f'encoder.blocks.{hf_b}.expansion.expand_conv.weight') )
rename_keys.append((f'block{b}_expand_bn/gamma:0', f'encoder.blocks.{hf_b}.expansion.expand_bn.weight') )
rename_keys.append((f'block{b}_expand_bn/beta:0', f'encoder.blocks.{hf_b}.expansion.expand_bn.bias') )
rename_keys.append(
(f'block{b}_expand_bn/moving_mean:0', f'encoder.blocks.{hf_b}.expansion.expand_bn.running_mean') )
rename_keys.append(
(f'block{b}_expand_bn/moving_variance:0', f'encoder.blocks.{hf_b}.expansion.expand_bn.running_var') )
rename_keys.append(
(f'block{b}_dwconv/depthwise_kernel:0', f'encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight') )
rename_keys.append((f'block{b}_bn/gamma:0', f'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight') )
rename_keys.append((f'block{b}_bn/beta:0', f'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias') )
rename_keys.append(
(f'block{b}_bn/moving_mean:0', f'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean') )
rename_keys.append(
(f'block{b}_bn/moving_variance:0', f'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var') )
rename_keys.append((f'block{b}_se_reduce/kernel:0', f'encoder.blocks.{hf_b}.squeeze_excite.reduce.weight') )
rename_keys.append((f'block{b}_se_reduce/bias:0', f'encoder.blocks.{hf_b}.squeeze_excite.reduce.bias') )
rename_keys.append((f'block{b}_se_expand/kernel:0', f'encoder.blocks.{hf_b}.squeeze_excite.expand.weight') )
rename_keys.append((f'block{b}_se_expand/bias:0', f'encoder.blocks.{hf_b}.squeeze_excite.expand.bias') )
rename_keys.append(
(f'block{b}_project_conv/kernel:0', f'encoder.blocks.{hf_b}.projection.project_conv.weight') )
rename_keys.append((f'block{b}_project_bn/gamma:0', f'encoder.blocks.{hf_b}.projection.project_bn.weight') )
rename_keys.append((f'block{b}_project_bn/beta:0', f'encoder.blocks.{hf_b}.projection.project_bn.bias') )
rename_keys.append(
(f'block{b}_project_bn/moving_mean:0', f'encoder.blocks.{hf_b}.projection.project_bn.running_mean') )
rename_keys.append(
(f'block{b}_project_bn/moving_variance:0', f'encoder.blocks.{hf_b}.projection.project_bn.running_var') )
rename_keys.append(('''top_conv/kernel:0''', '''encoder.top_conv.weight''') )
rename_keys.append(('''top_bn/gamma:0''', '''encoder.top_bn.weight''') )
rename_keys.append(('''top_bn/beta:0''', '''encoder.top_bn.bias''') )
rename_keys.append(('''top_bn/moving_mean:0''', '''encoder.top_bn.running_mean''') )
rename_keys.append(('''top_bn/moving_variance:0''', '''encoder.top_bn.running_var''') )
__a = {}
for item in rename_keys:
if item[0] in original_param_names:
__a = '''efficientnet.''' + item[1]
__a = '''classifier.weight'''
__a = '''classifier.bias'''
return key_mapping
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
for key, value in tf_params.items():
if "normalization" in key:
continue
__a = key_mapping[key]
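# Keras stores 2D conv kernels as (H, W, in, out) while PyTorch expects
# (out, in, H, W), hence permute(3, 2, 0, 1); depthwise kernels are
# (H, W, channels, 1) in Keras and (channels, 1, H, W) in PyTorch, hence
# permute(2, 3, 0, 1); dense-layer kernels only need a transpose.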
if "_conv" in key and "kernel" in key:
__a = torch.from_numpy(_UpperCAmelCase ).permute(3 , 2 , 0 , 1 )
elif "depthwise_kernel" in key:
__a = torch.from_numpy(_UpperCAmelCase ).permute(2 , 3 , 0 , 1 )
elif "kernel" in key:
__a = torch.from_numpy(np.transpose(_UpperCAmelCase ) )
else:
__a = torch.from_numpy(_UpperCAmelCase )
# Replace HF parameters with original TF model parameters
assert hf_params[hf_key].shape == new_hf_value.shape
hf_params[hf_key].copy_(_UpperCAmelCase )
@torch.no_grad()
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__a = model_classes[model_name](
include_top=_UpperCAmelCase , weights='''imagenet''' , input_tensor=_UpperCAmelCase , input_shape=_UpperCAmelCase , pooling=_UpperCAmelCase , classes=1000 , classifier_activation='''softmax''' , )
__a = original_model.trainable_variables
__a = original_model.non_trainable_variables
__a = {param.name: param.numpy() for param in tf_params}
for param in tf_non_train_params:
__a = param.numpy()
__a = list(tf_params.keys() )
# Load HuggingFace model
__a = get_efficientnet_config(_UpperCAmelCase )
__a = EfficientNetForImageClassification(_UpperCAmelCase ).eval()
__a = hf_model.state_dict()
# Create src-to-dst parameter name mapping dictionary
print('''Converting parameters...''' )
__a = rename_keys(_UpperCAmelCase )
replace_params(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# Initialize preprocessor and preprocess input image
__a = convert_image_processor(_UpperCAmelCase )
__a = preprocessor(images=prepare_img() , return_tensors='''pt''' )
# HF model inference
hf_model.eval()
with torch.no_grad():
__a = hf_model(**_UpperCAmelCase )
__a = outputs.logits.detach().numpy()
# Original model inference
__a = False
__a = CONFIG_MAP[model_name]['''image_size''']
__a = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST )
__a = image.img_to_array(_UpperCAmelCase )
__a = np.expand_dims(_UpperCAmelCase , axis=0 )
__a = original_model.predict(_UpperCAmelCase )
# Check whether original and HF model outputs match -> np.allclose
assert np.allclose(_UpperCAmelCase , _UpperCAmelCase , atol=1E-3 ), "The predicted logits are not the same."
print('''Model outputs match!''' )
if save_model:
# Create folder to save model
if not os.path.isdir(_UpperCAmelCase ):
os.mkdir(_UpperCAmelCase )
# Save converted model and image processor
hf_model.save_pretrained(_UpperCAmelCase )
preprocessor.save_pretrained(_UpperCAmelCase )
if push_to_hub:
# Push model and image processor to hub
print(f'Pushing converted {model_name} to the hub...' )
__a = f'efficientnet-{model_name}'
preprocessor.push_to_hub(_UpperCAmelCase )
hf_model.push_to_hub(_UpperCAmelCase )
if __name__ == "__main__":
__snake_case :int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''b0''',
type=str,
help='''Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''hf_model''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--save_model''', action='''store_true''', help='''Save model to local''')
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
__snake_case :Optional[int] = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
| 49
| 0
|
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
    Wav2Vec2FeatureExtractor,
logging,
)
logging.set_verbosity_info()
UpperCamelCase_ = logging.get_logger(__name__)
def A ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ = UniSpeechSatForSequenceClassification.from_pretrained(__UpperCAmelCase , config=__UpperCAmelCase )
UpperCAmelCase_ = downstream_dict['''projector.weight''']
UpperCAmelCase_ = downstream_dict['''projector.bias''']
UpperCAmelCase_ = downstream_dict['''model.post_net.linear.weight''']
UpperCAmelCase_ = downstream_dict['''model.post_net.linear.bias''']
return model
def A ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Any:
'''simple docstring'''
UpperCAmelCase_ = UniSpeechSatForAudioFrameClassification.from_pretrained(__UpperCAmelCase , config=__UpperCAmelCase )
UpperCAmelCase_ = downstream_dict['''model.linear.weight''']
UpperCAmelCase_ = downstream_dict['''model.linear.bias''']
return model
def A ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Any:
'''simple docstring'''
UpperCAmelCase_ = UniSpeechSatForXVector.from_pretrained(__UpperCAmelCase , config=__UpperCAmelCase )
UpperCAmelCase_ = downstream_dict['''connector.weight''']
UpperCAmelCase_ = downstream_dict['''connector.bias''']
for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
UpperCAmelCase_ = downstream_dict[
f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
]
UpperCAmelCase_ = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]
UpperCAmelCase_ = downstream_dict['''model.utterancelevel_feature_extractor.linear1.weight''']
UpperCAmelCase_ = downstream_dict['''model.utterancelevel_feature_extractor.linear1.bias''']
UpperCAmelCase_ = downstream_dict['''model.utterancelevel_feature_extractor.linear2.weight''']
UpperCAmelCase_ = downstream_dict['''model.utterancelevel_feature_extractor.linear2.bias''']
UpperCAmelCase_ = downstream_dict['''objective.W''']
return model
@torch.no_grad()
def A ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ = torch.load(__UpperCAmelCase , map_location='''cpu''' )
UpperCAmelCase_ = checkpoint['''Downstream''']
UpperCAmelCase_ = UniSpeechSatConfig.from_pretrained(__UpperCAmelCase )
    UpperCAmelCase_ = Wav2Vec2FeatureExtractor.from_pretrained(
__UpperCAmelCase , return_attention_mask=__UpperCAmelCase , do_normalize=__UpperCAmelCase )
UpperCAmelCase_ = hf_config.architectures[0]
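    # Dispatch on the architecture name stored in the config: each S3PRL
    # downstream task maps to a dedicated head-conversion routine below.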
if arch.endswith('''ForSequenceClassification''' ):
UpperCAmelCase_ = convert_classification(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
elif arch.endswith('''ForAudioFrameClassification''' ):
UpperCAmelCase_ = convert_diarization(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
elif arch.endswith('''ForXVector''' ):
UpperCAmelCase_ = convert_xvector(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
else:
raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}" )
if hf_config.use_weighted_layer_sum:
UpperCAmelCase_ = checkpoint['''Featurizer''']['''weights''']
hf_feature_extractor.save_pretrained(__UpperCAmelCase )
hf_model.save_pretrained(__UpperCAmelCase )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
parser.add_argument(
"--base_model_name", default=None, type=str, help="Name of the huggingface pretrained base model."
)
parser.add_argument("--config_path", default=None, type=str, help="Path to the huggingface classifier config.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the s3prl checkpoint.")
parser.add_argument("--model_dump_path", default=None, type=str, help="Path to the final converted model.")
UpperCamelCase_ = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
| 344
|
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class a_ ( _snake_case ):
UpperCamelCase__ : Optional[Any] =(DPMSolverSinglestepScheduler,)
UpperCamelCase__ : Tuple =(("num_inference_steps", 25),)
def __a ( self :List[Any] , **_lowercase :Optional[Any]) -> int:
UpperCAmelCase_ = {
'''num_train_timesteps''': 1000,
'''beta_start''': 0.0_001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''solver_order''': 2,
'''prediction_type''': '''epsilon''',
'''thresholding''': False,
'''sample_max_value''': 1.0,
'''algorithm_type''': '''dpmsolver++''',
'''solver_type''': '''midpoint''',
'''lambda_min_clipped''': -float('''inf'''),
'''variance_type''': None,
}
config.update(**_lowercase)
return config
def __a ( self :Union[str, Any] , _lowercase :List[Any]=0 , **_lowercase :Optional[int]) -> List[Any]:
UpperCAmelCase_ = dict(self.forward_default_kwargs)
UpperCAmelCase_ = kwargs.pop('''num_inference_steps''' , _lowercase)
UpperCAmelCase_ = self.dummy_sample
UpperCAmelCase_ = 0.1 * sample
UpperCAmelCase_ = [residual + 0.2, residual + 0.15, residual + 0.10]
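        # dummy residuals stand in for model outputs cached from earlier solver
        # steps, so higher-order updates can be exercised deterministically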
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ = self.get_scheduler_config(**_lowercase)
UpperCAmelCase_ = scheduler_class(**_lowercase)
scheduler.set_timesteps(_lowercase)
# copy over dummy past residuals
UpperCAmelCase_ = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_lowercase)
UpperCAmelCase_ = scheduler_class.from_pretrained(_lowercase)
new_scheduler.set_timesteps(_lowercase)
# copy over dummy past residuals
UpperCAmelCase_ = dummy_past_residuals[: new_scheduler.config.solver_order]
UpperCAmelCase_ , UpperCAmelCase_ = sample, sample
for t in range(_lowercase , time_step + scheduler.config.solver_order + 1):
UpperCAmelCase_ = scheduler.step(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
UpperCAmelCase_ = new_scheduler.step(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
def __a ( self :Union[str, Any]) -> List[Any]:
pass
def __a ( self :Optional[Any] , _lowercase :str=0 , **_lowercase :Union[str, Any]) -> Dict:
UpperCAmelCase_ = dict(self.forward_default_kwargs)
UpperCAmelCase_ = kwargs.pop('''num_inference_steps''' , _lowercase)
UpperCAmelCase_ = self.dummy_sample
UpperCAmelCase_ = 0.1 * sample
UpperCAmelCase_ = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ = self.get_scheduler_config()
UpperCAmelCase_ = scheduler_class(**_lowercase)
scheduler.set_timesteps(_lowercase)
# copy over dummy past residuals (must be after setting timesteps)
UpperCAmelCase_ = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_lowercase)
UpperCAmelCase_ = scheduler_class.from_pretrained(_lowercase)
# copy over dummy past residuals
new_scheduler.set_timesteps(_lowercase)
# copy over dummy past residual (must be after setting timesteps)
UpperCAmelCase_ = dummy_past_residuals[: new_scheduler.config.solver_order]
UpperCAmelCase_ = scheduler.step(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
UpperCAmelCase_ = new_scheduler.step(_lowercase , _lowercase , _lowercase , **_lowercase).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
def __a ( self :Dict , _lowercase :Union[str, Any]=None , **_lowercase :List[Any]) -> int:
if scheduler is None:
UpperCAmelCase_ = self.scheduler_classes[0]
UpperCAmelCase_ = self.get_scheduler_config(**_lowercase)
UpperCAmelCase_ = scheduler_class(**_lowercase)
UpperCAmelCase_ = self.scheduler_classes[0]
UpperCAmelCase_ = self.get_scheduler_config(**_lowercase)
UpperCAmelCase_ = scheduler_class(**_lowercase)
UpperCAmelCase_ = 10
UpperCAmelCase_ = self.dummy_model()
UpperCAmelCase_ = self.dummy_sample_deter
scheduler.set_timesteps(_lowercase)
for i, t in enumerate(scheduler.timesteps):
UpperCAmelCase_ = model(_lowercase , _lowercase)
UpperCAmelCase_ = scheduler.step(_lowercase , _lowercase , _lowercase).prev_sample
return sample
def __a ( self :int) -> Tuple:
UpperCAmelCase_ = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
UpperCAmelCase_ = 50
UpperCAmelCase_ = self.dummy_model()
UpperCAmelCase_ = self.dummy_sample_deter
scheduler.set_timesteps(_lowercase)
# make sure that the first t is uneven
for i, t in enumerate(scheduler.timesteps[3:]):
UpperCAmelCase_ = model(_lowercase , _lowercase)
UpperCAmelCase_ = scheduler.step(_lowercase , _lowercase , _lowercase).prev_sample
UpperCAmelCase_ = torch.mean(torch.abs(_lowercase))
assert abs(result_mean.item() - 0.2_574) < 1E-3
def __a ( self :List[Any]) -> List[Any]:
for timesteps in [25, 50, 100, 999, 1000]:
self.check_over_configs(num_train_timesteps=_lowercase)
def __a ( self :int) -> Optional[Any]:
# make sure that iterating over schedulers with same config names gives same results
# for defaults
UpperCAmelCase_ = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
UpperCAmelCase_ = self.full_loop(scheduler=_lowercase)
UpperCAmelCase_ = torch.mean(torch.abs(_lowercase))
assert abs(result_mean.item() - 0.2_791) < 1E-3
UpperCAmelCase_ = DEISMultistepScheduler.from_config(scheduler.config)
UpperCAmelCase_ = DPMSolverMultistepScheduler.from_config(scheduler.config)
UpperCAmelCase_ = UniPCMultistepScheduler.from_config(scheduler.config)
UpperCAmelCase_ = DPMSolverSinglestepScheduler.from_config(scheduler.config)
UpperCAmelCase_ = self.full_loop(scheduler=_lowercase)
UpperCAmelCase_ = torch.mean(torch.abs(_lowercase))
assert abs(result_mean.item() - 0.2_791) < 1E-3
def __a ( self :Tuple) -> int:
self.check_over_configs(thresholding=_lowercase)
for order in [1, 2, 3]:
for solver_type in ["midpoint", "heun"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=_lowercase , prediction_type=_lowercase , sample_max_value=_lowercase , algorithm_type='''dpmsolver++''' , solver_order=_lowercase , solver_type=_lowercase , )
def __a ( self :List[Any]) -> Any:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_lowercase)
def __a ( self :Any) -> Optional[int]:
for algorithm_type in ["dpmsolver", "dpmsolver++"]:
for solver_type in ["midpoint", "heun"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=_lowercase , solver_type=_lowercase , prediction_type=_lowercase , algorithm_type=_lowercase , )
UpperCAmelCase_ = self.full_loop(
solver_order=_lowercase , solver_type=_lowercase , prediction_type=_lowercase , algorithm_type=_lowercase , )
assert not torch.isnan(_lowercase).any(), "Samples have nan numbers"
def __a ( self :Tuple) -> int:
self.check_over_configs(lower_order_final=_lowercase)
self.check_over_configs(lower_order_final=_lowercase)
def __a ( self :Tuple) -> Optional[Any]:
self.check_over_configs(lambda_min_clipped=-float('''inf'''))
self.check_over_configs(lambda_min_clipped=-5.1)
def __a ( self :Any) -> List[str]:
self.check_over_configs(variance_type=_lowercase)
self.check_over_configs(variance_type='''learned_range''')
def __a ( self :Any) -> Dict:
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
self.check_over_forward(num_inference_steps=_lowercase , time_step=0)
def __a ( self :Dict) -> Union[str, Any]:
UpperCAmelCase_ = self.full_loop()
UpperCAmelCase_ = torch.mean(torch.abs(_lowercase))
assert abs(result_mean.item() - 0.2_791) < 1E-3
def __a ( self :Any) -> Union[str, Any]:
UpperCAmelCase_ = self.full_loop(use_karras_sigmas=_lowercase)
UpperCAmelCase_ = torch.mean(torch.abs(_lowercase))
assert abs(result_mean.item() - 0.2_248) < 1E-3
def __a ( self :str) -> Optional[int]:
UpperCAmelCase_ = self.full_loop(prediction_type='''v_prediction''')
UpperCAmelCase_ = torch.mean(torch.abs(_lowercase))
assert abs(result_mean.item() - 0.1_453) < 1E-3
def __a ( self :List[Any]) -> Dict:
UpperCAmelCase_ = self.full_loop(prediction_type='''v_prediction''' , use_karras_sigmas=_lowercase)
UpperCAmelCase_ = torch.mean(torch.abs(_lowercase))
assert abs(result_mean.item() - 0.0_649) < 1E-3
def __a ( self :Any) -> Optional[Any]:
UpperCAmelCase_ = self.scheduler_classes[0]
UpperCAmelCase_ = self.get_scheduler_config(thresholding=_lowercase , dynamic_thresholding_ratio=0)
UpperCAmelCase_ = scheduler_class(**_lowercase)
UpperCAmelCase_ = 10
UpperCAmelCase_ = self.dummy_model()
UpperCAmelCase_ = self.dummy_sample_deter.half()
scheduler.set_timesteps(_lowercase)
for i, t in enumerate(scheduler.timesteps):
UpperCAmelCase_ = model(_lowercase , _lowercase)
UpperCAmelCase_ = scheduler.step(_lowercase , _lowercase , _lowercase).prev_sample
        assert sample.dtype == torch.float16
| 344
| 1
|
"""simple docstring"""
from math import pi, sqrt
def UpperCAmelCase ( UpperCamelCase__ ):
"""simple docstring"""
if num <= 0:
raise ValueError('math domain error' )
if num > 1_7_1.5:
raise OverflowError('math range error' )
elif num - int(UpperCamelCase__ ) not in (0, 0.5):
raise NotImplementedError('num must be an integer or a half-integer' )
elif num == 0.5:
return sqrt(UpperCamelCase__ )
else:
return 1.0 if num == 1 else (num - 1) * gamma(num - 1 )
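# The recursion above uses the functional equation Gamma(n) = (n - 1) * Gamma(n - 1),
# anchored at Gamma(1) = 1 and Gamma(1/2) = sqrt(pi); this is why only positive
# integers and half-integers are supported.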
def UpperCAmelCase ( ):
"""simple docstring"""
assert gamma(0.5 ) == sqrt(UpperCamelCase__ )
assert gamma(1 ) == 1.0
assert gamma(2 ) == 1.0
if __name__ == "__main__":
from doctest import testmod
testmod()
__lowerCamelCase = 1.0
while num:
__lowerCamelCase = float(input("Gamma of: "))
print(F'''gamma({num}) = {gamma(num)}''')
print("\nEnter 0 to exit...")
| 221
|
"""simple docstring"""
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def UpperCAmelCase ( UpperCamelCase__ ):
"""simple docstring"""
if is_torch_version('<' , '2.0.0' ) or not hasattr(UpperCamelCase__ , '_dynamo' ):
return False
return isinstance(UpperCamelCase__ , torch._dynamo.eval_frame.OptimizedModule )
def UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__ = True ):
"""simple docstring"""
A__ = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
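    # Wrappers to strip, in order: DDP/DataParallel always, plus DeepSpeedEngine
    # when deepspeed is installed (appended to the tuple below).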
A__ = is_compiled_module(UpperCamelCase__ )
if is_compiled:
A__ = model
A__ = model._orig_mod
if is_deepspeed_available():
options += (DeepSpeedEngine,)
while isinstance(UpperCamelCase__ , UpperCamelCase__ ):
A__ = model.module
if not keep_fpaa_wrapper:
A__ = getattr(UpperCamelCase__ , 'forward' )
A__ = model.__dict__.pop('_original_forward' , UpperCamelCase__ )
if original_forward is not None:
while hasattr(UpperCamelCase__ , '__wrapped__' ):
A__ = forward.__wrapped__
if forward == original_forward:
break
A__ = forward
if getattr(UpperCamelCase__ , '_converted_to_transformer_engine' , UpperCamelCase__ ):
convert_model(UpperCamelCase__ , to_transformer_engine=UpperCamelCase__ )
if is_compiled:
A__ = model
A__ = compiled_model
return model
def UpperCAmelCase ( ):
"""simple docstring"""
PartialState().wait_for_everyone()
def UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
if PartialState().distributed_type == DistributedType.TPU:
xm.save(UpperCamelCase__ , UpperCamelCase__ )
elif PartialState().local_process_index == 0:
torch.save(UpperCamelCase__ , UpperCamelCase__ )
@contextmanager
def UpperCAmelCase ( **UpperCamelCase__ ):
"""simple docstring"""
for key, value in kwargs.items():
A__ = str(UpperCamelCase__ )
yield
for key in kwargs:
if key.upper() in os.environ:
del os.environ[key.upper()]
def UpperCAmelCase ( UpperCamelCase__ ):
"""simple docstring"""
if not hasattr(UpperCamelCase__ , '__qualname__' ) and not hasattr(UpperCamelCase__ , '__name__' ):
A__ = getattr(UpperCamelCase__ , '__class__' , UpperCamelCase__ )
if hasattr(UpperCamelCase__ , '__qualname__' ):
return obj.__qualname__
if hasattr(UpperCamelCase__ , '__name__' ):
return obj.__name__
return str(UpperCamelCase__ )
def UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
for key, value in source.items():
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
A__ = destination.setdefault(UpperCamelCase__ , {} )
merge_dicts(UpperCamelCase__ , UpperCamelCase__ )
else:
A__ = value
return destination
def UpperCAmelCase ( UpperCamelCase__ = None ):
"""simple docstring"""
if port is None:
A__ = 29_500
with socket.socket(socket.AF_INET , socket.SOCK_STREAM ) as s:
return s.connect_ex(('localhost', port) ) == 0
| 221
| 1
|
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class snake_case__ ( unittest.TestCase ):
@slow
def __magic_name__ ( self ) -> Union[str, Any]:
__magic_name__ : Any = AutoImageProcessor.from_pretrained("""microsoft/dit-base-finetuned-rvlcdip""" )
__magic_name__ : str = AutoModelForImageClassification.from_pretrained("""microsoft/dit-base-finetuned-rvlcdip""" )
model.to(lowerCAmelCase__ )
from datasets import load_dataset
__magic_name__ : Any = load_dataset("""nielsr/rvlcdip-demo""" )
__magic_name__ : int = dataset["""train"""][0]["""image"""].convert("""RGB""" )
__magic_name__ : Optional[int] = image_processor(lowerCAmelCase__ , return_tensors="""pt""" ).to(lowerCAmelCase__ )
# forward pass
with torch.no_grad():
__magic_name__ : str = model(**lowerCAmelCase__ )
__magic_name__ : Optional[Any] = outputs.logits
__magic_name__ : List[str] = torch.Size((1, 16) )
self.assertEqual(logits.shape , lowerCAmelCase__ )
__magic_name__ : str = torch.tensor(
[-0.4_1_5_8, -0.4_0_9_2, -0.4_3_4_7] , device=lowerCAmelCase__ , dtype=torch.float , )
self.assertTrue(torch.allclose(logits[0, :3] , lowerCAmelCase__ , atol=1e-4 ) )
| 138
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__magic_name__: Tuple = {
"configuration_tapas": ["TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP", "TapasConfig"],
"tokenization_tapas": ["TapasTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__: Dict = [
"TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
"TapasForMaskedLM",
"TapasForQuestionAnswering",
"TapasForSequenceClassification",
"TapasModel",
"TapasPreTrainedModel",
"load_tf_weights_in_tapas",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__: int = [
"TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFTapasForMaskedLM",
"TFTapasForQuestionAnswering",
"TFTapasForSequenceClassification",
"TFTapasModel",
"TFTapasPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
__magic_name__: Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 138
| 1
|
# Lint as: python3
import itertools
import os
import re
snake_case_ = re.compile(R'([A-Z]+)([A-Z][a-z])')
snake_case_ = re.compile(R'([a-z\d])([A-Z])')
snake_case_ = re.compile(R'(?<!_)_(?!_)')
snake_case_ = re.compile(R'(_{2,})')
snake_case_ = R'^\w+(\.\w+)*$'
snake_case_ = R'<>:/\|?*'
def lowerCamelCase__ ( snake_case_ : Dict ) -> Optional[int]:
__snake_case = _uppercase_uppercase_re.sub(R'''\1_\2''' , snake_case_ )
__snake_case = _lowercase_uppercase_re.sub(R'''\1_\2''' , snake_case_ )
return name.lower()
def lowerCamelCase__ ( snake_case_ : Any ) -> Any:
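    # Split on single underscores first, keeping runs of multiple underscores
    # intact, so each fragment can be capitalized independently below.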
__snake_case = _single_underscore_re.split(snake_case_ )
__snake_case = [_multiple_underscores_re.split(snake_case_ ) for n in name]
return "".join(n.capitalize() for n in itertools.chain.from_iterable(snake_case_ ) if n != '''''' )
def lowerCamelCase__ ( snake_case_ : Optional[int] ) -> int:
if os.path.basename(snake_case_ ) != name:
raise ValueError(f"""Should be a dataset name, not a path: {name}""" )
return camelcase_to_snakecase(snake_case_ )
def lowerCamelCase__ ( snake_case_ : Tuple , snake_case_ : Any ) -> str:
if os.path.basename(snake_case_ ) != name:
raise ValueError(f"""Should be a dataset name, not a path: {name}""" )
if not re.match(_split_re , snake_case_ ):
raise ValueError(f"""Split name should match '{_split_re}'' but got '{split}'.""" )
return f"""{filename_prefix_for_name(snake_case_ )}-{split}"""
def lowerCamelCase__ ( snake_case_ : str , snake_case_ : Dict , snake_case_ : int , snake_case_ : List[Any]=None ) -> Union[str, Any]:
__snake_case = filename_prefix_for_split(snake_case_ , snake_case_ )
if filetype_suffix:
prefix += f""".{filetype_suffix}"""
__snake_case = os.path.join(snake_case_ , snake_case_ )
return f"""{filepath}*"""
def lowerCamelCase__ ( snake_case_ : Any , snake_case_ : int , snake_case_ : Optional[int] , snake_case_ : Any=None , snake_case_ : Union[str, Any]=None ) -> str:
__snake_case = filename_prefix_for_split(snake_case_ , snake_case_ )
__snake_case = os.path.join(snake_case_ , snake_case_ )
if shard_lengths:
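        # With shard_lengths, emit one name per shard using the zero-padded
        # `<prefix>-SSSSS-of-NNNNN` naming scheme.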
__snake_case = len(snake_case_ )
__snake_case = [f"""{prefix}-{shard_id:05d}-of-{num_shards:05d}""" for shard_id in range(snake_case_ )]
if filetype_suffix:
__snake_case = [filename + f""".{filetype_suffix}""" for filename in filenames]
return filenames
else:
__snake_case = prefix
if filetype_suffix:
filename += f""".{filetype_suffix}"""
return [filename]
| 24
|
import os
import pytest
from transformers.dynamic_module_utils import get_imports
snake_case_ = '\nimport os\n'
snake_case_ = '\ndef foo():\n import os\n return False\n'
snake_case_ = '\ndef foo():\n def bar():\n if True:\n import os\n return False\n return bar()\n'
snake_case_ = '\nimport os\n\ntry:\n import bar\nexcept ImportError:\n raise ValueError()\n'
snake_case_ = '\nimport os\n\ndef foo():\n try:\n import bar\n except ImportError:\n raise ValueError()\n'
snake_case_ = '\nimport os\n\ntry:\n import bar\nexcept (ImportError, AttributeError):\n raise ValueError()\n'
snake_case_ = '\nimport os\n\ntry:\n import bar\nexcept ImportError as e:\n raise ValueError()\n'
snake_case_ = '\nimport os\n\ntry:\n import bar\nexcept:\n raise ValueError()\n'
snake_case_ = '\nimport os\n\ntry:\n import bar\n import baz\nexcept ImportError:\n raise ValueError()\n'
snake_case_ = '\nimport os\n\ntry:\n import bar\n import baz\nexcept ImportError:\n x = 1\n raise ValueError()\n'
snake_case_ = [
TOP_LEVEL_IMPORT,
IMPORT_IN_FUNCTION,
DEEPLY_NESTED_IMPORT,
TOP_LEVEL_TRY_IMPORT,
GENERIC_EXCEPT_IMPORT,
MULTILINE_TRY_IMPORT,
MULTILINE_BOTH_IMPORT,
MULTIPLE_EXCEPTS_IMPORT,
EXCEPT_AS_IMPORT,
TRY_IMPORT_IN_FUNCTION,
]
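# Every case should report only the top-level `os` import: imports wrapped in
# try/except are treated as optional dependencies and must be ignored.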
@pytest.mark.parametrize('''case''' , snake_case_ )
def lowerCamelCase__ ( snake_case_ : str , snake_case_ : Optional[int] ) -> Dict:
__snake_case = os.path.join(snake_case_ , '''test_file.py''' )
with open(snake_case_ , '''w''' ) as _tmp_file:
_tmp_file.write(snake_case_ )
__snake_case = get_imports(snake_case_ )
assert parsed_imports == ["os"]
| 24
| 1
|
'''simple docstring'''
from __future__ import annotations
lowercase = list[tuple[int, int]]
lowercase = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
lowercase = ([-1, 0], [0, -1], [1, 0], [0, 1]) # up, left, down, right
class A :
def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , ) -> List[Any]:
"""simple docstring"""
A : int = pos_x
A : Optional[Any] = pos_y
A : Optional[Any] = (pos_y, pos_x)
A : str = goal_x
A : Optional[int] = goal_y
A : List[Any] = g_cost
A : str = parent
A : str = self.calculate_heuristic()
def __lowerCAmelCase ( self ) -> float:
"""simple docstring"""
A : Optional[int] = abs(self.pos_x - self.goal_x )
A : Optional[Any] = abs(self.pos_y - self.goal_y )
return dx + dy
def __lt__( self , SCREAMING_SNAKE_CASE ) -> bool:
"""simple docstring"""
return self.f_cost < other.f_cost
class A :
def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Tuple:
"""simple docstring"""
A : List[Any] = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , SCREAMING_SNAKE_CASE )
A : Tuple = Node(goal[1] , goal[0] , goal[1] , goal[0] , 99999 , SCREAMING_SNAKE_CASE )
A : Optional[Any] = [self.start]
A : list[Node] = []
A : Tuple = False
def __lowerCAmelCase ( self ) -> Path | None:
"""simple docstring"""
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
A : Optional[int] = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
A : Optional[int] = True
return self.retrace_path(SCREAMING_SNAKE_CASE )
self.closed_nodes.append(SCREAMING_SNAKE_CASE )
A : Any = self.get_successors(SCREAMING_SNAKE_CASE )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(SCREAMING_SNAKE_CASE )
else:
# retrieve the best current path
A : str = self.open_nodes.pop(self.open_nodes.index(SCREAMING_SNAKE_CASE ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(SCREAMING_SNAKE_CASE )
else:
self.open_nodes.append(SCREAMING_SNAKE_CASE )
if not self.reached:
return [self.start.pos]
return None
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> list[Node]:
"""simple docstring"""
A : List[Any] = []
for action in delta:
A : List[str] = parent.pos_x + action[1]
A : Dict = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(SCREAMING_SNAKE_CASE ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , SCREAMING_SNAKE_CASE , ) )
return successors
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Path:
"""simple docstring"""
A : int = node
A : Union[str, Any] = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
A : int = current_node.parent
path.reverse()
return path
if __name__ == "__main__":
lowercase = (0, 0)
lowercase = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
print('------')
lowercase = GreedyBestFirst(init, goal)
lowercase = greedy_bf.search()
if path:
for pos_x, pos_y in path:
lowercase = 2
for elem in grid:
print(elem)
| 356
|
'''simple docstring'''
from typing import Optional, Tuple, Union
import torch
from diffusers import DiffusionPipeline, ImagePipelineOutput
class A ( __snake_case ):
def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Dict:
"""simple docstring"""
super().__init__()
self.register_modules(unet=SCREAMING_SNAKE_CASE , scheduler=SCREAMING_SNAKE_CASE )
@torch.no_grad()
def __call__( self , SCREAMING_SNAKE_CASE = 1 , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = 50 , SCREAMING_SNAKE_CASE = "pil" , SCREAMING_SNAKE_CASE = True , **SCREAMING_SNAKE_CASE , ) -> Union[ImagePipelineOutput, Tuple]:
"""simple docstring"""
A : List[Any] = torch.randn(
(batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , generator=SCREAMING_SNAKE_CASE , )
A : Optional[Any] = image.to(self.device )
# set step values
self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
A : Tuple = self.unet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
A : List[Any] = self.scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).prev_sample
A : List[Any] = (image / 2 + 0.5).clamp(0 , 1 )
A : Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
A : List[Any] = self.numpy_to_pil(SCREAMING_SNAKE_CASE )
if not return_dict:
return (image,), "This is a local test"
return ImagePipelineOutput(images=SCREAMING_SNAKE_CASE ), "This is a local test"
| 311
| 0
|
'''simple docstring'''
import argparse
from collections import defaultdict
import yaml
__snake_case ="""docs/source/en/_toctree.yml"""
def a_ ( lowerCamelCase : Any ):
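    # Count how many times each document's `local` key appears in the ToC so
    # duplicates can be merged below.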
    lowerCAmelCase = defaultdict(int )
for doc in model_doc:
counts[doc["local"]] += 1
lowerCAmelCase = [key for key, value in counts.items() if value > 1]
lowerCAmelCase = []
for duplicate_key in duplicates:
lowerCAmelCase = list({doc['title'] for doc in model_doc if doc['local'] == duplicate_key} )
        if len(lowerCAmelCase ) > 1:
raise ValueError(
f'''{duplicate_key} is present several times in the documentation table of content at '''
'`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '
'others.' )
# Only add this once
new_doc.append({'local': duplicate_key, 'title': titles[0]} )
# Add none duplicate-keys
new_doc.extend([doc for doc in model_doc if counts[doc['local']] == 1] )
# Sort
    return sorted(lowerCamelCase , key=lambda s : s["title"].lower() )
def a_ ( lowerCamelCase : List[Any]=False ):
with open(lowerCamelCase , encoding='utf-8' ) as f:
lowerCAmelCase = yaml.safe_load(f.read() )
# Get to the API doc
lowerCAmelCase = 0
while content[api_idx]["title"] != "API":
api_idx += 1
lowerCAmelCase = content[api_idx]['sections']
# Then to the model doc
lowerCAmelCase = 0
while api_doc[model_idx]["title"] != "Models":
model_idx += 1
lowerCAmelCase = api_doc[model_idx]['sections']
lowerCAmelCase = [(idx, section) for idx, section in enumerate(lowerCamelCase ) if 'sections' in section]
lowerCAmelCase = False
for idx, modality_doc in modalities_docs:
lowerCAmelCase = modality_doc['sections']
lowerCAmelCase = clean_model_doc_toc(lowerCamelCase )
if old_modality_doc != new_modality_doc:
lowerCAmelCase = True
if overwrite:
lowerCAmelCase = new_modality_doc
if diff:
if overwrite:
lowerCAmelCase = model_doc
lowerCAmelCase = api_doc
with open(lowerCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(yaml.dump(lowerCamelCase , allow_unicode=lowerCamelCase ) )
else:
raise ValueError(
'The model doc part of the table of content is not properly sorted, run `make style` to fix this.' )
if __name__ == "__main__":
__snake_case =argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
__snake_case =parser.parse_args()
check_model_doc(args.fix_and_overwrite)
| 4
|
'''simple docstring'''
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
__snake_case ="""\
@INPROCEEDINGS{Papineni02bleu:a,
author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},
title = {BLEU: a Method for Automatic Evaluation of Machine Translation},
booktitle = {},
year = {2002},
pages = {311--318}
}
@inproceedings{lin-och-2004-orange,
title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\",
author = \"Lin, Chin-Yew and
Och, Franz Josef\",
booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\",
month = \"aug 23{--}aug 27\",
year = \"2004\",
address = \"Geneva, Switzerland\",
publisher = \"COLING\",
url = \"https://www.aclweb.org/anthology/C04-1072\",
pages = \"501--507\",
}
"""
__snake_case ="""\
BLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.
Quality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation,
the better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and
remains one of the most popular automated and inexpensive metrics.
Scores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.
Those scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness
are not taken into account[citation needed].
BLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1
representing more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the
reference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional
reference translations will increase the BLEU score.
"""
__snake_case ="""
Computes BLEU score of translated segments against one or more references.
Args:
predictions: list of translations to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
smooth: Whether or not to apply Lin et al. 2004 smoothing.
Returns:
'bleu': bleu score,
'precisions': geometric mean of n-gram precisions,
'brevity_penalty': brevity penalty,
'length_ratio': ratio of lengths,
'translation_length': translation_length,
'reference_length': reference_length
Examples:
>>> predictions = [
... [\"hello\", \"there\", \"general\", \"kenobi\"], # tokenized prediction of the first sample
... [\"foo\", \"bar\", \"foobar\"] # tokenized prediction of the second sample
... ]
>>> references = [
... [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]], # tokenized references for the first sample (2 references)
... [[\"foo\", \"bar\", \"foobar\"]] # tokenized references for the second sample (1 reference)
... ]
>>> bleu = datasets.load_metric(\"bleu\")
>>> results = bleu.compute(predictions=predictions, references=references)
>>> print(results[\"bleu\"])
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase_ ( datasets.Metric ):
def __UpperCAmelCase ( self : Tuple ) -> int:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ),
'references': datasets.Sequence(
datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ) , id='references' ),
} ) , codebase_urls=['https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py'] , reference_urls=[
'https://en.wikipedia.org/wiki/BLEU',
'https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213',
] , )
def __UpperCAmelCase ( self : Optional[int] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Any , UpperCAmelCase__ : Union[str, Any]=4 , UpperCAmelCase__ : Optional[int]=False ) -> int:
lowerCAmelCase = compute_bleu(
reference_corpus=UpperCAmelCase__ , translation_corpus=UpperCAmelCase__ , max_order=UpperCAmelCase__ , smooth=UpperCAmelCase__ )
((lowerCAmelCase) , (lowerCAmelCase) , (lowerCAmelCase) , (lowerCAmelCase) , (lowerCAmelCase) , (lowerCAmelCase)) = score
return {
"bleu": bleu,
"precisions": precisions,
"brevity_penalty": bp,
"length_ratio": ratio,
"translation_length": translation_length,
"reference_length": reference_length,
}
| 4
| 1
|
"""simple docstring"""
from __future__ import annotations
class __a :
def __init__( self , a__=None ):
_lowerCamelCase = data
_lowerCamelCase = None
def __repr__( self ):
_lowerCamelCase = []
_lowerCamelCase = self
while temp:
string_rep.append(F'{temp.data}' )
_lowerCamelCase = temp.next
return "->".join(a__ )
def make_linked_list( elements_list : list )-> Node:
    if not elements_list:
        raise Exception('The Elements List is empty' )
    head = current = Node(elements_list[0] )
    for i in range(1 , len(elements_list) ):
        current.next = Node(elements_list[i] )
        current = current.next
    return head
def print_reverse( head_node : Node )-> None:
    if head_node is not None and isinstance(head_node , Node ):
        print_reverse(head_node.next )
        print(head_node.data )
def main( )-> None:
    from doctest import testmod
    testmod()
    linked_list = make_linked_list([14, 52, 14, 12, 43] )
    print('Linked List:' )
    print(linked_list )
    print('Elements in Reverse:' )
    print_reverse(linked_list )
if __name__ == "__main__":
main()
| 80
|
"""simple docstring"""
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class __a ( lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE__ : "DiagonalGaussianDistribution"
class __a ( lowerCAmelCase__ , lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE__ : Any = True
@register_to_config
def __init__( self , a__ = 3 , a__ = 3 , a__ = ("DownEncoderBlock2D",) , a__ = ("UpDecoderBlock2D",) , a__ = (64,) , a__ = 1 , a__ = "silu" , a__ = 4 , a__ = 32 , a__ = 32 , a__ = 0.18215 , ):
super().__init__()
# pass init params to Encoder
_lowerCamelCase = Encoder(
in_channels=a__ , out_channels=a__ , down_block_types=a__ , block_out_channels=a__ , layers_per_block=a__ , act_fn=a__ , norm_num_groups=a__ , double_z=a__ , )
# pass init params to Decoder
_lowerCamelCase = Decoder(
in_channels=a__ , out_channels=a__ , up_block_types=a__ , block_out_channels=a__ , layers_per_block=a__ , norm_num_groups=a__ , act_fn=a__ , )
        _lowerCamelCase = nn.Conv2d(2 * latent_channels , 2 * latent_channels , 1 )
        _lowerCamelCase = nn.Conv2d(a__ , a__ , 1 )
_lowerCamelCase = False
_lowerCamelCase = False
# only relevant if vae tiling is enabled
_lowerCamelCase = self.config.sample_size
_lowerCamelCase = (
self.config.sample_size[0]
if isinstance(self.config.sample_size , (list, tuple) )
else self.config.sample_size
)
_lowerCamelCase = int(sample_size / (2 ** (len(self.config.block_out_channels ) - 1)) )
_lowerCamelCase = 0.25
def snake_case_ ( self , a__ , a__=False ):
if isinstance(a__ , (Encoder, Decoder) ):
_lowerCamelCase = value
def snake_case_ ( self , a__ = True ):
_lowerCamelCase = use_tiling
def snake_case_ ( self ):
self.enable_tiling(a__ )
def snake_case_ ( self ):
_lowerCamelCase = True
def snake_case_ ( self ):
_lowerCamelCase = False
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def snake_case_ ( self ):
_lowerCamelCase = {}
def fn_recursive_add_processors(a__ , a__ , a__ ):
if hasattr(a__ , 'set_processor' ):
_lowerCamelCase = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(F'{name}.{sub_name}' , a__ , a__ )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(a__ , a__ , a__ )
return processors
def snake_case_ ( self , a__ ):
_lowerCamelCase = len(self.attn_processors.keys() )
if isinstance(a__ , a__ ) and len(a__ ) != count:
raise ValueError(
F'A dict of processors was passed, but the number of processors {len(a__ )} does not match the'
F' number of attention layers: {count}. Please make sure to pass {count} processor classes.' )
def fn_recursive_attn_processor(a__ , a__ , a__ ):
if hasattr(a__ , 'set_processor' ):
if not isinstance(a__ , a__ ):
module.set_processor(a__ )
else:
module.set_processor(processor.pop(F'{name}.processor' ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(F'{name}.{sub_name}' , a__ , a__ )
for name, module in self.named_children():
fn_recursive_attn_processor(a__ , a__ , a__ )
def snake_case_ ( self ):
self.set_attn_processor(AttnProcessor() )
@apply_forward_hook
def snake_case_ ( self , a__ , a__ = True ):
if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
return self.tiled_encode(a__ , return_dict=a__ )
if self.use_slicing and x.shape[0] > 1:
_lowerCamelCase = [self.encoder(a__ ) for x_slice in x.split(1 )]
_lowerCamelCase = torch.cat(a__ )
else:
_lowerCamelCase = self.encoder(a__ )
_lowerCamelCase = self.quant_conv(a__ )
_lowerCamelCase = DiagonalGaussianDistribution(a__ )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=a__ )
def snake_case_ ( self , a__ , a__ = True ):
if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
return self.tiled_decode(a__ , return_dict=a__ )
_lowerCamelCase = self.post_quant_conv(a__ )
_lowerCamelCase = self.decoder(a__ )
if not return_dict:
return (dec,)
return DecoderOutput(sample=a__ )
@apply_forward_hook
def snake_case_ ( self , a__ , a__ = True ):
if self.use_slicing and z.shape[0] > 1:
_lowerCamelCase = [self._decode(a__ ).sample for z_slice in z.split(1 )]
_lowerCamelCase = torch.cat(a__ )
else:
_lowerCamelCase = self._decode(a__ ).sample
if not return_dict:
return (decoded,)
return DecoderOutput(sample=a__ )
def snake_case_ ( self , a__ , a__ , a__ ):
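        # Vertical blend (`blend_v`): linearly cross-fade the bottom `blend_extent`
        # rows of tile `a` into the top rows of tile `b` to hide seams between
        # vertically adjacent tiles.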
_lowerCamelCase = min(a.shape[2] , b.shape[2] , a__ )
for y in range(a__ ):
_lowerCamelCase = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
return b
def snake_case_ ( self , a__ , a__ , a__ ):
_lowerCamelCase = min(a.shape[3] , b.shape[3] , a__ )
for x in range(a__ ):
_lowerCamelCase = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
return b
def snake_case_ ( self , a__ , a__ = True ):
_lowerCamelCase = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor) )
_lowerCamelCase = int(self.tile_latent_min_size * self.tile_overlap_factor )
_lowerCamelCase = self.tile_latent_min_size - blend_extent
# Split the image into 512x512 tiles and encode them separately.
_lowerCamelCase = []
for i in range(0 , x.shape[2] , a__ ):
_lowerCamelCase = []
for j in range(0 , x.shape[3] , a__ ):
_lowerCamelCase = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
_lowerCamelCase = self.encoder(a__ )
_lowerCamelCase = self.quant_conv(a__ )
row.append(a__ )
rows.append(a__ )
_lowerCamelCase = []
for i, row in enumerate(a__ ):
_lowerCamelCase = []
for j, tile in enumerate(a__ ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
_lowerCamelCase = self.blend_v(rows[i - 1][j] , a__ , a__ )
if j > 0:
_lowerCamelCase = self.blend_h(row[j - 1] , a__ , a__ )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(a__ , dim=3 ) )
_lowerCamelCase = torch.cat(a__ , dim=2 )
_lowerCamelCase = DiagonalGaussianDistribution(a__ )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=a__ )
def snake_case_ ( self , a__ , a__ = True ):
_lowerCamelCase = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor) )
_lowerCamelCase = int(self.tile_sample_min_size * self.tile_overlap_factor )
_lowerCamelCase = self.tile_sample_min_size - blend_extent
# Split z into overlapping 64x64 tiles and decode them separately.
# The tiles have an overlap to avoid seams between tiles.
_lowerCamelCase = []
for i in range(0 , z.shape[2] , a__ ):
_lowerCamelCase = []
for j in range(0 , z.shape[3] , a__ ):
_lowerCamelCase = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
_lowerCamelCase = self.post_quant_conv(a__ )
_lowerCamelCase = self.decoder(a__ )
row.append(a__ )
rows.append(a__ )
_lowerCamelCase = []
for i, row in enumerate(a__ ):
_lowerCamelCase = []
for j, tile in enumerate(a__ ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
_lowerCamelCase = self.blend_v(rows[i - 1][j] , a__ , a__ )
if j > 0:
_lowerCamelCase = self.blend_h(row[j - 1] , a__ , a__ )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(a__ , dim=3 ) )
_lowerCamelCase = torch.cat(a__ , dim=2 )
if not return_dict:
return (dec,)
return DecoderOutput(sample=a__ )
def snake_case_ ( self , a__ , a__ = False , a__ = True , a__ = None , ):
_lowerCamelCase = sample
_lowerCamelCase = self.encode(a__ ).latent_dist
if sample_posterior:
_lowerCamelCase = posterior.sample(generator=a__ )
else:
_lowerCamelCase = posterior.mode()
_lowerCamelCase = self.decode(a__ ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=a__ )
| 80
| 1
|
def _a ( UpperCamelCase_ : int ) -> int:
"""simple docstring"""
lowerCAmelCase__ = 0
while num > 0:
digit_sum += num % 10
num //= 10
return digit_sum
def _a ( UpperCamelCase_ : int = 100 ) -> int:
"""simple docstring"""
lowerCAmelCase__ = 1
lowerCAmelCase__ = 2
for i in range(2 , max_n + 1 ):
lowerCAmelCase__ = pre_numerator
lowerCAmelCase__ = 2 * i // 3 if i % 3 == 0 else 1
lowerCAmelCase__ = cur_numerator
lowerCAmelCase__ = e_cont * pre_numerator + temp
return sum_digits(UpperCamelCase_ )
if __name__ == "__main__":
print(F"{solution() = }")
| 340
|
import darl # noqa
import gym
import tqdm
from diffusers.experimental import ValueGuidedRLPipeline
a_ = {
'''n_samples''': 64,
'''horizon''': 32,
'''num_inference_steps''': 20,
'''n_guide_steps''': 2, # can set to 0 for faster sampling, does not use value network
'''scale_grad_by_std''': True,
'''scale''': 0.1,
'''eta''': 0.0,
'''t_grad_cutoff''': 2,
'''device''': '''cpu''',
}
if __name__ == "__main__":
a_ = '''hopper-medium-v2'''
a_ = gym.make(env_name)
a_ = ValueGuidedRLPipeline.from_pretrained(
'''bglick13/hopper-medium-v2-value-function-hor32''',
env=env,
)
env.seed(0)
a_ = env.reset()
a_ = 0
a_ = 0
a_ = 1000
a_ = [obs.copy()]
try:
for t in tqdm.tqdm(range(T)):
# call the policy
a_ = pipeline(obs, planning_horizon=32)
# execute action in environment
a_, a_, a_, a_ = env.step(denorm_actions)
a_ = env.get_normalized_score(total_reward)
# update return
total_reward += reward
total_score += score
print(
F"Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:"
F" {total_score}"
)
# save observations for rendering
rollout.append(next_observation.copy())
a_ = next_observation
except KeyboardInterrupt:
pass
print(F"Total reward: {total_reward}")
| 340
| 1
|
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class a (unittest.TestCase ):
"""simple docstring"""
def __snake_case ( self : Dict , lowerCamelCase : Union[str, Any] ) -> Optional[Any]:
for model_result in results.values():
for batch_size, sequence_length in zip(model_result["bs"] , model_result["ss"] ):
__snake_case : Any = model_result['''result'''][batch_size][sequence_length]
self.assertIsNotNone(lowerCamelCase )
def __snake_case ( self : List[Any] ) -> Any:
__snake_case : Dict = '''sshleifer/tiny-gpt2'''
__snake_case : Optional[int] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowerCamelCase , inference=lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowerCamelCase , )
__snake_case : Optional[Any] = PyTorchBenchmark(lowerCamelCase )
__snake_case : int = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __snake_case ( self : Optional[int] ) -> Union[str, Any]:
__snake_case : Any = '''sgugger/tiny-distilbert-classification'''
__snake_case : Dict = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowerCamelCase , inference=lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowerCamelCase , only_pretrain_model=lowerCamelCase , )
__snake_case : Optional[int] = PyTorchBenchmark(lowerCamelCase )
__snake_case : Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __snake_case ( self : Tuple ) -> int:
__snake_case : str = '''sshleifer/tiny-gpt2'''
__snake_case : List[str] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowerCamelCase , inference=lowerCamelCase , torchscript=lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowerCamelCase , )
__snake_case : str = PyTorchBenchmark(lowerCamelCase )
__snake_case : List[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(torch_device == "cpu" , "Cant do half precision" )
def __snake_case ( self : Union[str, Any] ) -> Any:
__snake_case : Optional[Any] = '''sshleifer/tiny-gpt2'''
__snake_case : Dict = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=lowerCamelCase , inference=lowerCamelCase , fp16=lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowerCamelCase , )
__snake_case : Optional[int] = PyTorchBenchmark(lowerCamelCase )
__snake_case : str = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __snake_case ( self : List[str] ) -> List[Any]:
__snake_case : Optional[Any] = '''sshleifer/tiny-gpt2'''
__snake_case : Any = AutoConfig.from_pretrained(lowerCamelCase )
# set architectures equal to `None`
__snake_case : Union[str, Any] = None
__snake_case : Dict = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowerCamelCase , inference=lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowerCamelCase , )
__snake_case : List[Any] = PyTorchBenchmark(lowerCamelCase , configs=[config] )
__snake_case : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __snake_case ( self : List[Any] ) -> Tuple:
__snake_case : Tuple = '''sshleifer/tiny-gpt2'''
__snake_case : Optional[int] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowerCamelCase , inference=lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowerCamelCase , )
__snake_case : Optional[Any] = PyTorchBenchmark(lowerCamelCase )
__snake_case : str = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
@unittest.skipIf(torch_device == "cpu" , "Can\'t do half precision" )
def __snake_case ( self : Tuple ) -> int:
__snake_case : int = '''sshleifer/tiny-gpt2'''
__snake_case : Dict = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=lowerCamelCase , inference=lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , fp16=lowerCamelCase , multi_process=lowerCamelCase , )
__snake_case : Any = PyTorchBenchmark(lowerCamelCase )
__snake_case : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __snake_case ( self : int ) -> List[str]:
__snake_case : Any = '''sshleifer/tiny-gpt2'''
__snake_case : Optional[Any] = AutoConfig.from_pretrained(lowerCamelCase )
__snake_case : Optional[int] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowerCamelCase , inference=lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowerCamelCase , )
__snake_case : Any = PyTorchBenchmark(lowerCamelCase , configs=[config] )
__snake_case : Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __snake_case ( self : List[str] ) -> Any:
__snake_case : Tuple = '''sshleifer/tinier_bart'''
__snake_case : Any = AutoConfig.from_pretrained(lowerCamelCase )
__snake_case : Any = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowerCamelCase , inference=lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowerCamelCase , )
__snake_case : str = PyTorchBenchmark(lowerCamelCase , configs=[config] )
__snake_case : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __snake_case ( self : int ) -> Optional[int]:
__snake_case : Tuple = '''sshleifer/tiny-gpt2'''
__snake_case : Any = AutoConfig.from_pretrained(lowerCamelCase )
__snake_case : str = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowerCamelCase , inference=lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowerCamelCase , )
__snake_case : str = PyTorchBenchmark(lowerCamelCase , configs=[config] )
__snake_case : str = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __snake_case ( self : Any ) -> str:
__snake_case : List[Any] = '''sshleifer/tinier_bart'''
__snake_case : str = AutoConfig.from_pretrained(lowerCamelCase )
__snake_case : Optional[int] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowerCamelCase , inference=lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowerCamelCase , )
__snake_case : List[Any] = PyTorchBenchmark(lowerCamelCase , configs=[config] )
__snake_case : Optional[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __snake_case ( self : Any ) -> int:
__snake_case : Any = '''sshleifer/tiny-gpt2'''
with tempfile.TemporaryDirectory() as tmp_dir:
__snake_case : Tuple = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowerCamelCase , inference=lowerCamelCase , save_to_csv=lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(lowerCamelCase , "inf_time.csv" ) , train_memory_csv_file=os.path.join(lowerCamelCase , "train_mem.csv" ) , inference_memory_csv_file=os.path.join(lowerCamelCase , "inf_mem.csv" ) , train_time_csv_file=os.path.join(lowerCamelCase , "train_time.csv" ) , env_info_csv_file=os.path.join(lowerCamelCase , "env.csv" ) , multi_process=lowerCamelCase , )
__snake_case : Union[str, Any] = PyTorchBenchmark(lowerCamelCase )
benchmark.run()
self.assertTrue(Path(os.path.join(lowerCamelCase , "inf_time.csv" ) ).exists() )
self.assertTrue(Path(os.path.join(lowerCamelCase , "train_time.csv" ) ).exists() )
self.assertTrue(Path(os.path.join(lowerCamelCase , "inf_mem.csv" ) ).exists() )
self.assertTrue(Path(os.path.join(lowerCamelCase , "train_mem.csv" ) ).exists() )
self.assertTrue(Path(os.path.join(lowerCamelCase , "env.csv" ) ).exists() )
def __snake_case ( self : str ) -> int:
__snake_case : int = '''sshleifer/tiny-gpt2'''
def _check_summary_is_not_empty(lowerCamelCase : Optional[Any] ):
self.assertTrue(hasattr(lowerCamelCase , "sequential" ) )
self.assertTrue(hasattr(lowerCamelCase , "cumulative" ) )
self.assertTrue(hasattr(lowerCamelCase , "current" ) )
self.assertTrue(hasattr(lowerCamelCase , "total" ) )
with tempfile.TemporaryDirectory() as tmp_dir:
__snake_case : Union[str, Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowerCamelCase , inference=lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(lowerCamelCase , "log.txt" ) , log_print=lowerCamelCase , trace_memory_line_by_line=lowerCamelCase , multi_process=lowerCamelCase , )
__snake_case : Any = PyTorchBenchmark(lowerCamelCase )
__snake_case : List[str] = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
_check_summary_is_not_empty(result.train_summary )
self.assertTrue(Path(os.path.join(lowerCamelCase , "log.txt" ) ).exists() )
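# A minimal standalone sketch of the API exercised by the tests above (model id and
# sizes are taken from the tests; the benchmark utilities are deprecated in recent
# transformers releases, so treat this as illustrative rather than definitive):
#
#     from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
#
#     args = PyTorchBenchmarkArguments(
#         models=["sshleifer/tiny-gpt2"], training=False, inference=True,
#         sequence_lengths=[8], batch_sizes=[1], multi_process=False,
#     )
#     results = PyTorchBenchmark(args).run()
#     print(results.time_inference_result)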
| 352
|
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class OwlViTProcessor(ProcessorMixin):
    """Wraps an OWL-ViT image processor and a CLIP tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, query_images=None, images=None, padding="max_length", return_tensors="np", **kwargs):
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none.")

        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]
            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []

                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])

                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t))

                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")

            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)
            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)
            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)
            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)
            else:
                raise ValueError("Target return tensor type could not be returned")

            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask

        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs
            ).pixel_values
            encoding["query_pixel_values"] = query_pixel_values

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def post_process(self, *args, **kwargs):
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
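# A minimal usage sketch for the processor above (the checkpoint name and inputs are
# illustrative assumptions, not taken from this file):
#
#     from PIL import Image
#     from transformers import OwlViTProcessor
#
#     processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
#     image = Image.open("cats.png")  # hypothetical local image
#     inputs = processor(text=[["a photo of a cat", "a photo of a dog"]], images=image, return_tensors="pt")
#     # `inputs` now holds `input_ids`, `attention_mask` and `pixel_values`.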
| 134
| 0
|
def binomial_coefficient(n: int, r: int) -> int:
    """Compute C(n, r) with a single rolling row of Pascal's triangle (O(r) space)."""
    c = [0 for i in range(r + 1)]
    # nC0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # to compute current row from previous row.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]


print(binomial_coefficient(n=1_0, r=5))
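# Quick sanity check against the standard library (math.comb needs Python >= 3.8):
#
#     import math
#     assert binomial_coefficient(n=10, r=5) == math.comb(10, 5) == 252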
| 305
|
def harmonic_series(n_term: str) -> list:
    """Return the first n terms of the harmonic series 1 + 1/2 + 1/3 + ... + 1/n as strings."""
    if n_term == "":
        return []
    series = []
    for temp in range(int(n_term)):
        series.append(f"1/{temp + 1}" if series else "1")
    return series


if __name__ == "__main__":
    nth_term = input("Enter the last number (nth term) of the Harmonic Series")
    print("Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n")
    print(harmonic_series(nth_term))
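# For example, harmonic_series("5") returns ['1', '1/2', '1/3', '1/4', '1/5'].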
| 305
| 1
|
"""simple docstring"""
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/text-classification/requirements.txt""")
logger = logging.getLogger(__name__)
@dataclass
class _lowerCAmelCase :
"""simple docstring"""
__magic_name__ :Optional[int] = field(
default=128 , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
__magic_name__ :bool = field(
default=a , metadata={"""help""": """Overwrite the cached preprocessed datasets or not."""} )
__magic_name__ :bool = field(
default=a , metadata={
"""help""": (
"""Whether to pad all samples to `max_seq_length`. """
"""If False, will pad the samples dynamically when batching to the maximum length in the batch."""
)
} , )
__magic_name__ :Optional[int] = field(
default=a , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
} , )
__magic_name__ :Optional[int] = field(
default=a , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of evaluation examples to this """
"""value if set."""
)
} , )
__magic_name__ :Optional[int] = field(
default=a , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of prediction examples to this """
"""value if set."""
)
} , )
@dataclass
class _lowerCAmelCase :
"""simple docstring"""
__magic_name__ :str = field(
default=a , metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
__magic_name__ :str = field(
default=a , metadata={"""help""": """Evaluation language. Also train language if `train_language` is set to None."""} )
__magic_name__ :Optional[str] = field(
default=a , metadata={"""help""": """Train language if it is different from the evaluation language."""} )
__magic_name__ :Optional[str] = field(
default=a , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
__magic_name__ :Optional[str] = field(
default=a , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
__magic_name__ :Optional[str] = field(
default=a , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
__magic_name__ :Optional[bool] = field(
default=a , metadata={"""help""": """arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()"""} , )
__magic_name__ :bool = field(
default=a , metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""} , )
__magic_name__ :str = field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
__magic_name__ :bool = field(
default=a , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
__magic_name__ :bool = field(
default=a , metadata={"""help""": """Will enable to load a pretrained model whose head dimensions are different."""} , )
def main():
"""simple docstring"""
lowerCAmelCase__ :List[Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ :int = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('run_xnli' , _SCREAMING_SNAKE_CASE )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
lowerCAmelCase__ :List[Any] = training_args.get_process_log_level()
logger.setLevel(_SCREAMING_SNAKE_CASE )
datasets.utils.logging.set_verbosity(_SCREAMING_SNAKE_CASE )
transformers.utils.logging.set_verbosity(_SCREAMING_SNAKE_CASE )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ F"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
logger.info(F"Training/evaluation parameters {training_args}" )
# Detecting last checkpoint.
lowerCAmelCase__ :Dict = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowerCAmelCase__ :int = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. "
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None:
logger.info(
F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
# Downloading and loading xnli dataset from the hub.
if training_args.do_train:
if model_args.train_language is None:
lowerCAmelCase__ :int = load_dataset(
'xnli' , model_args.language , split='train' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
lowerCAmelCase__ :Tuple = load_dataset(
'xnli' , model_args.train_language , split='train' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
lowerCAmelCase__ :Optional[int] = train_dataset.features['label'].names
if training_args.do_eval:
lowerCAmelCase__ :Dict = load_dataset(
'xnli' , model_args.language , split='validation' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
lowerCAmelCase__ :Dict = eval_dataset.features['label'].names
if training_args.do_predict:
lowerCAmelCase__ :str = load_dataset(
'xnli' , model_args.language , split='test' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
lowerCAmelCase__ :Union[str, Any] = predict_dataset.features['label'].names
# Labels
lowerCAmelCase__ :Tuple = len(_SCREAMING_SNAKE_CASE )
# Load pretrained model and tokenizer
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowerCAmelCase__ :Optional[int] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=_SCREAMING_SNAKE_CASE , idalabel={str(_SCREAMING_SNAKE_CASE ): label for i, label in enumerate(_SCREAMING_SNAKE_CASE )} , labelaid={label: i for i, label in enumerate(_SCREAMING_SNAKE_CASE )} , finetuning_task='xnli' , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
lowerCAmelCase__ :int = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , do_lower_case=model_args.do_lower_case , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
lowerCAmelCase__ :List[Any] = AutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=_SCREAMING_SNAKE_CASE , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# Preprocessing the datasets
# Padding strategy
if data_args.pad_to_max_length:
lowerCAmelCase__ :Union[str, Any] = 'max_length'
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
lowerCAmelCase__ :Optional[int] = False
def preprocess_function(_SCREAMING_SNAKE_CASE ):
# Tokenize the texts
return tokenizer(
examples['premise'] , examples['hypothesis'] , padding=_SCREAMING_SNAKE_CASE , max_length=data_args.max_seq_length , truncation=_SCREAMING_SNAKE_CASE , )
if training_args.do_train:
if data_args.max_train_samples is not None:
lowerCAmelCase__ :str = min(len(_SCREAMING_SNAKE_CASE ) , data_args.max_train_samples )
lowerCAmelCase__ :Dict = train_dataset.select(range(_SCREAMING_SNAKE_CASE ) )
with training_args.main_process_first(desc='train dataset map pre-processing' ):
lowerCAmelCase__ :List[str] = train_dataset.map(
_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE , load_from_cache_file=not data_args.overwrite_cache , desc='Running tokenizer on train dataset' , )
# Log a few random samples from the training set:
for index in random.sample(range(len(_SCREAMING_SNAKE_CASE ) ) , 3 ):
logger.info(F"Sample {index} of the training set: {train_dataset[index]}." )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
lowerCAmelCase__ :Dict = min(len(_SCREAMING_SNAKE_CASE ) , data_args.max_eval_samples )
lowerCAmelCase__ :Dict = eval_dataset.select(range(_SCREAMING_SNAKE_CASE ) )
with training_args.main_process_first(desc='validation dataset map pre-processing' ):
lowerCAmelCase__ :int = eval_dataset.map(
_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE , load_from_cache_file=not data_args.overwrite_cache , desc='Running tokenizer on validation dataset' , )
if training_args.do_predict:
if data_args.max_predict_samples is not None:
lowerCAmelCase__ :Optional[Any] = min(len(_SCREAMING_SNAKE_CASE ) , data_args.max_predict_samples )
lowerCAmelCase__ :int = predict_dataset.select(range(_SCREAMING_SNAKE_CASE ) )
with training_args.main_process_first(desc='prediction dataset map pre-processing' ):
lowerCAmelCase__ :Tuple = predict_dataset.map(
_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE , load_from_cache_file=not data_args.overwrite_cache , desc='Running tokenizer on prediction dataset' , )
# Get the metric function
lowerCAmelCase__ :Dict = evaluate.load('xnli' )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(_SCREAMING_SNAKE_CASE ):
lowerCAmelCase__ :Optional[int] = p.predictions[0] if isinstance(p.predictions , _SCREAMING_SNAKE_CASE ) else p.predictions
lowerCAmelCase__ :Union[str, Any] = np.argmax(_SCREAMING_SNAKE_CASE , axis=1 )
return metric.compute(predictions=_SCREAMING_SNAKE_CASE , references=p.label_ids )
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
lowerCAmelCase__ :Optional[int] = default_data_collator
    elif training_args.fp16:
lowerCAmelCase__ :int = DataCollatorWithPadding(_SCREAMING_SNAKE_CASE , pad_to_multiple_of=8 )
else:
lowerCAmelCase__ :List[str] = None
# Initialize our Trainer
lowerCAmelCase__ :Dict = Trainer(
model=_SCREAMING_SNAKE_CASE , args=_SCREAMING_SNAKE_CASE , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE , data_collator=_SCREAMING_SNAKE_CASE , )
# Training
if training_args.do_train:
lowerCAmelCase__ :Optional[int] = None
if training_args.resume_from_checkpoint is not None:
lowerCAmelCase__ :Union[str, Any] = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
lowerCAmelCase__ :int = last_checkpoint
lowerCAmelCase__ :str = trainer.train(resume_from_checkpoint=_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ :List[str] = train_result.metrics
lowerCAmelCase__ :Optional[Any] = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(_SCREAMING_SNAKE_CASE )
)
lowerCAmelCase__ :Dict = min(_SCREAMING_SNAKE_CASE , len(_SCREAMING_SNAKE_CASE ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics('train' , _SCREAMING_SNAKE_CASE )
trainer.save_metrics('train' , _SCREAMING_SNAKE_CASE )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('*** Evaluate ***' )
lowerCAmelCase__ :Optional[Any] = trainer.evaluate(eval_dataset=_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ :Optional[Any] = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ :Tuple = min(_SCREAMING_SNAKE_CASE , len(_SCREAMING_SNAKE_CASE ) )
trainer.log_metrics('eval' , _SCREAMING_SNAKE_CASE )
trainer.save_metrics('eval' , _SCREAMING_SNAKE_CASE )
# Prediction
if training_args.do_predict:
logger.info('*** Predict ***' )
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ :Optional[int] = trainer.predict(_SCREAMING_SNAKE_CASE , metric_key_prefix='predict' )
lowerCAmelCase__ :int = (
data_args.max_predict_samples if data_args.max_predict_samples is not None else len(_SCREAMING_SNAKE_CASE )
)
lowerCAmelCase__ :Tuple = min(_SCREAMING_SNAKE_CASE , len(_SCREAMING_SNAKE_CASE ) )
trainer.log_metrics('predict' , _SCREAMING_SNAKE_CASE )
trainer.save_metrics('predict' , _SCREAMING_SNAKE_CASE )
lowerCAmelCase__ :Any = np.argmax(_SCREAMING_SNAKE_CASE , axis=1 )
lowerCAmelCase__ :Dict = os.path.join(training_args.output_dir , 'predictions.txt' )
if trainer.is_world_process_zero():
with open(_SCREAMING_SNAKE_CASE , 'w' ) as writer:
writer.write('index\tprediction\n' )
for index, item in enumerate(_SCREAMING_SNAKE_CASE ):
lowerCAmelCase__ :Optional[int] = label_list[item]
writer.write(F"{index}\t{item}\n" )
if __name__ == "__main__":
main()
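# Example invocation (paths and hyperparameters are illustrative, not from this file);
# run from the shell:
#
#     python run_xnli.py \
#         --model_name_or_path bert-base-multilingual-cased \
#         --language de --train_language en \
#         --do_train --do_eval \
#         --per_device_train_batch_size 32 \
#         --output_dir /tmp/debug_xnli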
| 254
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class TextClassificationTool(PipelineTool):
    """Classifies an English text among a provided list of labels using an NLI model."""

    default_checkpoint = "facebook/bart-large-mnli"
    description = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )
    name = "text_classifier"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification

    inputs = ["text", ["text"]]
    outputs = ["text"]

    def setup(self):
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("entail"):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError("Could not determine the entailment ID from the model config, please pass it at init.")

    def encode(self, text, labels):
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels),
            [f"This example is {label}" for label in labels],
            return_tensors="pt",
            padding="max_length",
        )

    def decode(self, outputs):
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
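# Sketch of calling the tool directly (input text and labels are illustrative; the
# underlying model is downloaded on first use):
#
#     classifier = TextClassificationTool()
#     classifier("This is a super nice API!", labels=["positive", "negative"])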
| 254
| 1
|
"""simple docstring"""
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
require_version("""datasets>=1.14.0""", """To fix: pip install -r examples/pytorch/audio-classification/requirements.txt""")
def random_subsample(wav: np.ndarray, max_length: float, sample_rate: int = 16000):
    """Randomly sample a chunk of `max_length` seconds from the input waveform."""
    sample_length = int(round(sample_rate * max_length))
    if len(wav) <= sample_length:
        return wav
    random_offset = randint(0, len(wav) - sample_length - 1)
    return wav[random_offset : random_offset + sample_length]
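# For example (illustrative), random_subsample(np.zeros(48_000), max_length=1.0)
# returns a random contiguous window of 16_000 samples at the default 16 kHz rate.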
@dataclass
class A_ :
"""simple docstring"""
__UpperCamelCase = field(default=SCREAMING_SNAKE_CASE_ , metadata={"""help""": """Name of a dataset from the datasets package"""} )
__UpperCamelCase = field(
default=SCREAMING_SNAKE_CASE_ , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} )
__UpperCamelCase = field(
default=SCREAMING_SNAKE_CASE_ , metadata={"""help""": """A file containing the training audio paths and labels."""} )
__UpperCamelCase = field(
default=SCREAMING_SNAKE_CASE_ , metadata={"""help""": """A file containing the validation audio paths and labels."""} )
__UpperCamelCase = field(
default="""train""" , metadata={
"""help""": """The name of the training data set split to use (via the datasets library). Defaults to 'train'"""
} , )
__UpperCamelCase = field(
default="""validation""" , metadata={
"""help""": (
"""The name of the training data set split to use (via the datasets library). Defaults to 'validation'"""
)
} , )
__UpperCamelCase = field(
default="""audio""" , metadata={"""help""": """The name of the dataset column containing the audio data. Defaults to 'audio'"""} , )
__UpperCamelCase = field(
default="""label""" , metadata={"""help""": """The name of the dataset column containing the labels. Defaults to 'label'"""} )
__UpperCamelCase = field(
default=SCREAMING_SNAKE_CASE_ , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
} , )
__UpperCamelCase = field(
default=SCREAMING_SNAKE_CASE_ , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of evaluation examples to this """
"""value if set."""
)
} , )
__UpperCamelCase = field(
default=20 , metadata={"""help""": """Audio clips will be randomly cut to this length during training if the value is set."""} , )
@dataclass
class A_ :
"""simple docstring"""
__UpperCamelCase = field(
default="""facebook/wav2vec2-base""" , metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} , )
__UpperCamelCase = field(
default=SCREAMING_SNAKE_CASE_ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
__UpperCamelCase = field(
default=SCREAMING_SNAKE_CASE_ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from the Hub"""} )
__UpperCamelCase = field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
__UpperCamelCase = field(
default=SCREAMING_SNAKE_CASE_ , metadata={"""help""": """Name or path of preprocessor config."""} )
__UpperCamelCase = field(
default=SCREAMING_SNAKE_CASE_ , metadata={"""help""": """Whether to freeze the feature encoder layers of the model."""} )
__UpperCamelCase = field(
default=SCREAMING_SNAKE_CASE_ , metadata={"""help""": """Whether to generate an attention mask in the feature extractor."""} )
__UpperCamelCase = field(
default=SCREAMING_SNAKE_CASE_ , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
__UpperCamelCase = field(
default=SCREAMING_SNAKE_CASE_ , metadata={"""help""": """Whether to freeze the feature extractor layers of the model."""} )
__UpperCamelCase = field(
default=SCREAMING_SNAKE_CASE_ , metadata={"""help""": """Will enable to load a pretrained model whose head dimensions are different."""} , )
    def __post_init__(self):
        if not self.freeze_feature_extractor and self.freeze_feature_encoder:
            warnings.warn(
                'The argument `--freeze_feature_extractor` is deprecated and '
                'will be removed in a future version. Use `--freeze_feature_encoder` '
                'instead. Setting `freeze_feature_encoder==True`.' , FutureWarning , )
        if self.freeze_feature_extractor and not self.freeze_feature_encoder:
            raise ValueError(
                'The argument `--freeze_feature_extractor` is deprecated and '
                'should not be used in combination with `--freeze_feature_encoder`. '
                'Only make use of `--freeze_feature_encoder`.' )
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('run_audio_classification' , lowercase_ , lowercase_ )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
UpperCAmelCase = training_args.get_process_log_level()
logger.setLevel(lowercase_ )
transformers.utils.logging.set_verbosity(lowercase_ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} """
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Detecting last checkpoint.
UpperCAmelCase = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
UpperCAmelCase = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'Use --overwrite_output_dir to train from scratch.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Initialize our dataset and prepare it for the audio classification task.
UpperCAmelCase = DatasetDict()
UpperCAmelCase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.train_split_name , use_auth_token=True if model_args.use_auth_token else None , )
UpperCAmelCase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.eval_split_name , use_auth_token=True if model_args.use_auth_token else None , )
if data_args.audio_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F"""--audio_column_name {data_args.audio_column_name} not found in dataset '{data_args.dataset_name}'. """
'Make sure to set `--audio_column_name` to the correct audio column - one of '
F"""{', '.join(raw_datasets['train'].column_names )}.""" )
if data_args.label_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F"""--label_column_name {data_args.label_column_name} not found in dataset '{data_args.dataset_name}'. """
'Make sure to set `--label_column_name` to the correct text column - one of '
F"""{', '.join(raw_datasets['train'].column_names )}.""" )
# Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
# transformer outputs in the classifier, but it doesn't always lead to better accuracy
UpperCAmelCase = AutoFeatureExtractor.from_pretrained(
model_args.feature_extractor_name or model_args.model_name_or_path , return_attention_mask=model_args.attention_mask , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# `datasets` takes care of automatically loading and resampling the audio,
# so we just need to set the correct target sampling rate.
UpperCAmelCase = raw_datasets.cast_column(
data_args.audio_column_name , datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) )
UpperCAmelCase = feature_extractor.model_input_names[0]
def train_transforms(lowercase_ ):
UpperCAmelCase = []
for audio in batch[data_args.audio_column_name]:
UpperCAmelCase = random_subsample(
audio['array'] , max_length=data_args.max_length_seconds , sample_rate=feature_extractor.sampling_rate )
subsampled_wavs.append(lowercase_ )
UpperCAmelCase = feature_extractor(lowercase_ , sampling_rate=feature_extractor.sampling_rate )
UpperCAmelCase = {model_input_name: inputs.get(lowercase_ )}
UpperCAmelCase = list(batch[data_args.label_column_name] )
return output_batch
def val_transforms(lowercase_ ):
UpperCAmelCase = [audio['array'] for audio in batch[data_args.audio_column_name]]
UpperCAmelCase = feature_extractor(lowercase_ , sampling_rate=feature_extractor.sampling_rate )
UpperCAmelCase = {model_input_name: inputs.get(lowercase_ )}
UpperCAmelCase = list(batch[data_args.label_column_name] )
return output_batch
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
UpperCAmelCase = raw_datasets['train'].features[data_args.label_column_name].names
UpperCAmelCase , UpperCAmelCase = {}, {}
for i, label in enumerate(lowercase_ ):
UpperCAmelCase = str(lowercase_ )
UpperCAmelCase = label
# Load the accuracy metric from the datasets package
UpperCAmelCase = evaluate.load('accuracy' )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
# `predictions` and `label_ids` fields) and has to return a dictionary string to float.
def compute_metrics(lowercase_ ):
UpperCAmelCase = np.argmax(eval_pred.predictions , axis=1 )
return metric.compute(predictions=lowercase_ , references=eval_pred.label_ids )
UpperCAmelCase = AutoConfig.from_pretrained(
model_args.config_name or model_args.model_name_or_path , num_labels=len(lowercase_ ) , labelaid=lowercase_ , idalabel=lowercase_ , finetuning_task='audio-classification' , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
UpperCAmelCase = AutoModelForAudioClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=lowercase_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# freeze the convolutional waveform encoder
if model_args.freeze_feature_encoder:
model.freeze_feature_encoder()
if training_args.do_train:
if data_args.max_train_samples is not None:
UpperCAmelCase = (
raw_datasets['train'].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
raw_datasets["train"].set_transform(lowercase_ , output_all_columns=lowercase_ )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
UpperCAmelCase = (
raw_datasets['eval'].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
raw_datasets["eval"].set_transform(lowercase_ , output_all_columns=lowercase_ )
# Initialize our trainer
UpperCAmelCase = Trainer(
model=lowercase_ , args=lowercase_ , train_dataset=raw_datasets['train'] if training_args.do_train else None , eval_dataset=raw_datasets['eval'] if training_args.do_eval else None , compute_metrics=lowercase_ , tokenizer=lowercase_ , )
# Training
if training_args.do_train:
UpperCAmelCase = None
if training_args.resume_from_checkpoint is not None:
UpperCAmelCase = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
UpperCAmelCase = last_checkpoint
UpperCAmelCase = trainer.train(resume_from_checkpoint=lowercase_ )
trainer.save_model()
trainer.log_metrics('train' , train_result.metrics )
trainer.save_metrics('train' , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
UpperCAmelCase = trainer.evaluate()
trainer.log_metrics('eval' , lowercase_ )
trainer.save_metrics('eval' , lowercase_ )
# Write model card and (optionally) push to hub
UpperCAmelCase = {
'finetuned_from': model_args.model_name_or_path,
'tasks': 'audio-classification',
'dataset': data_args.dataset_name,
'tags': ['audio-classification'],
}
if training_args.push_to_hub:
trainer.push_to_hub(**lowercase_ )
else:
trainer.create_model_card(**lowercase_ )
if __name__ == "__main__":
main()
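# Example invocation (dataset and hyperparameters are illustrative, not from this file);
# run from the shell:
#
#     python run_audio_classification.py \
#         --model_name_or_path facebook/wav2vec2-base \
#         --dataset_name superb --dataset_config_name ks \
#         --do_train --do_eval \
#         --output_dir /tmp/wav2vec2-base-ft-keyword-spotting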
| 78
|
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class _UpperCAmelCase ( _UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
a_ = BertJapaneseTokenizer
a_ = False
a_ = True
def lowercase ( self : Optional[Any] ) -> List[str]:
super().setUp()
__lowerCAmelCase = [
'[UNK]',
'[CLS]',
'[SEP]',
'こんにちは',
'こん',
'にちは',
'ばんは',
'##こん',
'##にちは',
'##ばんは',
'世界',
'##世界',
'、',
'##、',
'。',
'##。',
]
__lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def lowercase ( self : List[Any] , lowerCAmelCase_ : Tuple ) -> str:
__lowerCAmelCase = 'こんにちは、世界。 \nこんばんは、世界。'
__lowerCAmelCase = 'こんにちは 、 世界 。 こんばんは 、 世界 。'
return input_text, output_text
def lowercase ( self : List[Any] , lowerCAmelCase_ : str ) -> Dict:
__lowerCAmelCase , __lowerCAmelCase = self.get_input_output_texts(lowerCAmelCase_ )
__lowerCAmelCase = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
__lowerCAmelCase = tokenizer.decode(lowerCAmelCase_ , clean_up_tokenization_spaces=lowerCAmelCase_ )
return text, ids
def lowercase ( self : List[str] ) -> Optional[int]:
pass # TODO add if relevant
def lowercase ( self : Optional[Any] ) -> Optional[Any]:
pass # TODO add if relevant
def lowercase ( self : Union[str, Any] ) -> Any:
pass # TODO add if relevant
def lowercase ( self : Dict ) -> Tuple:
__lowerCAmelCase = self.tokenizer_class(self.vocab_file )
__lowerCAmelCase = tokenizer.tokenize('こんにちは、世界。\nこんばんは、世界。' )
self.assertListEqual(lowerCAmelCase_ , ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4] )
def lowercase ( self : List[str] ) -> List[str]:
__lowerCAmelCase = self.tokenizer_class(self.vocab_file , word_tokenizer_type='mecab' )
self.assertIsNotNone(lowerCAmelCase_ )
__lowerCAmelCase = 'こんにちは、世界。\nこんばんは、世界。'
__lowerCAmelCase = tokenizer.tokenize(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4] )
__lowerCAmelCase = os.path.join(self.tmpdirname , 'tokenizer.bin' )
with open(lowerCAmelCase_ , 'wb' ) as handle:
pickle.dump(lowerCAmelCase_ , lowerCAmelCase_ )
with open(lowerCAmelCase_ , 'rb' ) as handle:
__lowerCAmelCase = pickle.load(lowerCAmelCase_ )
__lowerCAmelCase = tokenizer_new.tokenize(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
def lowercase ( self : Dict ) -> Tuple:
__lowerCAmelCase = MecabTokenizer(mecab_dic='ipadic' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップルストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
def lowercase ( self : List[Any] ) -> int:
try:
__lowerCAmelCase = MecabTokenizer(mecab_dic='unidic_lite' )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
def lowercase ( self : Tuple ) -> Optional[Any]:
try:
__lowerCAmelCase = MecabTokenizer(mecab_dic='unidic' )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
def lowercase ( self : Tuple ) -> Union[str, Any]:
__lowerCAmelCase = MecabTokenizer(do_lower_case=lowerCAmelCase_ , mecab_dic='ipadic' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップルストア', 'で', 'iphone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
def lowercase ( self : Union[str, Any] ) -> Optional[Any]:
try:
__lowerCAmelCase = MecabTokenizer(
do_lower_case=lowerCAmelCase_ , normalize_text=lowerCAmelCase_ , mecab_option='-d /usr/local/lib/mecab/dic/jumandic' )
except RuntimeError:
# if dict doesn't exist in the system, previous code raises this error.
return
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップルストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れた', '\u3000', '。'] , )
def lowercase ( self : Any ) -> Union[str, Any]:
__lowerCAmelCase = MecabTokenizer(normalize_text=lowerCAmelCase_ , mecab_dic='ipadic' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップルストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', ' ', '。'] , )
@require_sudachi
def lowercase ( self : List[str] ) -> List[str]:
__lowerCAmelCase = self.tokenizer_class(self.vocab_file , word_tokenizer_type='sudachi' )
self.assertIsNotNone(lowerCAmelCase_ )
__lowerCAmelCase = 'こんにちは、世界。\nこんばんは、世界。'
__lowerCAmelCase = tokenizer.tokenize(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4] )
__lowerCAmelCase = os.path.join(self.tmpdirname , 'tokenizer.bin' )
with open(lowerCAmelCase_ , 'wb' ) as handle:
pickle.dump(lowerCAmelCase_ , lowerCAmelCase_ )
with open(lowerCAmelCase_ , 'rb' ) as handle:
__lowerCAmelCase = pickle.load(lowerCAmelCase_ )
__lowerCAmelCase = tokenizer_new.tokenize(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
@require_sudachi
def lowercase ( self : Union[str, Any] ) -> List[str]:
__lowerCAmelCase = SudachiTokenizer(sudachi_dict_type='core' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , [' ', '\t', 'アップル', 'ストア', 'で', 'iPhone', '8', ' ', 'が', ' ', ' ', '\n ', '発売', 'さ', 'れ', 'た', ' ', '。', ' ', ' '] , )
@require_sudachi
def lowercase ( self : Tuple ) -> Optional[Any]:
__lowerCAmelCase = SudachiTokenizer(sudachi_dict_type='core' , sudachi_split_mode='A' )
self.assertListEqual(tokenizer.tokenize('外国人参政権' ) , ['外国', '人', '参政', '権'] )
@require_sudachi
def lowercase ( self : Tuple ) -> List[Any]:
__lowerCAmelCase = SudachiTokenizer(sudachi_dict_type='core' , sudachi_split_mode='B' )
self.assertListEqual(tokenizer.tokenize('外国人参政権' ) , ['外国人', '参政権'] )
@require_sudachi
def lowercase ( self : List[str] ) -> Union[str, Any]:
__lowerCAmelCase = SudachiTokenizer(sudachi_dict_type='core' , sudachi_split_mode='C' )
self.assertListEqual(tokenizer.tokenize('外国人参政権' ) , ['外国人参政権'] )
@require_sudachi
def lowercase ( self : Dict ) -> List[str]:
__lowerCAmelCase = SudachiTokenizer(do_lower_case=lowerCAmelCase_ , sudachi_dict_type='core' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , [' ', '\t', 'アップル', 'ストア', 'で', 'iphone', '8', ' ', 'が', ' ', ' ', '\n ', '発売', 'さ', 'れ', 'た', ' ', '。', ' ', ' '] , )
@require_sudachi
def lowercase ( self : Union[str, Any] ) -> List[Any]:
__lowerCAmelCase = SudachiTokenizer(normalize_text=lowerCAmelCase_ , sudachi_dict_type='core' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , [' ', '\t', 'アップル', 'ストア', 'で', 'iPhone', '8', ' ', 'が', ' ', ' ', '\n ', '発売', 'さ', 'れ', 'た', '\u3000', '。', ' ', ' '] , )
@require_sudachi
def lowercase ( self : int ) -> str:
__lowerCAmelCase = SudachiTokenizer(trim_whitespace=lowerCAmelCase_ , sudachi_dict_type='core' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
@require_jumanpp
def lowercase ( self : Union[str, Any] ) -> Any:
__lowerCAmelCase = self.tokenizer_class(self.vocab_file , word_tokenizer_type='jumanpp' )
self.assertIsNotNone(lowerCAmelCase_ )
__lowerCAmelCase = 'こんにちは、世界。\nこんばんは、世界。'
__lowerCAmelCase = tokenizer.tokenize(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4] )
__lowerCAmelCase = os.path.join(self.tmpdirname , 'tokenizer.bin' )
with open(lowerCAmelCase_ , 'wb' ) as handle:
pickle.dump(lowerCAmelCase_ , lowerCAmelCase_ )
with open(lowerCAmelCase_ , 'rb' ) as handle:
__lowerCAmelCase = pickle.load(lowerCAmelCase_ )
__lowerCAmelCase = tokenizer_new.tokenize(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
@require_jumanpp
def lowercase ( self : List[Any] ) -> Optional[Any]:
__lowerCAmelCase = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', '\u3000', 'が', '\u3000', '\u3000', '\u3000', '発売', 'さ', 'れた', '\u3000', '。'] , )
@require_jumanpp
def lowercase ( self : Any ) -> Union[str, Any]:
__lowerCAmelCase = JumanppTokenizer(do_lower_case=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iphone', '8', '\u3000', 'が', '\u3000', '\u3000', '\u3000', '発売', 'さ', 'れた', '\u3000', '。'] , )
@require_jumanpp
def lowercase ( self : Dict ) -> Dict:
__lowerCAmelCase = JumanppTokenizer(normalize_text=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['ア', 'ッ', 'フ', '゚', 'ル', 'ストア', 'で', 'iPhone', '8', '\u3000', 'が', '\u3000', '\u3000', '\u3000', '発売', 'さ', 'れた', '\u3000', '。'] , )
@require_jumanpp
def lowercase ( self : List[str] ) -> List[str]:
__lowerCAmelCase = JumanppTokenizer(trim_whitespace=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れた', '。'] , )
@require_jumanpp
def lowercase ( self : Any ) -> Any:
__lowerCAmelCase = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize('ありがとうございますm(_ _)m見つけるのが大変です。' ) , ['ありがとう', 'ございます', 'm(_ _)m', '見つける', 'の', 'が', '大変です', '。'] , )
def lowercase ( self : Any ) -> str:
__lowerCAmelCase = ['[UNK]', '[CLS]', '[SEP]', 'こんにちは', 'こん', 'にちは', 'ばんは', '##こん', '##にちは', '##ばんは']
__lowerCAmelCase = {}
for i, token in enumerate(lowerCAmelCase_ ):
__lowerCAmelCase = i
__lowerCAmelCase = WordpieceTokenizer(vocab=lowerCAmelCase_ , unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('こんにちは' ) , ['こんにちは'] )
self.assertListEqual(tokenizer.tokenize('こんばんは' ) , ['こん', '##ばんは'] )
self.assertListEqual(tokenizer.tokenize('こんばんは こんばんにちは こんにちは' ) , ['こん', '##ばんは', '[UNK]', 'こんにちは'] )
def lowercase ( self : List[Any] ) -> Tuple:
__lowerCAmelCase = BertJapaneseTokenizer.from_pretrained('nlp-waseda/roberta-base-japanese-with-auto-jumanpp' )
__lowerCAmelCase = tokenizer.subword_tokenizer
__lowerCAmelCase = subword_tokenizer.tokenize('国境 の 長い トンネル を 抜ける と 雪国 であった 。' )
self.assertListEqual(lowerCAmelCase_ , ['▁国境', '▁の', '▁長い', '▁トンネル', '▁を', '▁抜ける', '▁と', '▁雪', '国', '▁であった', '▁。'] )
__lowerCAmelCase = subword_tokenizer.tokenize('こんばんは こんばん にち は こんにちは' )
self.assertListEqual(lowerCAmelCase_ , ['▁こん', 'ばん', 'は', '▁こん', 'ばん', '▁に', 'ち', '▁は', '▁こんにちは'] )
def lowercase ( self : int ) -> str:
__lowerCAmelCase = self.tokenizer_class.from_pretrained('cl-tohoku/bert-base-japanese' )
__lowerCAmelCase = tokenizer.encode('ありがとう。' , add_special_tokens=lowerCAmelCase_ )
__lowerCAmelCase = tokenizer.encode('どういたしまして。' , add_special_tokens=lowerCAmelCase_ )
__lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase_ )
__lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase_ , lowerCAmelCase_ )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class _UpperCAmelCase ( _UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
a_ = BertJapaneseTokenizer
a_ = False
def lowercase ( self : Optional[Any] ) -> Tuple:
super().setUp()
__lowerCAmelCase = ['[UNK]', '[CLS]', '[SEP]', 'こ', 'ん', 'に', 'ち', 'は', 'ば', '世', '界', '、', '。']
__lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def lowercase ( self : str , **lowerCAmelCase_ : Tuple ) -> Union[str, Any]:
return BertJapaneseTokenizer.from_pretrained(self.tmpdirname , subword_tokenizer_type='character' , **lowerCAmelCase_ )
def lowercase ( self : Tuple , lowerCAmelCase_ : Tuple ) -> Optional[int]:
__lowerCAmelCase = 'こんにちは、世界。 \nこんばんは、世界。'
__lowerCAmelCase = 'こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。'
return input_text, output_text
def lowercase ( self : Dict ) -> str:
pass # TODO add if relevant
def lowercase ( self : Any ) -> str:
pass # TODO add if relevant
def lowercase ( self : List[Any] ) -> int:
pass # TODO add if relevant
def lowercase ( self : str ) -> str:
__lowerCAmelCase = self.tokenizer_class(self.vocab_file , subword_tokenizer_type='character' )
__lowerCAmelCase = tokenizer.tokenize('こんにちは、世界。 \nこんばんは、世界。' )
self.assertListEqual(
lowerCAmelCase_ , ['こ', 'ん', 'に', 'ち', 'は', '、', '世', '界', '。', 'こ', 'ん', 'ば', 'ん', 'は', '、', '世', '界', '。'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , [3, 4, 5, 6, 7, 1_1, 9, 1_0, 1_2, 3, 4, 8, 4, 7, 1_1, 9, 1_0, 1_2] )
def lowercase ( self : str ) -> Optional[int]:
__lowerCAmelCase = ['[UNK]', '[CLS]', '[SEP]', 'こ', 'ん', 'に', 'ち', 'は', 'ば', '世', '界', '、', '。']
__lowerCAmelCase = {}
for i, token in enumerate(lowerCAmelCase_ ):
__lowerCAmelCase = i
__lowerCAmelCase = CharacterTokenizer(vocab=lowerCAmelCase_ , unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('こんにちは' ) , ['こ', 'ん', 'に', 'ち', 'は'] )
self.assertListEqual(tokenizer.tokenize('こんにちほ' ) , ['こ', 'ん', 'に', 'ち', '[UNK]'] )
def lowercase ( self : int ) -> str:
__lowerCAmelCase = self.tokenizer_class.from_pretrained('cl-tohoku/bert-base-japanese-char' )
__lowerCAmelCase = tokenizer.encode('ありがとう。' , add_special_tokens=lowerCAmelCase_ )
__lowerCAmelCase = tokenizer.encode('どういたしまして。' , add_special_tokens=lowerCAmelCase_ )
__lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase_ )
__lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase_ , lowerCAmelCase_ )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowercase ( self : str ) -> Union[str, Any]:
__lowerCAmelCase = 'cl-tohoku/bert-base-japanese'
__lowerCAmelCase = AutoTokenizer.from_pretrained(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowercase ( self : List[str] ) -> Optional[int]:
__lowerCAmelCase = 'cl-tohoku/bert-base-japanese'
with self.assertLogs('transformers' , level='WARNING' ) as cm:
BertTokenizer.from_pretrained(lowerCAmelCase_ )
self.assertTrue(
cm.records[0].message.startswith(
'The tokenizer class you load from this checkpoint is not the same type as the class this function'
' is called from.' ) )
__lowerCAmelCase = 'bert-base-cased'
with self.assertLogs('transformers' , level='WARNING' ) as cm:
BertJapaneseTokenizer.from_pretrained(lowerCAmelCase_ )
self.assertTrue(
cm.records[0].message.startswith(
'The tokenizer class you load from this checkpoint is not the same type as the class this function'
' is called from.' ) )
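# A minimal usage sketch for the tokenizer exercised above (requires a MeCab backend
# such as `fugashi` to be installed; the checkpoint name is taken from the tests):
#
#     from transformers import BertJapaneseTokenizer
#
#     tokenizer = BertJapaneseTokenizer.from_pretrained("cl-tohoku/bert-base-japanese")
#     tokenizer.tokenize("こんにちは、世界。")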
| 284
| 0
|
"""simple docstring"""
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
__magic_name__ = "src/transformers"
__magic_name__ = "docs/source/en"
__magic_name__ = "."
def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
with open(UpperCamelCase_ , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
__SCREAMING_SNAKE_CASE = f.readlines()
# Find the start prompt.
__SCREAMING_SNAKE_CASE = 0
while not lines[start_index].startswith(UpperCamelCase_ ):
start_index += 1
start_index += 1
__SCREAMING_SNAKE_CASE = start_index
while not lines[end_index].startswith(UpperCamelCase_ ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
ALLOWED_MODEL_SUFFIXES = "Model|Encoder|Decoder|ForConditionalGeneration"
# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
_re_flax_models = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")

# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
def camel_case_split(identifier):
    "Split a camel-cased `identifier` into words."
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]
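# Illustrative behaviour (not in the original file): camel_case_split("TFBertForQuestionAnswering")
# yields ["TF", "Bert", "For", "Question", "Answering"]; the lookup loop below drops one
# trailing word at a time until the remainder matches a known model prefix.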
def _center_text(text, width):
    text_length = 2 if text == "✅" or text == "❌" else len(text)
    left_indent = (width - text_length) // 2
    right_indent = width - text_length - left_indent
    return " " * left_indent + text + " " * right_indent
def get_model_table_from_auto_modules():
    "Generates an up-to-date model table from the content of the auto modules."
    # Dictionary model names to config.
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_name_to_config = {
        name: config_maping_names[code]
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if code in config_maping_names
    }
    model_name_to_prefix = {name: config.replace("Config", "") for name, config in model_name_to_config.items()}

    # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
    slow_tokenizers = collections.defaultdict(bool)
    fast_tokenizers = collections.defaultdict(bool)
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)

    # Let's lookup through all transformers object (once).
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if attr_name.endswith("Tokenizer"):
            lookup_dict = slow_tokenizers
            attr_name = attr_name[:-9]
        elif attr_name.endswith("TokenizerFast"):
            lookup_dict = fast_tokenizers
            attr_name = attr_name[:-13]
        elif _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]

        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_name_to_prefix.values():
                    lookup_dict[attr_name] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name)[:-1])

    # Let's build that table!
    model_names = list(model_name_to_config.keys())
    model_names.sort(key=str.lower)
    columns = ["Model", "Tokenizer slow", "Tokenizer fast", "PyTorch support", "TensorFlow support", "Flax Support"]
    # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
    widths = [len(c) + 2 for c in columns]
    widths[0] = max([len(name) for name in model_names]) + 2

    # Build the table per se
    table = "|" + "|".join([_center_text(c, w) for c, w in zip(columns, widths)]) + "|\n"
    # Use ":-----:" format to center-aligned table cell texts
    table += "|" + "|".join([":" + "-" * (w - 2) + ":" for w in widths]) + "|\n"

    check = {True: "✅", False: "❌"}
    for name in model_names:
        prefix = model_name_to_prefix[name]
        line = [
            name,
            check[slow_tokenizers[prefix]],
            check[fast_tokenizers[prefix]],
            check[pt_models[prefix]],
            check[tf_models[prefix]],
            check[flax_models[prefix]],
        ]
        table += "|" + "|".join([_center_text(l, w) for l, w in zip(line, widths)]) + "|\n"
    return table
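# The generated markdown looks roughly like this (illustrative values only):
# |   Model   | Tokenizer slow | Tokenizer fast | PyTorch support | TensorFlow support | Flax Support |
# |:---------:|:--------------:|:--------------:|:---------------:|:------------------:|:------------:|
# |   ALBERT  |       ✅       |       ✅       |        ✅       |         ✅         |      ✅      |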
def check_model_table(overwrite=False):
    "Check the model table in `index.md` is consistent with the state of the lib and maybe `overwrite`."
    current_table, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_DOCS, "index.md"),
        start_prompt="<!--This table is updated automatically from the auto modules",
        end_prompt="<!-- End table-->",
    )
    new_table = get_model_table_from_auto_modules()

    if current_table != new_table:
        if overwrite:
            with open(os.path.join(PATH_TO_DOCS, "index.md"), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_table] + lines[end_index:])
        else:
            raise ValueError(
                "The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this.")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_model_table(args.fix_and_overwrite)
| 350
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
    from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class TFMT5ModelIntegrationTests(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="tf").input_ids
        labels = tokenizer("Hi I am", return_tensors="tf").input_ids

        loss = model(input_ids, labels=labels).loss
        mtf_score = -tf.math.reduce_mean(loss).numpy()

        EXPECTED_SCORE = -21.228168
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 2e-4)
| 255
| 0
|
'''simple docstring'''
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class AlbertModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, embedding_size=16, hidden_size=36, num_hidden_layers=6, num_hidden_groups=6, num_attention_heads=6, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return AlbertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, num_hidden_groups=self.num_hidden_groups)
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            sentence_order_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.sop_logits.shape, (self.batch_size, config.num_labels))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = AlbertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = AlbertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = AlbertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class AlbertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            AlbertModel,
            AlbertForPreTraining,
            AlbertForMaskedLM,
            AlbertForMultipleChoice,
            AlbertForSequenceClassification,
            AlbertForTokenClassification,
            AlbertForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": AlbertModel,
            "fill-mask": AlbertForMaskedLM,
            "question-answering": AlbertForQuestionAnswering,
            "text-classification": AlbertForSequenceClassification,
            "token-classification": AlbertForTokenClassification,
            "zero-shot": AlbertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    # special case for ForPreTraining model
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["sentence_order_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = AlbertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AlbertConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = AlbertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class AlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = AlbertModel.from_pretrained("albert-base-v2")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )

        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 298
|
'''simple docstring'''
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
SRC_DIRS = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
'''text-classification''',
'''language-modeling''',
'''summarization''',
'''token-classification''',
'''question-answering''',
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
    import run_t5_mlm_flax
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f


def get_results(output_dir, split="eval"):
    path = os.path.join(output_dir, f"{split}_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            return json.load(f)
    raise ValueError(f"can't find {path}")
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class ExamplesTests(TestCasePlus):
    def test_run_glue(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n            run_glue.py\n            --model_name_or_path distilbert-base-uncased\n            --output_dir {tmp_dir}\n            --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n            --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n            --per_device_train_batch_size=2\n            --per_device_eval_batch_size=1\n            --learning_rate=1e-4\n            --eval_steps=2\n            --warmup_steps=2\n            --seed=42\n            --max_seq_length=128\n            ".split()

        with patch.object(sys, "argv", testargs):
            run_flax_glue.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)
    @slow
    def test_run_clm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n            run_clm_flax.py\n            --model_name_or_path distilgpt2\n            --train_file ./tests/fixtures/sample_text.txt\n            --validation_file ./tests/fixtures/sample_text.txt\n            --do_train\n            --do_eval\n            --block_size 128\n            --per_device_train_batch_size 4\n            --per_device_eval_batch_size 4\n            --num_train_epochs 2\n            --logging_steps 2 --eval_steps 2\n            --output_dir {tmp_dir}\n            --overwrite_output_dir\n            ".split()

        with patch.object(sys, "argv", testargs):
            run_clm_flax.main()
            result = get_results(tmp_dir)
            self.assertLess(result["eval_perplexity"], 100)
    @slow
    def test_run_summarization(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n            run_summarization.py\n            --model_name_or_path t5-small\n            --train_file tests/fixtures/tests_samples/xsum/sample.json\n            --validation_file tests/fixtures/tests_samples/xsum/sample.json\n            --test_file tests/fixtures/tests_samples/xsum/sample.json\n            --output_dir {tmp_dir}\n            --overwrite_output_dir\n            --num_train_epochs=3\n            --warmup_steps=8\n            --do_train\n            --do_eval\n            --do_predict\n            --learning_rate=2e-4\n            --per_device_train_batch_size=2\n            --per_device_eval_batch_size=1\n            --predict_with_generate\n            ".split()

        with patch.object(sys, "argv", testargs):
            run_summarization_flax.main()
            result = get_results(tmp_dir, split="test")
            self.assertGreaterEqual(result["test_rouge1"], 10)
            self.assertGreaterEqual(result["test_rouge2"], 2)
            self.assertGreaterEqual(result["test_rougeL"], 7)
            self.assertGreaterEqual(result["test_rougeLsum"], 7)
    @slow
    def test_run_mlm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n            run_mlm.py\n            --model_name_or_path distilroberta-base\n            --train_file ./tests/fixtures/sample_text.txt\n            --validation_file ./tests/fixtures/sample_text.txt\n            --output_dir {tmp_dir}\n            --overwrite_output_dir\n            --max_seq_length 128\n            --per_device_train_batch_size 4\n            --per_device_eval_batch_size 4\n            --logging_steps 2 --eval_steps 2\n            --do_train\n            --do_eval\n            --num_train_epochs=1\n            ".split()

        with patch.object(sys, "argv", testargs):
            run_mlm_flax.main()
            result = get_results(tmp_dir)
            self.assertLess(result["eval_perplexity"], 42)
    @slow
    def test_run_t5_mlm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n            run_t5_mlm_flax.py\n            --model_name_or_path t5-small\n            --train_file ./tests/fixtures/sample_text.txt\n            --validation_file ./tests/fixtures/sample_text.txt\n            --do_train\n            --do_eval\n            --max_seq_length 128\n            --per_device_train_batch_size 4\n            --per_device_eval_batch_size 4\n            --num_train_epochs 2\n            --logging_steps 2 --eval_steps 2\n            --output_dir {tmp_dir}\n            --overwrite_output_dir\n            ".split()

        with patch.object(sys, "argv", testargs):
            run_t5_mlm_flax.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.42)
    @slow
    def test_run_ner(self):
        # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
        epochs = 7 if get_gpu_count() > 1 else 2

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n            run_flax_ner.py\n            --model_name_or_path bert-base-uncased\n            --train_file tests/fixtures/tests_samples/conll/sample.json\n            --validation_file tests/fixtures/tests_samples/conll/sample.json\n            --output_dir {tmp_dir}\n            --overwrite_output_dir\n            --do_train\n            --do_eval\n            --warmup_steps=2\n            --learning_rate=2e-4\n            --logging_steps 2 --eval_steps 2\n            --per_device_train_batch_size=2\n            --per_device_eval_batch_size=2\n            --num_train_epochs={epochs}\n            --seed 7\n            ".split()

        with patch.object(sys, "argv", testargs):
            run_flax_ner.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)
            self.assertGreaterEqual(result["eval_f1"], 0.3)
    @slow
    def test_run_qa(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n            run_qa.py\n            --model_name_or_path bert-base-uncased\n            --version_2_with_negative\n            --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n            --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n            --output_dir {tmp_dir}\n            --overwrite_output_dir\n            --num_train_epochs=3\n            --warmup_steps=2\n            --do_train\n            --do_eval\n            --logging_steps 2 --eval_steps 2\n            --learning_rate=2e-4\n            --per_device_train_batch_size=2\n            --per_device_eval_batch_size=1\n            ".split()

        with patch.object(sys, "argv", testargs):
            run_qa.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_f1"], 30)
            self.assertGreaterEqual(result["eval_exact"], 30)
| 298
| 1
|
import operator
def strand_sort(arr: list, reverse: bool = False, solution: list | None = None) -> list:
    """Strand sort: repeatedly pull a sorted "strand" out of `arr` and merge it into `solution`."""
    _operator = operator.lt if reverse else operator.gt
    solution = solution or []

    if not arr:
        return solution

    sublist = [arr.pop(0)]
    for i, item in enumerate(arr):
        if _operator(item, sublist[-1]):
            sublist.append(item)
            arr.pop(i)

    # merging sublist into solution list
    if not solution:
        solution.extend(sublist)
    else:
        while sublist:
            item = sublist.pop(0)
            for i, xx in enumerate(solution):
                if not _operator(item, xx):
                    solution.insert(i, item)
                    break
            else:
                solution.append(item)

    strand_sort(arr, reverse, solution)
    return solution
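# Rough intuition (added note): each recursive pass peels off one sorted "strand", so an
# input sorted in the opposite order yields strands of length 1, giving O(n) passes and
# O(n^2) comparisons in the worst case; nearly-sorted inputs finish in very few passes.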
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
| 130
|
import collections.abc
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "PoolFormerConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "sail/poolformer_s12"
_EXPECTED_OUTPUT_SHAPE = [1, 512, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "sail/poolformer_s12"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "sail/poolformer_s12",
    # See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]
def drop_path(input, drop_prob: float = 0.0, training: bool = False):
    """Drop paths (Stochastic Depth) per sample (when applied in the main path of residual blocks)."""
    if drop_prob == 0.0 or not training:
        return input
    keep_prob = 1 - drop_prob
    shape = (input.shape[0],) + (1,) * (input.ndim - 1)  # work with diff dim tensors, not just 2D ConvNets
    random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
    random_tensor.floor_()  # binarize
    output = input.div(keep_prob) * random_tensor
    return output
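# Illustrative numbers (not in the original file): with drop_prob=0.2 at training time,
# about 20% of the samples in a batch have this residual branch zeroed out, and the
# survivors are scaled by 1 / 0.8 so the expected value of the activation is unchanged.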
class PoolFormerDropPath(nn.Module):
    """Drop paths (Stochastic Depth) per sample (when applied in the main path of residual blocks)."""

    def __init__(self, drop_prob: Optional[float] = None) -> None:
        super().__init__()
        self.drop_prob = drop_prob

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        return drop_path(hidden_states, self.drop_prob, self.training)

    def extra_repr(self) -> str:
        return "p={}".format(self.drop_prob)
class PoolFormerEmbeddings(nn.Module):
    """Construct patch embeddings."""

    def __init__(self, hidden_size, num_channels, patch_size, stride, padding, norm_layer=None):
        super().__init__()
        patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
        stride = stride if isinstance(stride, collections.abc.Iterable) else (stride, stride)
        padding = padding if isinstance(padding, collections.abc.Iterable) else (padding, padding)

        self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=stride, padding=padding)
        self.norm = norm_layer(hidden_size) if norm_layer else nn.Identity()

    def forward(self, pixel_values):
        embeddings = self.projection(pixel_values)
        embeddings = self.norm(embeddings)
        return embeddings
class PoolFormerGroupNorm(nn.GroupNorm):
    """Group Normalization with 1 group. Input: tensor in shape [B, C, *]."""

    def __init__(self, num_channels, **kwargs):
        super().__init__(1, num_channels, **kwargs)


class PoolFormerPooling(nn.Module):
    def __init__(self, pool_size):
        super().__init__()
        self.pool = nn.AvgPool2d(pool_size, stride=1, padding=pool_size // 2, count_include_pad=False)

    def forward(self, hidden_states):
        return self.pool(hidden_states) - hidden_states
class PoolFormerOutput(nn.Module):
    def __init__(self, config, dropout_prob, hidden_size, intermediate_size):
        super().__init__()
        self.conv1 = nn.Conv2d(hidden_size, intermediate_size, 1)
        self.conv2 = nn.Conv2d(intermediate_size, hidden_size, 1)
        self.drop = PoolFormerDropPath(dropout_prob)
        if isinstance(config.hidden_act, str):
            self.act_fn = ACT2FN[config.hidden_act]
        else:
            self.act_fn = config.hidden_act

    def forward(self, hidden_states):
        hidden_states = self.conv1(hidden_states)
        hidden_states = self.act_fn(hidden_states)
        hidden_states = self.drop(hidden_states)
        hidden_states = self.conv2(hidden_states)
        hidden_states = self.drop(hidden_states)
        return hidden_states
class PoolFormerLayer(nn.Module):
    """This corresponds to the "PoolFormerBlock" class in the original implementation."""

    def __init__(self, config, num_channels, pool_size, hidden_size, intermediate_size, drop_path):
        super().__init__()
        self.pooling = PoolFormerPooling(pool_size)
        self.output = PoolFormerOutput(config, drop_path, hidden_size, intermediate_size)
        self.before_norm = PoolFormerGroupNorm(num_channels)
        self.after_norm = PoolFormerGroupNorm(num_channels)

        # Useful for training neural nets
        self.drop_path = PoolFormerDropPath(drop_path) if drop_path > 0.0 else nn.Identity()
        self.use_layer_scale = config.use_layer_scale
        if config.use_layer_scale:
            self.layer_scale_1 = nn.Parameter(
                config.layer_scale_init_value * torch.ones((num_channels)), requires_grad=True
            )
            self.layer_scale_2 = nn.Parameter(
                config.layer_scale_init_value * torch.ones((num_channels)), requires_grad=True
            )

    def forward(self, hidden_states):
        if self.use_layer_scale:
            pooling_output = self.pooling(self.before_norm(hidden_states))
            scaled_op = self.layer_scale_1.unsqueeze(-1).unsqueeze(-1) * pooling_output
            # First residual connection
            hidden_states = hidden_states + self.drop_path(scaled_op)
            outputs = ()

            layer_output = self.output(self.after_norm(hidden_states))
            scaled_op = self.layer_scale_2.unsqueeze(-1).unsqueeze(-1) * layer_output
            # Second residual connection
            output = hidden_states + self.drop_path(scaled_op)

            outputs = (output,) + outputs
            return outputs
        else:
            pooling_output = self.drop_path(self.pooling(self.before_norm(hidden_states)))
            # First residual connection
            hidden_states = pooling_output + hidden_states
            outputs = ()

            # Second residual connection inside the PoolFormerOutput block
            layer_output = self.drop_path(self.output(self.after_norm(hidden_states)))
            output = hidden_states + layer_output

            outputs = (output,) + outputs
            return outputs
class PoolFormerEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        # stochastic depth decay rule
        dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths))]

        # patch embeddings
        embeddings = []
        for i in range(config.num_encoder_blocks):
            embeddings.append(
                PoolFormerEmbeddings(
                    patch_size=config.patch_sizes[i],
                    stride=config.strides[i],
                    padding=config.padding[i],
                    num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1],
                    hidden_size=config.hidden_sizes[i],
                )
            )
        self.patch_embeddings = nn.ModuleList(embeddings)

        # Transformer blocks
        blocks = []
        cur = 0
        for i in range(config.num_encoder_blocks):
            # each block consists of layers
            layers = []
            if i != 0:
                cur += config.depths[i - 1]
            for j in range(config.depths[i]):
                layers.append(
                    PoolFormerLayer(
                        config,
                        num_channels=config.hidden_sizes[i],
                        pool_size=config.pool_size,
                        hidden_size=config.hidden_sizes[i],
                        intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio),
                        drop_path=dpr[cur + j],
                    )
                )
            blocks.append(nn.ModuleList(layers))

        self.block = nn.ModuleList(blocks)

    def forward(self, pixel_values, output_hidden_states=False, return_dict=True):
        all_hidden_states = () if output_hidden_states else None

        hidden_states = pixel_values
        for idx, layers in enumerate(zip(self.patch_embeddings, self.block)):
            embedding_layer, block_layer = layers
            # Get patch embeddings from hidden_states
            hidden_states = embedding_layer(hidden_states)
            # Send the embeddings through the blocks
            for _, blk in enumerate(block_layer):
                layer_outputs = blk(hidden_states)
                hidden_states = layer_outputs[0]

            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_states, hidden_states=all_hidden_states)
class PoolFormerPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = PoolFormerConfig
    base_model_prefix = "poolformer"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        """Initialize the weights"""
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, PoolFormerEncoder):
            module.gradient_checkpointing = value
POOLFORMER_START_DOCSTRING = r'''
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
POOLFORMER_INPUTS_DOCSTRING = r'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`PoolFormerImageProcessor.__call__`] for details.
'''
@add_start_docstrings(
    "The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.",
    POOLFORMER_START_DOCSTRING,
)
class PoolFormerModel(PoolFormerPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config

        self.encoder = PoolFormerEncoder(config)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings.patch_embeddings

    @add_start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithNoAttention]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        encoder_outputs = self.encoder(
            pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]

        if not return_dict:
            return (sequence_output, None) + encoder_outputs[1:]

        return BaseModelOutputWithNoAttention(
            last_hidden_state=sequence_output,
            hidden_states=encoder_outputs.hidden_states,
        )
class PoolFormerFinalPooler(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)

    def forward(self, hidden_states):
        output = self.dense(hidden_states)
        return output
@add_start_docstrings(
    """
    PoolFormer Model transformer with an image classification head on top
    """,
    POOLFORMER_START_DOCSTRING,
)
class PoolFormerForImageClassification(PoolFormerPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.poolformer = PoolFormerModel(config)

        # Final norm
        self.norm = PoolFormerGroupNorm(config.hidden_sizes[-1])

        # Classifier head
        self.classifier = (
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity()
        )

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, ImageClassifierOutputWithNoAttention]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.poolformer(
            pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]

        logits = self.classifier(self.norm(sequence_output).mean([-2, -1]))

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
| 130
| 1
|
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class MobileBERTTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MobileBertTokenizer
    rust_tokenizer_class = MobileBertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    pre_trained_model_path = 'google/mobilebert-uncased'
    def setUp(self):
        super().setUp()

        vocab_tokens = [
            '[UNK]',
            '[CLS]',
            '[SEP]',
            '[PAD]',
            '[MASK]',
            'want',
            '##want',
            '##ed',
            'wa',
            'un',
            'runn',
            '##ing',
            ',',
            'low',
            'lowest',
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens]))

        self.tokenizers_list = [
            (tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2])  # else the 'google/' prefix is stripped
            for tokenizer_def in self.tokenizers_list
        ]
    def get_input_output_texts(self, tokenizer):
        input_text = 'UNwant\u00E9d,running'
        output_text = 'unwanted, running'
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize('UNwant\u00E9d,running')
        self.assertListEqual(tokens, ['un', '##want', '##ed', ',', 'runn', '##ing'])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = 'UNwant\u00E9d,running'

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # With lower casing
        tokenizer = self.get_tokenizer(do_lower_case=True)
        rust_tokenizer = self.get_rust_tokenizer(do_lower_case=True)

        sequence = 'UNwant\u00E9d,running'

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_chinese(self):
        tokenizer = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz'), ['ah', '\u535A', '\u63A8', 'zz'])

    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(' \tHeLLo!how  \n Are yoU?  '), ['hello', '!', 'how', 'are', 'you', '?'])
        self.assertListEqual(tokenizer.tokenize('H\u00E9llo'), ['hello'])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how  \n Are yoU?  '), ['hällo', '!', 'how', 'are', 'you', '?'])
        self.assertListEqual(tokenizer.tokenize('H\u00E9llo'), ['h\u00E9llo'])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how  \n Are yoU?  '), ['hallo', '!', 'how', 'are', 'you', '?'])
        self.assertListEqual(tokenizer.tokenize('H\u00E9llo'), ['hello'])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how  \n Are yoU?  '), ['hallo', '!', 'how', 'are', 'you', '?'])
        self.assertListEqual(tokenizer.tokenize('H\u00E9llo'), ['hello'])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(' \tHeLLo!how  \n Are yoU?  '), ['HeLLo', '!', 'how', 'Are', 'yoU', '?'])

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how  \n Are yoU?  '), ['HäLLo', '!', 'how', 'Are', 'yoU', '?'])

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how  \n Are yoU?  '), ['HaLLo', '!', 'how', 'Are', 'yoU', '?'])

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=['[UNK]'])
        self.assertListEqual(
            tokenizer.tokenize(' \tHeLLo!how  \n Are yoU? [UNK]'), ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'])
    def test_wordpiece_tokenizer(self):
        vocab_tokens = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token='[UNK]')

        self.assertListEqual(tokenizer.tokenize(''), [])
        self.assertListEqual(tokenizer.tokenize('unwanted running'), ['un', '##want', '##ed', 'runn', '##ing'])
        self.assertListEqual(tokenizer.tokenize('unwantedX running'), ['[UNK]', 'runn', '##ing'])
    def test_is_whitespace(self):
        self.assertTrue(_is_whitespace(' '))
        self.assertTrue(_is_whitespace('\t'))
        self.assertTrue(_is_whitespace('\r'))
        self.assertTrue(_is_whitespace('\n'))
        self.assertTrue(_is_whitespace('\u00A0'))

        self.assertFalse(_is_whitespace('A'))
        self.assertFalse(_is_whitespace('-'))

    def test_is_control(self):
        self.assertTrue(_is_control('\u0005'))

        self.assertFalse(_is_control('A'))
        self.assertFalse(_is_control(' '))
        self.assertFalse(_is_control('\t'))
        self.assertFalse(_is_control('\r'))

    def test_is_punctuation(self):
        self.assertTrue(_is_punctuation('-'))
        self.assertTrue(_is_punctuation('$'))
        self.assertTrue(_is_punctuation('`'))
        self.assertTrue(_is_punctuation('.'))

        self.assertFalse(_is_punctuation('A'))
        self.assertFalse(_is_punctuation(' '))
    def test_clean_text(self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ['Test', '\xad', 'test']], [['[UNK]'], [], ['[UNK]']])
        self.assertListEqual(
            [rust_tokenizer.tokenize(t) for t in ['Test', '\xad', 'test']], [['[UNK]'], [], ['[UNK]']])
@slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained('google/mobilebert-uncased')

        text = tokenizer.encode('sequence builders', add_special_tokens=False)
        text_2 = tokenizer.encode('multi-sequence build', add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [101] + text + [102]
        assert encoded_pair == [101] + text + [102] + text_2 + [102]
def UpperCAmelCase_ ( self : Tuple ) -> str:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
snake_case_ : Any = self.rust_tokenizer_class.from_pretrained(_A , **_A )
snake_case_ : Optional[Any] = F"""A, naïve {tokenizer_r.mask_token} AllenNLP sentence."""
snake_case_ : List[str] = tokenizer_r.encode_plus(
_A , return_attention_mask=_A , return_token_type_ids=_A , return_offsets_mapping=_A , add_special_tokens=_A , )
snake_case_ : Optional[Any] = tokenizer_r.do_lower_case if hasattr(_A , 'do_lower_case' ) else False
snake_case_ : Tuple = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), 'A'),
((1, 2), ','),
((3, 5), 'na'),
((5, 6), '##ï'),
((6, 8), '##ve'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'Allen'),
((21, 23), '##NL'),
((23, 24), '##P'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), 'a'),
((1, 2), ','),
((3, 8), 'naive'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'allen'),
((21, 23), '##nl'),
((23, 24), '##p'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['input_ids'] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['offset_mapping'] )
def UpperCAmelCase_ ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
snake_case_ : str = ['的', '人', '有']
snake_case_ : Any = ''.join(_A )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
snake_case_ : int = True
snake_case_ : Optional[int] = self.tokenizer_class.from_pretrained(_A , **_A )
snake_case_ : List[str] = self.rust_tokenizer_class.from_pretrained(_A , **_A )
snake_case_ : List[Any] = tokenizer_p.encode(_A , add_special_tokens=_A )
snake_case_ : Union[str, Any] = tokenizer_r.encode(_A , add_special_tokens=_A )
snake_case_ : Optional[Any] = tokenizer_r.convert_ids_to_tokens(_A )
snake_case_ : Optional[int] = tokenizer_p.convert_ids_to_tokens(_A )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(_A , _A )
self.assertListEqual(_A , _A )
snake_case_ : Union[str, Any] = False
snake_case_ : Any = self.rust_tokenizer_class.from_pretrained(_A , **_A )
snake_case_ : Any = self.tokenizer_class.from_pretrained(_A , **_A )
snake_case_ : List[Any] = tokenizer_r.encode(_A , add_special_tokens=_A )
snake_case_ : List[Any] = tokenizer_p.encode(_A , add_special_tokens=_A )
snake_case_ : Union[str, Any] = tokenizer_r.convert_ids_to_tokens(_A )
snake_case_ : Union[str, Any] = tokenizer_p.convert_ids_to_tokens(_A )
# it is expected that only the first Chinese character is not preceded by "##".
snake_case_ : Optional[int] = [
F"""##{token}""" if idx != 0 else token for idx, token in enumerate(_A )
]
self.assertListEqual(_A , _A )
self.assertListEqual(_A , _A )
| 327
|
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class TFViTMAEModelTester:
    def __init__(self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, num_labels=3, mask_ratio=0.6, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels
    def get_config(self):
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_A , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def UpperCAmelCase_ ( self : List[Any] , _A : int , _A : Dict , _A : str ) -> Dict:
"""simple docstring"""
snake_case_ : Union[str, Any] = TFViTMAEModel(config=_A )
snake_case_ : str = model(_A , training=_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_pretraining( self , config , pixel_values , labels ) -> int:
        """simple docstring"""
        model = TFViTMAEForPreTraining(config )
        result = model(pixel_values , training=False )
        # expected sequence length = num_patches
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
        # test greyscale images
        config.num_channels = 1
        model = TFViTMAEForPreTraining(config )
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values , training=False )
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
    def prepare_config_and_inputs_for_common( self ) -> Tuple:
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (config, pixel_values, labels) = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_tf
class TFViTMAEModelTest ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
    pipeline_model_mapping = {"feature-extraction": TFViTMAEModel} if is_tf_available() else {}
    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp( self ) -> List[Any]:
        """simple docstring"""
        self.model_tester = TFViTMAEModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ViTMAEConfig , has_text_modality=False , hidden_size=37 )
    def test_config( self ) -> Union[str, Any]:
        """simple docstring"""
        self.config_tester.run_common_tests()
@unittest.skip(reason='ViTMAE does not use inputs_embeds' )
    def test_inputs_embeds( self ) -> Union[str, Any]:
        """simple docstring"""
        pass
    def test_model_common_attributes( self ) -> Union[str, Any]:
        """simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , tf.keras.layers.Layer ) )
    def test_forward_signature( self ) -> Dict:
        """simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.call )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model( self ) -> List[str]:
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_pretraining( self ) -> List[str]:
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs )
    def test_keyword_and_dict_args( self ) -> Dict:
        """simple docstring"""
        np.random.seed(2 )
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2 )
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
        for model_class in self.all_model_classes:
            model = model_class(config )
            inputs = self._prepare_for_class(inputs_dict , model_class )
            outputs_dict = model(inputs , noise=noise )
            inputs_keywords = copy.deepcopy(self._prepare_for_class(inputs_dict , model_class ) )
            outputs_keywords = model(**inputs_keywords , noise=noise )
            output_dict = outputs_dict[0].numpy()
            output_keywords = outputs_keywords[0].numpy()
            self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1E-6 )
    def test_numpy_arrays_inputs( self ) -> List[Any]:
        """simple docstring"""
        np.random.seed(2 )
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2 )
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
        def prepare_numpy_arrays(inputs_dict ):
            inputs_np_dict = {}
            for k, v in inputs_dict.items():
                if tf.is_tensor(v ):
                    inputs_np_dict[k] = v.numpy()
                else:
                    inputs_np_dict[k] = np.array(v )
            return inputs_np_dict
        for model_class in self.all_model_classes:
            model = model_class(config )
            inputs = self._prepare_for_class(inputs_dict , model_class )
            inputs_np = prepare_numpy_arrays(inputs )
            output_for_dict_input = model(inputs_np , noise=noise )
            output_for_kw_input = model(**inputs_np , noise=noise )
            self.assert_outputs_same(output_for_dict_input , output_for_kw_input )
    def check_pt_tf_models( self , tf_model , pt_model , tf_inputs_dict ) -> List[str]:
        """simple docstring"""
        np.random.seed(2 )
        num_patches = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 )
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
        tf_noise = tf.constant(noise )
        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        tf_inputs_dict["noise"] = tf_noise
        super().check_pt_tf_models(tf_model , pt_model , tf_inputs_dict )
    def test_keras_save_load( self ) -> Dict:
        """simple docstring"""
        np.random.seed(2 )
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        tf_main_layer_classes = {
            module_member
            for model_class in self.all_model_classes
            for module in (import_module(model_class.__module__ ),)
            for module_member_name in dir(module )
            if module_member_name.endswith('MainLayer' )
            # This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
            and module_member_name[: -len('MainLayer' )] == model_class.__name__[: -len('Model' )]
            for module_member in (getattr(module , module_member_name ),)
            if isinstance(module_member , type )
            and tf.keras.layers.Layer in module_member.__bases__
            and getattr(module_member , '_keras_serializable' , False )
        }
        num_patches = int((config.image_size // config.patch_size) ** 2 )
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
        noise = tf.convert_to_tensor(noise )
        inputs_dict.update({'noise': noise} )
        for main_layer_class in tf_main_layer_classes:
            main_layer = main_layer_class(config )
            symbolic_inputs = {
                name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items()
            }
            model = tf.keras.Model(symbolic_inputs , outputs=main_layer(symbolic_inputs ) )
            outputs = model(inputs_dict )
            with tempfile.TemporaryDirectory() as tmpdirname:
                filepath = os.path.join(tmpdirname , 'keras_model.h5' )
                model.save(filepath )
                model = tf.keras.models.load_model(
                    filepath , custom_objects={main_layer_class.__name__: main_layer_class} )
                assert isinstance(model , tf.keras.Model )
                after_outputs = model(inputs_dict )
                self.assert_outputs_same(after_outputs , outputs )
@slow
    def test_save_load( self ) -> Union[str, Any]:
        """simple docstring"""
        np.random.seed(2 )
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2 )
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
        for model_class in self.all_model_classes:
            model = model_class(config )
            model_inputs = self._prepare_for_class(inputs_dict , model_class )
            outputs = model(model_inputs , noise=noise )
            if model_class.__name__ == "TFViTMAEModel":
                out_2 = outputs.last_hidden_state.numpy()
                out_2[np.isnan(out_2 )] = 0
            else:
                out_2 = outputs.logits.numpy()
                out_2[np.isnan(out_2 )] = 0
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname , saved_model=False )
                model = model_class.from_pretrained(tmpdirname )
                after_outputs = model(model_inputs , noise=noise )
                if model_class.__name__ == "TFViTMAEModel":
                    out_1 = after_outputs['last_hidden_state'].numpy()
                    out_1[np.isnan(out_1 )] = 0
                else:
                    out_1 = after_outputs['logits'].numpy()
                    out_1[np.isnan(out_1 )] = 0
                max_diff = np.amax(np.abs(out_1 - out_2 ) )
                self.assertLessEqual(max_diff , 1E-5 )
    def test_save_load_config( self ) -> str:
        """simple docstring"""
        np.random.seed(2 )
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2 )
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
        for model_class in self.all_model_classes:
            model = model_class(config )
            model_inputs = self._prepare_for_class(inputs_dict , model_class )
            outputs = model(model_inputs , noise=noise )
            model_config = model.get_config()
            # make sure that returned config is jsonifiable, which is required by keras
            json.dumps(model_config )
            new_model = model_class.from_config(model.get_config() )
            # make sure it also accepts a normal config
            new_model = model_class.from_config(model.config )
            _ = new_model(model_inputs )  # Build model
            new_model.set_weights(model.get_weights() )
            after_outputs = new_model(model_inputs , noise=noise )
            self.assert_outputs_same(after_outputs , outputs )
@unittest.skip(
reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.' )
    def test_determinism( self ) -> Optional[int]:
        """simple docstring"""
        pass
@unittest.skip(reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load' )
    def test_model_outputs_equivalence( self ) -> Tuple:
        """simple docstring"""
        pass
@slow
    def test_model_from_pretrained( self ) -> Tuple:
        """simple docstring"""
        model = TFViTMAEModel.from_pretrained('google/vit-base-patch16-224' )
        self.assertIsNotNone(model )
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_tf
@require_vision
class TFViTMAEModelIntegrationTest ( unittest.TestCase ):
@cached_property
    def default_image_processor( self ) -> Dict:
        """simple docstring"""
        return ViTImageProcessor.from_pretrained('facebook/vit-mae-base' ) if is_vision_available() else None
@slow
    def test_inference_for_pretraining( self ) -> Dict:
        """simple docstring"""
        np.random.seed(2 )
        model = TFViTMAEForPreTraining.from_pretrained('facebook/vit-mae-base' )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='tf' )
        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
        noise = np.random.uniform(size=(1, num_patches) )
        # forward pass
        outputs = model(**inputs , noise=noise )
        # verify the logits
        expected_shape = tf.convert_to_tensor([1, 196, 768] )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = tf.convert_to_tensor(
            [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] )
        tf.debugging.assert_near(outputs.logits[0, :3, :3] , expected_slice , atol=1E-4 )
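# Illustrative sketch (not part of the original test file): passing an explicit
# `noise` array is what makes ViTMAE's otherwise random patch masking
# reproducible. A standalone repro, assuming the same checkpoint and 224/16
# geometry as the integration test above, would be:
#
#     model = TFViTMAEForPreTraining.from_pretrained('facebook/vit-mae-base')
#     image_processor = ViTImageProcessor.from_pretrained('facebook/vit-mae-base')
#     inputs = image_processor(images=prepare_img(), return_tensors='tf')
#     noise = np.random.uniform(size=(1, (224 // 16) ** 2))  # one value per patch
#     outputs = model(**inputs, noise=noise)  # logits: (1, 196, 16 * 16 * 3)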
| 327
| 1
|
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class TorchFormatter ( TensorFormatter[Mapping, "torch.Tensor", Mapping] ):
"""simple docstring"""
    def __init__( self , features=None , **torch_tensor_kwargs ) -> Any:
        super().__init__(features=features )
        self.torch_tensor_kwargs = torch_tensor_kwargs
        import torch  # noqa: import torch at initialization so a missing dependency fails early
    def _consolidate( self , column ) -> Dict:
        import torch
        if isinstance(column , list ) and column:
            if all(
                isinstance(x , torch.Tensor ) and x.shape == column[0].shape and x.dtype == column[0].dtype
                for x in column ):
                return torch.stack(column )
        return column
    def _tensorize( self , value ) -> Tuple:
        import torch
        if isinstance(value , (str, bytes, type(None )) ):
            return value
        elif isinstance(value , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
            return value.tolist()
        default_dtype = {}
        if isinstance(value , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
            default_dtype = {'dtype': torch.int64}
        elif isinstance(value , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
            default_dtype = {'dtype': torch.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image
            if isinstance(value , PIL.Image.Image ):
                value = np.asarray(value )
        return torch.tensor(value , **{**default_dtype, **self.torch_tensor_kwargs} )
    def _recursive_tensorize( self , data_struct ) -> int:
        import torch
        # support for torch, tf, jax etc.
        if hasattr(data_struct , '__array__' ) and not isinstance(data_struct , torch.Tensor ):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct , np.ndarray ):
            if data_struct.dtype == object:  # torch tensors cannot be instantiated from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct ) for substruct in data_struct] )
        elif isinstance(data_struct , (list, tuple) ):
            return self._consolidate([self.recursive_tensorize(substruct ) for substruct in data_struct] )
        return self._tensorize(data_struct )
    def recursive_tensorize( self , data_struct ) -> Union[str, Any]:
        return map_nested(self._recursive_tensorize , data_struct , map_list=False )
    def format_row( self , pa_table ) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table )
        row = self.python_features_decoder.decode_row(row )
        return self.recursive_tensorize(row )
    def format_column( self , pa_table ) -> "torch.Tensor":
        column = self.numpy_arrow_extractor().extract_column(pa_table )
        column = self.python_features_decoder.decode_column(column , pa_table.column_names[0] )
        column = self.recursive_tensorize(column )
        column = self._consolidate(column )
        return column
    def format_batch( self , pa_table ) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table )
        batch = self.python_features_decoder.decode_batch(batch )
        batch = self.recursive_tensorize(batch )
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name] )
        return batch
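# Illustrative usage (not part of the formatter above): this class is normally
# selected indirectly through `Dataset.set_format`/`with_format`, e.g.
#
#     import datasets
#     ds = datasets.Dataset.from_dict({"x": [[1, 2], [3, 4]]}).with_format("torch")
#     ds[0]["x"]  # -> tensor([1, 2])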
| 356
|
from __future__ import annotations
RADIX = 10
def A__ ( list_of_ints ):
    placement = 1
    max_digit = max(list_of_ints )
    while placement <= max_digit:
        # declare and initialize empty buckets
        buckets = [[] for _ in range(RADIX )]
        # split list_of_ints between the buckets
        for i in list_of_ints:
            tmp = int((i / placement) % RADIX )
            buckets[tmp].append(i )
        # put each bucket's contents back into list_of_ints
        a = 0
        for b in range(RADIX ):
            for i in buckets[b]:
                list_of_ints[a] = i
                a += 1
        # move to the next digit
        placement *= RADIX
    return list_of_ints
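# Example (assumes non-negative integers, as the digit extraction above does):
#
#     >>> A__([170, 45, 75, 90, 802, 24, 2, 66])
#     [2, 24, 45, 66, 75, 90, 170, 802]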
if __name__ == "__main__":
import doctest
doctest.testmod()
| 257
| 0
|
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def make_lowpass( frequency , samplerate , q_factor = 1 / sqrt(2 ) ) -> IIRFilter:
    '''simple docstring'''
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    b0 = (1 - _cos) / 2
    b1 = 1 - _cos
    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha
    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0] )
    return filt
def make_highpass( frequency , samplerate , q_factor = 1 / sqrt(2 ) ) -> IIRFilter:
    '''simple docstring'''
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    b0 = (1 + _cos) / 2
    b1 = -1 - _cos
    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha
    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0] )
    return filt
def make_bandpass( frequency , samplerate , q_factor = 1 / sqrt(2 ) ) -> IIRFilter:
    '''simple docstring'''
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    b0 = _sin / 2
    b1 = 0
    b2 = -b0
    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha
    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2] )
    return filt
def make_allpass( frequency , samplerate , q_factor = 1 / sqrt(2 ) ) -> IIRFilter:
    '''simple docstring'''
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha
    filt = IIRFilter(2 )
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2] )
    return filt
def make_peak( frequency , samplerate , gain_db , q_factor = 1 / sqrt(2 ) , ) -> IIRFilter:
    '''simple docstring'''
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a
    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2] )
    return filt
def make_lowshelf( frequency , samplerate , gain_db , q_factor = 1 / sqrt(2 ) , ) -> IIRFilter:
    '''simple docstring'''
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a ) * alpha
    b0 = big_a * (pmc + aa2)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aa2)
    a0 = ppmc + aa2
    a1 = -2 * pmpc
    a2 = ppmc - aa2
    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2] )
    return filt
def make_highshelf( frequency , samplerate , gain_db , q_factor = 1 / sqrt(2 ) , ) -> IIRFilter:
    '''simple docstring'''
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a ) * alpha
    b0 = big_a * (ppmc + aa2)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aa2)
    a0 = pmc + aa2
    a1 = 2 * mpc
    a2 = pmc - aa2
    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2] )
    return filt
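# Illustrative usage (not part of the file above): each factory returns a
# second-order IIRFilter; assuming the per-sample IIRFilter.process(sample) API
# from audio_filters.iir_filter, a signal is filtered one sample at a time:
#
#     filt = make_lowpass(1_000, 48_000)
#     filtered = [filt.process(sample) for sample in samples]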
| 138
|
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
logger = datasets.utils.logging.get_logger(__name__)
@dataclass
class __A ( datasets.BuilderConfig ):
    batch_size: int = 1_0000
    columns: Optional[List[str]] = None
    features: Optional[datasets.Features] = None
class __A ( datasets.ArrowBasedBuilder ):
    BUILDER_CONFIG_CLASS = ParquetConfig
    def _info( self ):
        return datasets.DatasetInfo(features=self.config.features )
    def _split_generators( self , dl_manager ):
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}" )
        data_files = dl_manager.download_and_extract(self.config.data_files )
        if isinstance(data_files , (str, list, tuple) ):
            files = data_files
            if isinstance(files , str ):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file ) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'files': files} )]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files , str ):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file ) for file in files]
            # Infer features if they are stored in the arrow schema
            if self.info.features is None:
                for file in itertools.chain.from_iterable(files ):
                    with open(file , 'rb' ) as f:
                        self.info.features = datasets.Features.from_arrow_schema(pq.read_schema(f ) )
                    break
            splits.append(datasets.SplitGenerator(name=split_name , gen_kwargs={'files': files} ) )
        return splits
    def _cast_table( self , pa_table: pa.Table ):
        if self.info.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table , self.info.features.arrow_schema )
        return pa_table
    def _generate_tables( self , files ):
        schema = self.info.features.arrow_schema if self.info.features is not None else None
        if self.info.features is not None and self.config.columns is not None:
            if sorted(field.name for field in schema ) != sorted(self.config.columns ):
                raise ValueError(
                    f"Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'" )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files ) ):
            with open(file , 'rb' ) as f:
                parquet_file = pq.ParquetFile(f )
                try:
                    for batch_idx, record_batch in enumerate(
                        parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ):
                        pa_table = pa.Table.from_batches([record_batch] )
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield f"{file_idx}_{batch_idx}", self._cast_table(pa_table )
                except ValueError as e:
                    logger.error(f"Failed to read file '{file}' with error {type(e )}: {e}" )
                    raise
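# Illustrative usage (not part of the builder above): this builder is what backs
# `load_dataset("parquet", ...)`, e.g.
#
#     from datasets import load_dataset
#     ds = load_dataset("parquet", data_files={"train": "data/*.parquet"})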
| 138
| 1
|
"""simple docstring"""
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
logger = logging.get_logger(__name__)
_FORMAT_TYPES: Dict[Optional[str], Type[Formatter]] = {}
_FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {}
_FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {}
def _register_formatter(formatter_cls , format_type , aliases = None , ):
    '''simple docstring'''
    aliases = aliases if aliases is not None else []
    if format_type in _FORMAT_TYPES:
        logger.warning(
            f"""Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})""" )
    _FORMAT_TYPES[format_type] = formatter_cls
    for alias in set(aliases + [format_type] ):
        if alias in _FORMAT_TYPES_ALIASES:
            logger.warning(
                f"""Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})""" )
        _FORMAT_TYPES_ALIASES[alias] = format_type
def _register_unavailable_formatter(unavailable_error , format_type , aliases = None ):
    '''simple docstring'''
    aliases = aliases if aliases is not None else []
    for alias in set(aliases + [format_type] ):
        _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=['''python'''])
_register_formatter(ArrowFormatter, '''arrow''', aliases=['''pa''', '''pyarrow'''])
_register_formatter(NumpyFormatter, '''numpy''', aliases=['''np'''])
_register_formatter(PandasFormatter, '''pandas''', aliases=['''pd'''])
_register_formatter(CustomFormatter, '''custom''')
if config.TORCH_AVAILABLE:
from .torch_formatter import TorchFormatter
_register_formatter(TorchFormatter, '''torch''', aliases=['''pt''', '''pytorch'''])
else:
    _torch_error = ValueError('''PyTorch needs to be installed to be able to return PyTorch tensors.''')
_register_unavailable_formatter(_torch_error, '''torch''', aliases=['''pt''', '''pytorch'''])
if config.TF_AVAILABLE:
from .tf_formatter import TFFormatter
_register_formatter(TFFormatter, '''tensorflow''', aliases=['''tf'''])
else:
    _tf_error = ValueError('''Tensorflow needs to be installed to be able to return Tensorflow tensors.''')
_register_unavailable_formatter(_tf_error, '''tensorflow''', aliases=['''tf'''])
if config.JAX_AVAILABLE:
from .jax_formatter import JaxFormatter
_register_formatter(JaxFormatter, '''jax''', aliases=[])
else:
    _jax_error = ValueError('''JAX needs to be installed to be able to return JAX arrays.''')
_register_unavailable_formatter(_jax_error, '''jax''', aliases=[])
def get_format_type_from_alias(format_type ):
    '''simple docstring'''
    if format_type in _FORMAT_TYPES_ALIASES:
        return _FORMAT_TYPES_ALIASES[format_type]
    else:
        return format_type
def get_formatter(format_type , **format_kwargs ):
    '''simple docstring'''
    format_type = get_format_type_from_alias(format_type )
    if format_type in _FORMAT_TYPES:
        return _FORMAT_TYPES[format_type](**format_kwargs )
    if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
        raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
    else:
        raise ValueError(
            f"""Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type is not None )}, but got '{format_type}'""" )
| 195
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_biogpt''': ['''BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BioGptConfig'''],
'''tokenization_biogpt''': ['''BioGptTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_biogpt"] = [
'''BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BioGptForCausalLM''',
'''BioGptForTokenClassification''',
'''BioGptForSequenceClassification''',
'''BioGptModel''',
'''BioGptPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
a__ : Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
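# Illustrative note (not part of the module above): with `_LazyModule`, heavy
# submodules load only on first attribute access, so e.g.
#
#     from transformers.models.biogpt import BioGptConfig   # cheap
#     from transformers.models.biogpt import BioGptModel    # triggers the torch-backed import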
| 195
| 1
|
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
    UNet2DConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
torch.backends.cuda.matmul.allow_tf32 = False
@skip_mps
class StableDiffusionAttendAndExcitePipelineFastTests ( PipelineKarrasSchedulerTesterMixin , PipelineLatentTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = StableDiffusionAttendAndExcitePipeline
    test_attention_slicing = False
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS.union({"token_indices"} )
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
@classmethod
    def setUpClass( cls ) -> Optional[Any]:
        super().setUpClass()
        torch.use_deterministic_algorithms(True )
    @classmethod
    def tearDownClass( cls ) -> Optional[int]:
        super().tearDownClass()
        torch.use_deterministic_algorithms(False )
    def get_dummy_components( self ) -> str:
        torch.manual_seed(0 )
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=1, sample_size=32, in_channels=4, out_channels=4, down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'), up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=True, )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule='scaled_linear', clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, sample_size=128, )
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act='gelu', projection_dim=512, )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        components = {
            'unet': unet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'safety_checker': None,
            'feature_extractor': None,
        }
        return components
    def get_dummy_inputs( self, device, seed=0 ) -> Optional[Any]:
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            'prompt': 'a cat and a frog',
            'token_indices': [2, 5],
            'generator': generator,
            'num_inference_steps': 1,
            'guidance_scale': 6.0,
            'output_type': 'numpy',
            'max_iter_to_alter': 2,
            'thresholds': {0: 0.7},
        }
        return inputs
    def test_inference( self ) -> int:
        device = 'cpu'
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape, (1, 64, 64, 3) )
        expected_slice = np.array(
            [0.63905364, 0.62897307, 0.48599017, 0.5133624, 0.5550048, 0.45769516, 0.50326973, 0.5023139, 0.45384496] )
        max_diff = np.abs(image_slice.flatten() - expected_slice ).max()
        self.assertLessEqual(max_diff, 1e-3 )
    def test_cpu_offload_forward_pass( self ) -> str:
        super().test_cpu_offload_forward_pass(expected_max_diff=5e-4 )
    def test_inference_batch_consistent( self ) -> List[str]:
        self._test_inference_batch_consistent(batch_sizes=[1, 2] )
    def test_inference_batch_single_identical( self ) -> Optional[Any]:
        self._test_inference_batch_single_identical(batch_size=2, expected_max_diff=7e-4 )
    def test_dict_tuple_outputs_equivalent( self ) -> Optional[Any]:
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
    def test_pt_np_pil_outputs_equivalent( self ) -> List[str]:
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5e-4 )
    def test_save_load_local( self ) -> List[str]:
        super().test_save_load_local(expected_max_difference=5e-4 )
    def test_save_load_optional_components( self ) -> Union[str, Any]:
        super().test_save_load_optional_components(expected_max_difference=4e-4 )
@require_torch_gpu
@slow
class StableDiffusionAttendAndExcitePipelineIntegrationTests ( unittest.TestCase ):
@classmethod
    def setUpClass( cls ) -> str:
        super().setUpClass()
        torch.use_deterministic_algorithms(True )
    @classmethod
    def tearDownClass( cls ) -> Optional[int]:
        super().tearDownClass()
        torch.use_deterministic_algorithms(False )
    def tearDown( self ) -> Tuple:
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_attend_and_excite_fp16( self ) -> List[Any]:
        generator = torch.manual_seed(51 )
        pipe = StableDiffusionAttendAndExcitePipeline.from_pretrained(
            'CompVis/stable-diffusion-v1-4', safety_checker=None, torch_dtype=torch.float16 )
        pipe.to('cuda' )
        prompt = 'a painting of an elephant with glasses'
        token_indices = [5, 7]
        image = pipe(
            prompt=prompt, token_indices=token_indices, guidance_scale=7.5, generator=generator, num_inference_steps=5, max_iter_to_alter=5, output_type='numpy', ).images[0]
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy' )
        assert np.abs((expected_image - image).max() ) < 5e-1
| 119
|
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
    UNet2DConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
torch.backends.cuda.matmul.allow_tf32 = False
@skip_mps
class StableDiffusionAttendAndExcitePipelineFastTests ( PipelineKarrasSchedulerTesterMixin , PipelineLatentTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = StableDiffusionAttendAndExcitePipeline
    test_attention_slicing = False
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS.union({"""token_indices"""} )
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
@classmethod
    def setUpClass( cls ):
        '''simple docstring'''
        super().setUpClass()
        torch.use_deterministic_algorithms(True )
    @classmethod
    def tearDownClass( cls ):
        '''simple docstring'''
        super().tearDownClass()
        torch.use_deterministic_algorithms(False )
    def get_dummy_components( self ):
        '''simple docstring'''
        torch.manual_seed(0 )
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=True , )
        scheduler = DDIMScheduler(
            beta_start=0.00085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=False , set_alpha_to_one=False , )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , )
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="""gelu""" , projection_dim=512 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        components = {
            """unet""": unet,
            """scheduler""": scheduler,
            """vae""": vae,
            """text_encoder""": text_encoder,
            """tokenizer""": tokenizer,
            """safety_checker""": None,
            """feature_extractor""": None,
        }
        return components
    def get_dummy_inputs( self , device , seed=0 ):
        '''simple docstring'''
        if str(device ).startswith("""mps""" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            """prompt""": """a cat and a frog""",
            """token_indices""": [2, 5],
            """generator""": generator,
            """num_inference_steps""": 1,
            """guidance_scale""": 6.0,
            """output_type""": """numpy""",
            """max_iter_to_alter""": 2,
            """thresholds""": {0: 0.7},
        }
        return inputs
    def test_inference( self ):
        '''simple docstring'''
        device = """cpu"""
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape , (1, 64, 64, 3) )
        expected_slice = np.array(
            [0.63905364, 0.62897307, 0.48599017, 0.5133624, 0.5550048, 0.45769516, 0.50326973, 0.5023139, 0.45384496] )
        max_diff = np.abs(image_slice.flatten() - expected_slice ).max()
        self.assertLessEqual(max_diff , 1E-3 )
    def test_cpu_offload_forward_pass( self ):
        '''simple docstring'''
        super().test_cpu_offload_forward_pass(expected_max_diff=5E-4 )
    def test_inference_batch_consistent( self ):
        '''simple docstring'''
        self._test_inference_batch_consistent(batch_sizes=[1, 2] )
    def test_inference_batch_single_identical( self ):
        '''simple docstring'''
        self._test_inference_batch_single_identical(batch_size=2 , expected_max_diff=7E-4 )
    def test_dict_tuple_outputs_equivalent( self ):
        '''simple docstring'''
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
    def test_pt_np_pil_outputs_equivalent( self ):
        '''simple docstring'''
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5E-4 )
    def test_save_load_local( self ):
        '''simple docstring'''
        super().test_save_load_local(expected_max_difference=5E-4 )
    def test_save_load_optional_components( self ):
        '''simple docstring'''
        super().test_save_load_optional_components(expected_max_difference=4E-4 )
@require_torch_gpu
@slow
class StableDiffusionAttendAndExcitePipelineIntegrationTests ( unittest.TestCase ):
@classmethod
    def setUpClass( cls ):
        '''simple docstring'''
        super().setUpClass()
        torch.use_deterministic_algorithms(True )
    @classmethod
    def tearDownClass( cls ):
        '''simple docstring'''
        super().tearDownClass()
        torch.use_deterministic_algorithms(False )
    def tearDown( self ):
        '''simple docstring'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_attend_and_excite_fp16( self ):
        '''simple docstring'''
        generator = torch.manual_seed(51 )
        pipe = StableDiffusionAttendAndExcitePipeline.from_pretrained(
            """CompVis/stable-diffusion-v1-4""" , safety_checker=None , torch_dtype=torch.float16 )
        pipe.to("""cuda""" )
        prompt = """a painting of an elephant with glasses"""
        token_indices = [5, 7]
        image = pipe(
            prompt=prompt , token_indices=token_indices , guidance_scale=7.5 , generator=generator , num_inference_steps=5 , max_iter_to_alter=5 , output_type="""numpy""" , ).images[0]
        expected_image = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy""" )
        assert np.abs((expected_image - image).max() ) < 5E-1
| 228
| 0
|
"""simple docstring"""
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class _SCREAMING_SNAKE_CASE ( SchedulerCommonTest ):
    scheduler_classes = (UniPCMultistepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)
    def get_scheduler_config( self , **kwargs ) -> List[str]:
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
            "solver_type": "bh2",
        }
        config.update(**kwargs )
        return config
    def check_over_configs( self , time_step=0 , **config ) -> List[Any]:
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop("num_inference_steps" , None )
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config )
            scheduler = scheduler_class(**scheduler_config )
            scheduler.set_timesteps(num_inference_steps )
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname )
                new_scheduler = scheduler_class.from_pretrained(tmpdirname )
                new_scheduler.set_timesteps(num_inference_steps )
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]
            output , new_output = sample, sample
            for t in range(time_step , time_step + scheduler.config.solver_order + 1 ):
                output = scheduler.step(residual , t , output , **kwargs ).prev_sample
                new_output = new_scheduler.step(residual , t , new_output , **kwargs ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
    def check_over_forward( self , time_step=0 , **forward_kwargs ) -> Union[str, Any]:
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop("num_inference_steps" , None )
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config )
            scheduler.set_timesteps(num_inference_steps )
            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname )
                new_scheduler = scheduler_class.from_pretrained(tmpdirname )
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps )
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]
            output = scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            new_output = new_scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
    def full_loop( self , scheduler=None , **config ) -> List[Any]:
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config )
            scheduler = scheduler_class(**scheduler_config )
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config )
        scheduler = scheduler_class(**scheduler_config )
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps )
        for i, t in enumerate(scheduler.timesteps ):
            residual = model(sample , t )
            sample = scheduler.step(residual , t , sample ).prev_sample
        return sample
    def test_step_shape( self ) -> List[Any]:
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop("num_inference_steps" , None )
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config )
            sample = self.dummy_sample
            residual = 0.1 * sample
            if num_inference_steps is not None and hasattr(scheduler , "set_timesteps" ):
                scheduler.set_timesteps(num_inference_steps )
            elif num_inference_steps is not None and not hasattr(scheduler , "set_timesteps" ):
                scheduler.num_inference_steps = num_inference_steps
            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]
            output_0 = scheduler.step(residual , time_step_0 , sample , **kwargs ).prev_sample
            output_1 = scheduler.step(residual , time_step_1 , sample , **kwargs ).prev_sample
            self.assertEqual(output_0.shape , sample.shape )
            self.assertEqual(output_0.shape , output_1.shape )
    def test_switch( self ) -> str:
        # make sure that iterating over schedulers with same config names gives same results
        # for defaults
        scheduler = UniPCMultistepScheduler(**self.get_scheduler_config() )
        sample = self.full_loop(scheduler=scheduler )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 0.2464 ) < 1E-3
        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config )
        scheduler = DEISMultistepScheduler.from_config(scheduler.config )
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config )
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config )
        sample = self.full_loop(scheduler=scheduler )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 0.2464 ) < 1E-3
    def test_timesteps( self ) -> List[Any]:
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps )
    def test_thresholding( self ) -> Dict:
        self.check_over_configs(thresholding=False )
        for order in [1, 2, 3]:
            for solver_type in ["bh1", "bh2"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True , prediction_type=prediction_type , sample_max_value=threshold , solver_order=order , solver_type=solver_type , )
    def test_prediction_type( self ) -> int:
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type )
    def test_solver_order_and_type( self ) -> Tuple:
        for solver_type in ["bh1", "bh2"]:
            for order in [1, 2, 3]:
                for prediction_type in ["epsilon", "sample"]:
                    self.check_over_configs(
                        solver_order=order , solver_type=solver_type , prediction_type=prediction_type , )
                    sample = self.full_loop(
                        solver_order=order , solver_type=solver_type , prediction_type=prediction_type , )
                    assert not torch.isnan(sample ).any(), "Samples have nan numbers"
    def test_lower_order_final( self ) -> Optional[int]:
        self.check_over_configs(lower_order_final=True )
        self.check_over_configs(lower_order_final=False )
    def test_inference_steps( self ) -> str:
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps , time_step=0 )
    def test_full_loop_no_noise( self ) -> int:
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 0.2464 ) < 1E-3
    def test_full_loop_with_v_prediction( self ) -> Optional[Any]:
        sample = self.full_loop(prediction_type="v_prediction" )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 0.1014 ) < 1E-3
    def test_fp16_support( self ) -> List[Any]:
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True , dynamic_thresholding_ratio=0 )
        scheduler = scheduler_class(**scheduler_config )
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps )
        for i, t in enumerate(scheduler.timesteps ):
            residual = model(sample , t )
            sample = scheduler.step(residual , t , sample ).prev_sample
        assert sample.dtype == torch.float16
    def test_unique_timesteps( self , **config ) -> List[str]:
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config )
            scheduler = scheduler_class(**scheduler_config )
            scheduler.set_timesteps(scheduler.config.num_train_timesteps )
            assert len(scheduler.timesteps.unique() ) == scheduler.num_inference_steps
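# Illustrative note (not part of the tests above): the from_config round-trip
# exercised in test_switch is also the supported way to swap solvers at
# inference time, e.g.
#
#     scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
#     pipe.scheduler = scheduler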
| 1
|
"""simple docstring"""
def knapsack( weights : list , values : list , number_of_items : int , max_weight : int , index : int ) -> int:
    '''simple docstring'''
    if index == number_of_items:
        return 0
    ans1 = 0
    ans2 = 0
    ans1 = knapsack(weights , values , number_of_items , max_weight , index + 1 )
    if weights[index] <= max_weight:
        ans2 = values[index] + knapsack(
            weights , values , number_of_items , max_weight - weights[index] , index + 1 )
    return max(ans1 , ans2 )
if __name__ == "__main__":
import doctest
doctest.testmod()
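# Example (hypothetical weights/values): with weights [1, 2, 4, 5], values
# [5, 4, 8, 6] and capacity 5, the best choice is items 0 and 2 (weight 1 + 4):
#
#     >>> knapsack([1, 2, 4, 5], [5, 4, 8, 6], 4, 5, 0)
#     13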
| 1
| 1
|
'''simple docstring'''
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
_UpperCamelCase = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
# !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
parser.add_argument(
'--original_config_file',
default=None,
type=str,
help='The YAML config file corresponding to the original architecture.',
)
parser.add_argument(
'--num_in_channels',
default=None,
type=int,
help='The number of input channels. If `None` number of input channels will be automatically inferred.',
)
parser.add_argument(
'--scheduler_type',
default='pndm',
type=str,
help='Type of scheduler to use. Should be one of [\'pndm\', \'lms\', \'ddim\', \'euler\', \'euler-ancestral\', \'dpm\']',
)
parser.add_argument(
'--pipeline_type',
default=None,
type=str,
help=(
'The pipeline type. One of \'FrozenOpenCLIPEmbedder\', \'FrozenCLIPEmbedder\', \'PaintByExample\''
'. If `None` pipeline will be automatically inferred.'
),
)
parser.add_argument(
'--image_size',
default=None,
type=int,
help=(
        'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2'
' Base. Use 768 for Stable Diffusion v2.'
),
)
parser.add_argument(
'--prediction_type',
default=None,
type=str,
help=(
'The prediction type that the model was trained on. Use \'epsilon\' for Stable Diffusion v1.X and Stable'
' Diffusion v2 Base. Use \'v_prediction\' for Stable Diffusion v2.'
),
)
parser.add_argument(
'--extract_ema',
action='store_true',
help=(
'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'
' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'
' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'
),
)
parser.add_argument(
'--upcast_attention',
action='store_true',
help=(
'Whether the attention computation should always be upcasted. This is necessary when running stable'
' diffusion 2.1.'
),
)
parser.add_argument(
'--from_safetensors',
action='store_true',
help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.',
)
parser.add_argument(
'--to_safetensors',
action='store_true',
help='Whether to store pipeline in safetensors format or not.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')
parser.add_argument(
'--stable_unclip',
type=str,
default=None,
required=False,
help='Set if this is a stable unCLIP model. One of \'txt2img\' or \'img2img\'.',
)
parser.add_argument(
'--stable_unclip_prior',
type=str,
default=None,
required=False,
help='Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.',
)
parser.add_argument(
'--clip_stats_path',
type=str,
help='Path to the clip stats file. Only required if the stable unclip model\'s config specifies `model.params.noise_aug_config.params.clip_stats_path`.',
required=False,
)
parser.add_argument(
'--controlnet', action='store_true', default=None, help='Set flag if this is a controlnet checkpoint.'
)
parser.add_argument('--half', action='store_true', help='Save weights in half precision.')
parser.add_argument(
'--vae_path',
type=str,
default=None,
required=False,
help='Set to a path, hub id to an already converted vae to not convert it again.',
)
_UpperCamelCase = parser.parse_args()
_UpperCamelCase = download_from_original_stable_diffusion_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
prediction_type=args.prediction_type,
model_type=args.pipeline_type,
extract_ema=args.extract_ema,
scheduler_type=args.scheduler_type,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
stable_unclip=args.stable_unclip,
stable_unclip_prior=args.stable_unclip_prior,
clip_stats_path=args.clip_stats_path,
controlnet=args.controlnet,
vae_path=args.vae_path,
)
if args.half:
    pipe.to(torch_dtype=torch.float16)
if args.controlnet:
# only save the controlnet model
pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
else:
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
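# Illustrative invocation (paths are placeholders, not from the original script):
#
#     python convert_original_stable_diffusion_to_diffusers.py \
#         --checkpoint_path ./v1-5-pruned-emaonly.ckpt \
#         --dump_path ./stable-diffusion-v1-5 \
#         --extract_ema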
| 208
|
'''simple docstring'''
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class lowerCamelCase_ ( ProcessorMixin ):
"""simple docstring"""
a_ =["""image_processor""", """tokenizer"""]
a_ ="""OwlViTImageProcessor"""
a_ =("""CLIPTokenizer""", """CLIPTokenizerFast""")
def __init__( self : Any , _a : str=None , _a : Optional[Any]=None , **_a : int ) -> List[str]:
__lowerCamelCase : List[Any] = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , _a , )
__lowerCamelCase : Tuple = kwargs.pop('feature_extractor' )
__lowerCamelCase : int = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(_a , _a )
def __call__( self : Tuple , _a : Tuple=None , _a : int=None , _a : List[Any]=None , _a : List[Any]="max_length" , _a : Union[str, Any]="np" , **_a : Union[str, Any] ) -> List[str]:
if text is None and query_images is None and images is None:
raise ValueError(
'You have to specify at least one text or query image or image. All three cannot be none.' )
if text is not None:
if isinstance(_a , _a ) or (isinstance(_a , _a ) and not isinstance(text[0] , _a )):
__lowerCamelCase : Any = [self.tokenizer(_a , padding=_a , return_tensors=_a , **_a )]
elif isinstance(_a , _a ) and isinstance(text[0] , _a ):
__lowerCamelCase : List[Any] = []
# Maximum number of queries across batch
__lowerCamelCase : str = max([len(_a ) for t in text] )
# Pad all batch samples to max number of text queries
for t in text:
if len(_a ) != max_num_queries:
__lowerCamelCase : List[Any] = t + [' '] * (max_num_queries - len(_a ))
__lowerCamelCase : Dict = self.tokenizer(_a , padding=_a , return_tensors=_a , **_a )
encodings.append(_a )
else:
raise TypeError('Input text should be a string, a list of strings or a nested list of strings' )
if return_tensors == "np":
__lowerCamelCase : Optional[Any] = np.concatenate([encoding['input_ids'] for encoding in encodings] , axis=0 )
__lowerCamelCase : str = np.concatenate([encoding['attention_mask'] for encoding in encodings] , axis=0 )
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
__lowerCamelCase : int = jnp.concatenate([encoding['input_ids'] for encoding in encodings] , axis=0 )
__lowerCamelCase : List[str] = jnp.concatenate([encoding['attention_mask'] for encoding in encodings] , axis=0 )
elif return_tensors == "pt" and is_torch_available():
import torch
__lowerCamelCase : str = torch.cat([encoding['input_ids'] for encoding in encodings] , dim=0 )
__lowerCamelCase : Optional[Any] = torch.cat([encoding['attention_mask'] for encoding in encodings] , dim=0 )
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
__lowerCamelCase : List[Any] = tf.stack([encoding['input_ids'] for encoding in encodings] , axis=0 )
__lowerCamelCase : int = tf.stack([encoding['attention_mask'] for encoding in encodings] , axis=0 )
else:
raise ValueError('Target return tensor type could not be returned' )
__lowerCamelCase : Any = BatchEncoding()
__lowerCamelCase : Dict = input_ids
__lowerCamelCase : str = attention_mask
if query_images is not None:
__lowerCamelCase : Optional[int] = BatchEncoding()
__lowerCamelCase : List[Any] = self.image_processor(
_a , return_tensors=_a , **_a ).pixel_values
__lowerCamelCase : str = query_pixel_values
if images is not None:
__lowerCamelCase : Union[str, Any] = self.image_processor(_a , return_tensors=_a , **_a )
if text is not None and images is not None:
__lowerCamelCase : Tuple = image_features.pixel_values
return encoding
elif query_images is not None and images is not None:
__lowerCamelCase : Tuple = image_features.pixel_values
return encoding
elif text is not None or query_images is not None:
return encoding
else:
return BatchEncoding(data=dict(**_a ) , tensor_type=_a )
def _lowercase ( self : Optional[Any] , *_a : List[str] , **_a : Dict ) -> int:
return self.image_processor.post_process(*_a , **_a )
def _lowercase ( self : str , *_a : str , **_a : List[str] ) -> int:
return self.image_processor.post_process_object_detection(*_a , **_a )
def _lowercase ( self : int , *_a : List[Any] , **_a : Optional[int] ) -> str:
return self.image_processor.post_process_image_guided_detection(*_a , **_a )
def _lowercase ( self : Tuple , *_a : Optional[Any] , **_a : List[Any] ) -> Tuple:
return self.tokenizer.batch_decode(*_a , **_a )
def _lowercase ( self : str , *_a : Optional[Any] , **_a : str ) -> Union[str, Any]:
return self.tokenizer.decode(*_a , **_a )
@property
def _lowercase ( self : Any ) -> Any:
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , _a , )
return self.image_processor_class
@property
def _lowercase ( self : Union[str, Any] ) -> Any:
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , _a , )
return self.image_processor
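# Hedged usage sketch (illustrative, not from the source): __call__ above pads
# ragged per-image query lists to the batch maximum, so nested text input is
# accepted. The checkpoint id is an assumption.
#
#   from PIL import Image
#   from transformers import OwlViTProcessor
#
#   processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
#   image = Image.new("RGB", (768, 768))
#   inputs = processor(
#       text=[["a photo of a cat", "a photo of a dog"], ["a remote"]],
#       images=[image, image],
#       return_tensors="pt",
#   )
#   # -> BatchEncoding with `input_ids`, `attention_mask` and `pixel_values`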
| 208
| 1
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class _UpperCAmelCase ( unittest.TestCase ):
def __init__( self : Optional[int] , _lowercase : Optional[Any] , _lowercase : List[str]=7 , _lowercase : Optional[int]=3 , _lowercase : Union[str, Any]=18 , _lowercase : Optional[int]=30 , _lowercase : int=4_00 , _lowercase : int=True , _lowercase : Dict=None , _lowercase : Any=True , _lowercase : List[str]=False , _lowercase : Union[str, Any]=True , _lowercase : Tuple=True , _lowercase : List[Any]=[0.5, 0.5, 0.5] , _lowercase : str=[0.5, 0.5, 0.5] , ):
__UpperCAmelCase = parent
__UpperCAmelCase = batch_size
__UpperCAmelCase = num_channels
__UpperCAmelCase = image_size
__UpperCAmelCase = min_resolution
__UpperCAmelCase = max_resolution
__UpperCAmelCase = do_resize
__UpperCAmelCase = size if size is not None else {'''height''': 18, '''width''': 20}
__UpperCAmelCase = do_thumbnail
__UpperCAmelCase = do_align_axis
__UpperCAmelCase = do_pad
__UpperCAmelCase = do_normalize
__UpperCAmelCase = image_mean
__UpperCAmelCase = image_std
def a ( self : Any ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class _UpperCAmelCase ( ImageProcessingSavingTestMixin , unittest.TestCase ):
a__ : str = DonutImageProcessor if is_vision_available() else None
def a ( self : Dict ):
__UpperCAmelCase = DonutImageProcessingTester(self )
@property
def a ( self : Any ):
return self.image_processor_tester.prepare_image_processor_dict()
def a ( self : Tuple ):
__UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__lowerCamelCase , '''do_resize''' ) )
self.assertTrue(hasattr(__lowerCamelCase , '''size''' ) )
self.assertTrue(hasattr(__lowerCamelCase , '''do_thumbnail''' ) )
self.assertTrue(hasattr(__lowerCamelCase , '''do_align_long_axis''' ) )
self.assertTrue(hasattr(__lowerCamelCase , '''do_pad''' ) )
self.assertTrue(hasattr(__lowerCamelCase , '''do_normalize''' ) )
self.assertTrue(hasattr(__lowerCamelCase , '''image_mean''' ) )
self.assertTrue(hasattr(__lowerCamelCase , '''image_std''' ) )
def a ( self : Optional[Any] ):
__UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''height''': 18, '''width''': 20} )
__UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} )
# Previous config had dimensions in (width, height) order
__UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) )
self.assertEqual(image_processor.size , {'''height''': 84, '''width''': 42} )
def a ( self : Dict ):
pass
@is_flaky()
def a ( self : Dict ):
# Initialize image_processing
__UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , Image.Image )
# Test not batched input
__UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
__UpperCAmelCase = image_processing(__lowerCamelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
@is_flaky()
def a ( self : int ):
# Initialize image_processing
__UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , numpify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , np.ndarray )
# Test not batched input
__UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
__UpperCAmelCase = image_processing(__lowerCamelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
@is_flaky()
def a ( self : str ):
# Initialize image_processing
__UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , torchify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , torch.Tensor )
# Test not batched input
__UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
__UpperCAmelCase = image_processing(__lowerCamelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
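# Hedged sketch (not part of the test suite) of exercising the processor the
# way the assertions above expect:
#
#   from PIL import Image
#   from transformers import DonutImageProcessor
#
#   image_processor = DonutImageProcessor(size={"height": 18, "width": 20})
#   pixel_values = image_processor(Image.new("RGB", (30, 40)), return_tensors="pt").pixel_values
#   assert pixel_values.shape == (1, 3, 18, 20)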
| 367
|
"""simple docstring"""
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import sha256
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cv2 as cva  # OpenCV, aliased to match the call sites below
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
import torch
_lowercase : Any = True
except ImportError:
_lowercase : str = False
try:
from torch.hub import _get_torch_home
_lowercase : Any = _get_torch_home()
except ImportError:
_lowercase : Dict = os.path.expanduser(
os.getenv('TORCH_HOME', os.path.join(os.getenv('XDG_CACHE_HOME', '~/.cache'), 'torch'))
)
_lowercase : Tuple = os.path.join(torch_cache_home, 'transformers')
_lowercase : int = 'https://cdn.huggingface.co'
_lowercase : Union[str, Any] = 'https://s3.amazonaws.com/models.huggingface.co/bert'
_lowercase : str = '/'.join(str(Path(__file__).resolve()).split('/')[:-1])
_lowercase : str = os.path.join(PATH, 'config.yaml')
_lowercase : int = os.path.join(PATH, 'attributes.txt')
_lowercase : List[str] = os.path.join(PATH, 'objects.txt')
_lowercase : Optional[int] = os.getenv('PYTORCH_PRETRAINED_BERT_CACHE', default_cache_path)
_lowercase : int = os.getenv('PYTORCH_TRANSFORMERS_CACHE', PYTORCH_PRETRAINED_BERT_CACHE)
_lowercase : Dict = os.getenv('TRANSFORMERS_CACHE', PYTORCH_TRANSFORMERS_CACHE)
_lowercase : Union[str, Any] = 'pytorch_model.bin'
_lowercase : List[str] = 'config.yaml'
def lowercase__ ( snake_case_ :int=OBJECTS , snake_case_ :Optional[int]=ATTRIBUTES ):
__UpperCAmelCase = []
with open(snake_case_ ) as f:
for object in f.readlines():
vg_classes.append(object.split(''',''' )[0].lower().strip() )
__UpperCAmelCase = []
with open(snake_case_ ) as f:
for object in f.readlines():
vg_attrs.append(object.split(''',''' )[0].lower().strip() )
return vg_classes, vg_attrs
def lowercase__ ( snake_case_ :List[Any] ):
__UpperCAmelCase = OrderedDict()
with open(snake_case_ , '''rb''' ) as f:
__UpperCAmelCase = pkl.load(snake_case_ )['''model''']
for k in copy.deepcopy(list(ckp.keys() ) ):
__UpperCAmelCase = ckp.pop(snake_case_ )
if isinstance(snake_case_ , np.ndarray ):
__UpperCAmelCase = torch.tensor(snake_case_ )
else:
            assert isinstance(snake_case_ , torch.Tensor ), type(snake_case_ )
__UpperCAmelCase = v
return r
class _UpperCAmelCase :
a__ : Tuple = {}
def __init__( self : List[str] , _lowercase : dict , _lowercase : str = "root" , _lowercase : Optional[Any]=0 ):
__UpperCAmelCase = name
__UpperCAmelCase = level
__UpperCAmelCase = {}
for k, v in dictionary.items():
if v is None:
raise ValueError()
__UpperCAmelCase = copy.deepcopy(_lowercase )
__UpperCAmelCase = copy.deepcopy(_lowercase )
if isinstance(_lowercase , _lowercase ):
__UpperCAmelCase = Config(_lowercase , name=_lowercase , level=level + 1 )
__UpperCAmelCase = v
setattr(self , _lowercase , _lowercase )
__UpperCAmelCase = d
def __repr__( self : Any ):
return str(list((self._pointer.keys()) ) )
def __setattr__( self : Optional[Any] , _lowercase : Optional[Any] , _lowercase : Dict ):
__UpperCAmelCase = val
__UpperCAmelCase = val
__UpperCAmelCase = key.split('''.''' )
__UpperCAmelCase = len(_lowercase ) - 1
__UpperCAmelCase = self._pointer
if len(_lowercase ) > 1:
for i, l in enumerate(_lowercase ):
if hasattr(self , _lowercase ) and isinstance(getattr(self , _lowercase ) , _lowercase ):
setattr(getattr(self , _lowercase ) , '''.'''.join(levels[i:] ) , _lowercase )
if l == last_level:
__UpperCAmelCase = val
else:
__UpperCAmelCase = pointer[l]
def a ( self : int ):
return self._pointer
def a ( self : List[str] , _lowercase : Dict , _lowercase : str ):
with open(F'''{file_name}''' , '''w''' ) as stream:
dump(_lowercase , _lowercase )
def a ( self : int , _lowercase : Dict , _lowercase : Tuple ):
with open(F'''{file_name}''' , '''w''' ) as stream:
json.dump(_lowercase , _lowercase )
@staticmethod
def a ( _lowercase : str ):
with open(_lowercase ) as stream:
__UpperCAmelCase = load(_lowercase , Loader=_lowercase )
return data
def __str__( self : Dict ):
__UpperCAmelCase = ''' '''
if self._name != "root":
__UpperCAmelCase = F'''{t * (self._level-1)}{self._name}:\n'''
else:
__UpperCAmelCase = ''''''
__UpperCAmelCase = self._level
for i, (k, v) in enumerate(self._pointer.items() ):
if isinstance(_lowercase , _lowercase ):
r += F'''{t * (self._level)}{v}\n'''
self._level += 1
else:
r += F'''{t * (self._level)}{k}: {v} ({type(_lowercase ).__name__})\n'''
__UpperCAmelCase = level
return r[:-1]
@classmethod
def a ( cls : str , _lowercase : str , **_lowercase : Any ):
__UpperCAmelCase , __UpperCAmelCase = cls.get_config_dict(_lowercase , **_lowercase )
return cls(_lowercase )
@classmethod
def a ( cls : Any , _lowercase : str , **_lowercase : str ):
__UpperCAmelCase = kwargs.pop('''cache_dir''' , _lowercase )
__UpperCAmelCase = kwargs.pop('''force_download''' , _lowercase )
__UpperCAmelCase = kwargs.pop('''resume_download''' , _lowercase )
__UpperCAmelCase = kwargs.pop('''proxies''' , _lowercase )
__UpperCAmelCase = kwargs.pop('''local_files_only''' , _lowercase )
if os.path.isdir(_lowercase ):
__UpperCAmelCase = os.path.join(_lowercase , _lowercase )
elif os.path.isfile(_lowercase ) or is_remote_url(_lowercase ):
__UpperCAmelCase = pretrained_model_name_or_path
else:
__UpperCAmelCase = hf_bucket_url(_lowercase , filename=_lowercase , use_cdn=_lowercase )
try:
# Load from URL or cache if already cached
__UpperCAmelCase = cached_path(
_lowercase , cache_dir=_lowercase , force_download=_lowercase , proxies=_lowercase , resume_download=_lowercase , local_files_only=_lowercase , )
# Load config dict
if resolved_config_file is None:
raise EnvironmentError
__UpperCAmelCase = Config.load_yaml(_lowercase )
except EnvironmentError:
__UpperCAmelCase = '''Can\'t load config for'''
raise EnvironmentError(_lowercase )
if resolved_config_file == config_file:
print('''loading configuration file from path''' )
else:
print('''loading configuration file cache''' )
return Config.load_yaml(_lowercase ), kwargs
def lowercase__ ( snake_case_ :List[str] ):
__UpperCAmelCase = torch.load('''dump.pt''' , map_location=in_tensor.device )
__UpperCAmelCase = in_tensor.numpy()
__UpperCAmelCase = out_tensor.numpy()[0]
print(na.shape , na[0, 0, :5] )
print(na.shape , na[0, 0, :5] )
assert np.allclose(snake_case_ , snake_case_ , rtol=0.01 , atol=0.1 ), (
F'''{sum([1 for x in np.isclose(snake_case_ , snake_case_ , rtol=0.01 , atol=0.1 ).flatten() if x is False] )/len(na.flatten() )*100:.4f} %'''
" element-wise mismatch"
)
raise Exception('''tensors are all good''' )
# Hugging face functions below
def lowercase__ ( snake_case_ :List[str] ):
__UpperCAmelCase = urlparse(snake_case_ )
return parsed.scheme in ("http", "https")
def lowercase__ ( snake_case_ :str , snake_case_ :str , snake_case_ :List[str]=True ):
__UpperCAmelCase = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
__UpperCAmelCase = '''/''' not in model_id
if legacy_format:
return F'''{endpoint}/{model_id}-{filename}'''
else:
return F'''{endpoint}/{model_id}/{filename}'''
def lowercase__ ( snake_case_ :str , snake_case_ :Tuple , snake_case_ :List[str]=None , snake_case_ :List[str]=0 , snake_case_ :List[Any]=None , ):
__UpperCAmelCase = '''python/{}'''.format(sys.version.split()[0] )
if _torch_available:
ua += "; torch/{}".format(torch.__version__ )
if isinstance(snake_case_ , snake_case_ ):
ua += "; " + "; ".join('''{}/{}'''.format(snake_case_ , snake_case_ ) for k, v in user_agent.items() )
elif isinstance(snake_case_ , snake_case_ ):
ua += "; " + user_agent
__UpperCAmelCase = {'''user-agent''': ua}
if resume_size > 0:
__UpperCAmelCase = '''bytes=%d-''' % (resume_size,)
__UpperCAmelCase = requests.get(snake_case_ , stream=snake_case_ , proxies=snake_case_ , headers=snake_case_ )
if response.status_code == 416: # Range not satisfiable
return
__UpperCAmelCase = response.headers.get('''Content-Length''' )
__UpperCAmelCase = resume_size + int(snake_case_ ) if content_length is not None else None
__UpperCAmelCase = tqdm(
unit='''B''' , unit_scale=snake_case_ , total=snake_case_ , initial=snake_case_ , desc='''Downloading''' , )
for chunk in response.iter_content(chunk_size=1_024 ):
if chunk: # filter out keep-alive new chunks
progress.update(len(snake_case_ ) )
temp_file.write(snake_case_ )
progress.close()
def lowercase__ ( snake_case_ :Optional[int] , snake_case_ :str=None , snake_case_ :Optional[int]=False , snake_case_ :List[Any]=None , snake_case_ :List[Any]=10 , snake_case_ :Optional[int]=False , snake_case_ :List[str]=None , snake_case_ :Union[str, Any]=False , ):
if cache_dir is None:
__UpperCAmelCase = TRANSFORMERS_CACHE
if isinstance(snake_case_ , snake_case_ ):
__UpperCAmelCase = str(snake_case_ )
os.makedirs(snake_case_ , exist_ok=snake_case_ )
__UpperCAmelCase = None
if not local_files_only:
try:
__UpperCAmelCase = requests.head(snake_case_ , allow_redirects=snake_case_ , proxies=snake_case_ , timeout=snake_case_ )
if response.status_code == 200:
__UpperCAmelCase = response.headers.get('''ETag''' )
except (EnvironmentError, requests.exceptions.Timeout):
# etag is already None
pass
__UpperCAmelCase = url_to_filename(snake_case_ , snake_case_ )
# get cache path to put the file
__UpperCAmelCase = os.path.join(snake_case_ , snake_case_ )
# etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
# try to get the last downloaded one
if etag is None:
if os.path.exists(snake_case_ ):
return cache_path
else:
__UpperCAmelCase = [
file
for file in fnmatch.filter(os.listdir(snake_case_ ) , filename + '''.*''' )
if not file.endswith('''.json''' ) and not file.endswith('''.lock''' )
]
if len(snake_case_ ) > 0:
return os.path.join(snake_case_ , matching_files[-1] )
else:
# If files cannot be found and local_files_only=True,
# the models might've been found if local_files_only=False
# Notify the user about that
if local_files_only:
raise ValueError(
'''Cannot find the requested files in the cached path and outgoing traffic has been'''
''' disabled. To enable model look-ups and downloads online, set \'local_files_only\''''
''' to False.''' )
return None
# From now on, etag is not None.
if os.path.exists(snake_case_ ) and not force_download:
return cache_path
# Prevent parallel downloads of the same file with a lock.
__UpperCAmelCase = cache_path + '''.lock'''
with FileLock(snake_case_ ):
# If the download just completed while the lock was activated.
if os.path.exists(snake_case_ ) and not force_download:
# Even if returning early like here, the lock will be released.
return cache_path
if resume_download:
__UpperCAmelCase = cache_path + '''.incomplete'''
@contextmanager
def _resumable_file_manager():
with open(snake_case_ , '''a+b''' ) as f:
yield f
__UpperCAmelCase = _resumable_file_manager
if os.path.exists(snake_case_ ):
__UpperCAmelCase = os.stat(snake_case_ ).st_size
else:
__UpperCAmelCase = 0
else:
__UpperCAmelCase = partial(tempfile.NamedTemporaryFile , dir=snake_case_ , delete=snake_case_ )
__UpperCAmelCase = 0
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with temp_file_manager() as temp_file:
print(
'''%s not found in cache or force_download set to True, downloading to %s''' , snake_case_ , temp_file.name , )
http_get(
snake_case_ , snake_case_ , proxies=snake_case_ , resume_size=snake_case_ , user_agent=snake_case_ , )
os.replace(temp_file.name , snake_case_ )
__UpperCAmelCase = {'''url''': url, '''etag''': etag}
__UpperCAmelCase = cache_path + '''.json'''
with open(snake_case_ , '''w''' ) as meta_file:
json.dump(snake_case_ , snake_case_ )
return cache_path
def lowercase__ ( snake_case_ :int , snake_case_ :str=None ):
__UpperCAmelCase = url.encode('''utf-8''' )
    __UpperCAmelCase = sha256(snake_case_ )
    __UpperCAmelCase = url_hash.hexdigest()
    if etag:
        __UpperCAmelCase = etag.encode('''utf-8''' )
        __UpperCAmelCase = sha256(snake_case_ )
filename += "." + etag_hash.hexdigest()
if url.endswith('''.h5''' ):
filename += ".h5"
return filename
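# Illustrative note (digests invented): the helper above derives a stable cache
# filename as sha256(url), appending "." + sha256(etag) when an ETag is known,
# so a changed ETag produces a fresh cache entry.
#
#   url_to_filename("https://cdn.huggingface.co/demo/pytorch_model.bin")
#   # -> "ab12...ef"
#   url_to_filename("https://cdn.huggingface.co/demo/pytorch_model.bin", '"some-etag"')
#   # -> "ab12...ef.90ff...1c"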
def lowercase__ ( snake_case_ :Dict , snake_case_ :List[Any]=None , snake_case_ :List[Any]=False , snake_case_ :Optional[int]=None , snake_case_ :List[Any]=False , snake_case_ :Optional[Any]=None , snake_case_ :Any=False , snake_case_ :int=False , snake_case_ :Optional[int]=False , ):
if cache_dir is None:
__UpperCAmelCase = TRANSFORMERS_CACHE
if isinstance(snake_case_ , snake_case_ ):
__UpperCAmelCase = str(snake_case_ )
if isinstance(snake_case_ , snake_case_ ):
__UpperCAmelCase = str(snake_case_ )
if is_remote_url(snake_case_ ):
# URL, so get it from the cache (downloading if necessary)
__UpperCAmelCase = get_from_cache(
snake_case_ , cache_dir=snake_case_ , force_download=snake_case_ , proxies=snake_case_ , resume_download=snake_case_ , user_agent=snake_case_ , local_files_only=snake_case_ , )
elif os.path.exists(snake_case_ ):
# File, and it exists.
__UpperCAmelCase = url_or_filename
elif urlparse(snake_case_ ).scheme == "":
# File, but it doesn't exist.
raise EnvironmentError('''file {} not found'''.format(snake_case_ ) )
else:
# Something unknown
raise ValueError('''unable to parse {} as a URL or as a local path'''.format(snake_case_ ) )
if extract_compressed_file:
if not is_zipfile(snake_case_ ) and not tarfile.is_tarfile(snake_case_ ):
return output_path
# Path where we extract compressed archives
# We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
__UpperCAmelCase , __UpperCAmelCase = os.path.split(snake_case_ )
__UpperCAmelCase = output_file.replace('''.''' , '''-''' ) + '''-extracted'''
__UpperCAmelCase = os.path.join(snake_case_ , snake_case_ )
if os.path.isdir(snake_case_ ) and os.listdir(snake_case_ ) and not force_extract:
return output_path_extracted
# Prevent parallel extractions
__UpperCAmelCase = output_path + '''.lock'''
with FileLock(snake_case_ ):
shutil.rmtree(snake_case_ , ignore_errors=snake_case_ )
os.makedirs(snake_case_ )
if is_zipfile(snake_case_ ):
with ZipFile(snake_case_ , '''r''' ) as zip_file:
zip_file.extractall(snake_case_ )
zip_file.close()
elif tarfile.is_tarfile(snake_case_ ):
__UpperCAmelCase = tarfile.open(snake_case_ )
tar_file.extractall(snake_case_ )
tar_file.close()
else:
raise EnvironmentError('''Archive format of {} could not be identified'''.format(snake_case_ ) )
return output_path_extracted
return output_path
def lowercase__ ( snake_case_ :List[Any] , snake_case_ :List[Any]="," ):
assert isinstance(snake_case_ , snake_case_ )
if os.path.isfile(snake_case_ ):
with open(snake_case_ ) as f:
__UpperCAmelCase = eval(f.read() )
else:
__UpperCAmelCase = requests.get(snake_case_ )
try:
            __UpperCAmelCase = req.json()
except Exception:
__UpperCAmelCase = req.content.decode()
assert data is not None, "could not connect"
try:
__UpperCAmelCase = eval(snake_case_ )
except Exception:
__UpperCAmelCase = data.split('''\n''' )
req.close()
return data
def lowercase__ ( snake_case_ :Union[str, Any] ):
__UpperCAmelCase = requests.get(snake_case_ )
__UpperCAmelCase = np.array(Image.open(BytesIO(response.content ) ) )
return img
def lowercase__ ( snake_case_ :List[str] ):
__UpperCAmelCase = url.split('''/''' )[-1]
if fn not in os.listdir(os.getcwd() ):
wget.download(snake_case_ )
with open(snake_case_ , '''rb''' ) as stream:
__UpperCAmelCase = pkl.load(snake_case_ )
__UpperCAmelCase = weights.pop('''model''' )
__UpperCAmelCase = {}
for k, v in model.items():
__UpperCAmelCase = torch.from_numpy(snake_case_ )
if "running_var" in k:
__UpperCAmelCase = torch.tensor([0] )
__UpperCAmelCase = k.replace('''running_var''' , '''num_batches_tracked''' )
__UpperCAmelCase = zero
return new
def lowercase__ ( ):
    print(F'''{os.path.abspath(os.path.join(__file__ , os.pardir ) )}/demo.ipynb''' )
def lowercase__ ( snake_case_ :Tuple , snake_case_ :Tuple="RGB" ):
assert isinstance(snake_case_ , snake_case_ )
if os.path.isfile(snake_case_ ):
__UpperCAmelCase = cva.imread(snake_case_ )
else:
__UpperCAmelCase = get_image_from_url(snake_case_ )
assert img is not None, F'''could not connect to: {im}'''
__UpperCAmelCase = cva.cvtColor(snake_case_ , cva.COLOR_BGR2RGB )
if input_format == "RGB":
__UpperCAmelCase = img[:, :, ::-1]
return img
def lowercase__ ( snake_case_ :Any , snake_case_ :int=1 ):
return (images[i : i + batch] for i in range(0 , len(snake_case_ ) , snake_case_ ))
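# Hedged illustration (not from the source) of the nested Config behaviour
# implemented above: dict values become child Config objects, and dotted keys
# passed to setattr walk the hierarchy.
#
#   cfg = Config({"model": {"hidden_dim": 512}})
#   cfg.model.hidden_dim                 # -> 512
#   setattr(cfg, "model.hidden_dim", 1024)
#   cfg.model.hidden_dim                 # -> 1024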
| 86
| 0
|
'''simple docstring'''
from pickle import UnpicklingError
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict
from ..utils import logging
UpperCAmelCase_ : List[Any] = logging.get_logger(__name__)
def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
try:
with open(SCREAMING_SNAKE_CASE__ , """rb""" ) as flax_state_f:
_SCREAMING_SNAKE_CASE : Dict = from_bytes(SCREAMING_SNAKE_CASE__ , flax_state_f.read() )
except UnpicklingError as e:
try:
with open(SCREAMING_SNAKE_CASE__ ) as f:
if f.read().startswith("""version""" ):
raise OSError(
"""You seem to have cloned a repository without having git-lfs installed. Please"""
""" install git-lfs and run `git lfs install` followed by `git lfs pull` in the"""
""" folder you cloned.""" )
else:
raise ValueError from e
except (UnicodeDecodeError, ValueError):
raise EnvironmentError(f"""Unable to convert {model_file} to Flax deserializable object. """ )
return load_flax_weights_in_pytorch_model(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
try:
import torch # noqa: F401
except ImportError:
logger.error(
"""Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see"""
""" https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"""
""" instructions.""" )
raise
# check if we have bf16 weights
    _SCREAMING_SNAKE_CASE : List[Any] = flatten_dict(jax.tree_util.tree_map(lambda x : x.dtype == jnp.bfloat16 , SCREAMING_SNAKE_CASE__ ) ).values()
if any(SCREAMING_SNAKE_CASE__ ):
# convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16
# and bf16 is not fully supported in PT yet.
logger.warning(
"""Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` """
"""before loading those in PyTorch model.""" )
        _SCREAMING_SNAKE_CASE : Dict = jax.tree_util.tree_map(
            lambda params : params.astype(np.float32 ) if params.dtype == jnp.bfloat16 else params , SCREAMING_SNAKE_CASE__ )
_SCREAMING_SNAKE_CASE : Optional[Any] = """"""
_SCREAMING_SNAKE_CASE : str = flatten_dict(SCREAMING_SNAKE_CASE__ , sep=""".""" )
_SCREAMING_SNAKE_CASE : str = pt_model.state_dict()
# keep track of unexpected & missing keys
_SCREAMING_SNAKE_CASE : Tuple = []
_SCREAMING_SNAKE_CASE : int = set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
_SCREAMING_SNAKE_CASE : Any = flax_key_tuple.split(""".""" )
if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
_SCREAMING_SNAKE_CASE : Optional[Any] = flax_key_tuple_array[:-1] + ["""weight"""]
_SCREAMING_SNAKE_CASE : List[str] = jnp.transpose(SCREAMING_SNAKE_CASE__ , (3, 2, 0, 1) )
elif flax_key_tuple_array[-1] == "kernel":
_SCREAMING_SNAKE_CASE : Union[str, Any] = flax_key_tuple_array[:-1] + ["""weight"""]
_SCREAMING_SNAKE_CASE : Any = flax_tensor.T
elif flax_key_tuple_array[-1] == "scale":
_SCREAMING_SNAKE_CASE : Optional[int] = flax_key_tuple_array[:-1] + ["""weight"""]
if "time_embedding" not in flax_key_tuple_array:
for i, flax_key_tuple_string in enumerate(SCREAMING_SNAKE_CASE__ ):
_SCREAMING_SNAKE_CASE : Optional[int] = (
flax_key_tuple_string.replace("""_0""" , """.0""" )
.replace("""_1""" , """.1""" )
.replace("""_2""" , """.2""" )
.replace("""_3""" , """.3""" )
.replace("""_4""" , """.4""" )
.replace("""_5""" , """.5""" )
.replace("""_6""" , """.6""" )
.replace("""_7""" , """.7""" )
.replace("""_8""" , """.8""" )
.replace("""_9""" , """.9""" )
)
_SCREAMING_SNAKE_CASE : Tuple = """.""".join(SCREAMING_SNAKE_CASE__ )
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
f"""Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected """
f"""to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
else:
# add weight to pytorch dict
_SCREAMING_SNAKE_CASE : Union[str, Any] = np.asarray(SCREAMING_SNAKE_CASE__ ) if not isinstance(SCREAMING_SNAKE_CASE__ , np.ndarray ) else flax_tensor
_SCREAMING_SNAKE_CASE : int = torch.from_numpy(SCREAMING_SNAKE_CASE__ )
# remove from missing keys
missing_keys.remove(SCREAMING_SNAKE_CASE__ )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(SCREAMING_SNAKE_CASE__ )
pt_model.load_state_dict(SCREAMING_SNAKE_CASE__ )
# re-transform missing_keys to list
_SCREAMING_SNAKE_CASE : Optional[Any] = list(SCREAMING_SNAKE_CASE__ )
if len(SCREAMING_SNAKE_CASE__ ) > 0:
logger.warning(
"""Some weights of the Flax model were not used when initializing the PyTorch model"""
f""" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"""
f""" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"""
""" (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"""
f""" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"""
""" to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"""
""" FlaxBertForSequenceClassification model).""" )
if len(SCREAMING_SNAKE_CASE__ ) > 0:
logger.warning(
f"""Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"""
f""" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"""
""" use it for predictions and inference.""" )
return pt_model
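# Hedged usage sketch (model choice illustrative): given `flax_params`, a
# nested dict of Flax arrays, the mapper above copies them into a freshly
# initialised PyTorch model.
#
#   from transformers import BertConfig, BertModel
#
#   pt_model = BertModel(BertConfig())
#   pt_model = load_flax_weights_in_pytorch_model(pt_model, flax_params)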
| 200
|
'''simple docstring'''
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
class lowercase__ ( ctypes.Structure ):
'''simple docstring'''
A_ : Optional[Any] = [("""size""", ctypes.c_int), ("""visible""", ctypes.c_byte)]
def snake_case_ ( ):
"""simple docstring"""
if os.name == "nt":
_SCREAMING_SNAKE_CASE : Tuple = CursorInfo()
        _SCREAMING_SNAKE_CASE : Tuple = ctypes.windll.kernel32.GetStdHandle(-11 )
        ctypes.windll.kernel32.GetConsoleCursorInfo(SCREAMING_SNAKE_CASE__ , ctypes.byref(SCREAMING_SNAKE_CASE__ ) )
        _SCREAMING_SNAKE_CASE : Optional[Any] = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(SCREAMING_SNAKE_CASE__ , ctypes.byref(SCREAMING_SNAKE_CASE__ ) )
elif os.name == "posix":
sys.stdout.write("""\033[?25l""" )
sys.stdout.flush()
def snake_case_ ( ):
"""simple docstring"""
if os.name == "nt":
_SCREAMING_SNAKE_CASE : int = CursorInfo()
        _SCREAMING_SNAKE_CASE : List[str] = ctypes.windll.kernel32.GetStdHandle(-11 )
        ctypes.windll.kernel32.GetConsoleCursorInfo(SCREAMING_SNAKE_CASE__ , ctypes.byref(SCREAMING_SNAKE_CASE__ ) )
        _SCREAMING_SNAKE_CASE : Tuple = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(SCREAMING_SNAKE_CASE__ , ctypes.byref(SCREAMING_SNAKE_CASE__ ) )
elif os.name == "posix":
sys.stdout.write("""\033[?25h""" )
sys.stdout.flush()
@contextmanager
def snake_case_ ( ):
"""simple docstring"""
try:
hide_cursor()
yield
finally:
show_cursor()
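# Usage sketch (illustrative; the mangled context manager above is shown here
# under the hypothetical name `hidden_cursor`): the cursor is hidden for the
# duration of the block and restored even if the block raises.
#
#   with hidden_cursor():
#       run_long_terminal_task()   # hypothetical long-running terminal UI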
| 200
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__UpperCamelCase : Optional[int] = {
"configuration_xlm": ["XLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMConfig", "XLMOnnxConfig"],
"tokenization_xlm": ["XLMTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : List[Any] = [
"XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLMForMultipleChoice",
"XLMForQuestionAnswering",
"XLMForQuestionAnsweringSimple",
"XLMForSequenceClassification",
"XLMForTokenClassification",
"XLMModel",
"XLMPreTrainedModel",
"XLMWithLMHeadModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Union[str, Any] = [
"TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXLMForMultipleChoice",
"TFXLMForQuestionAnsweringSimple",
"TFXLMForSequenceClassification",
"TFXLMForTokenClassification",
"TFXLMMainLayer",
"TFXLMModel",
"TFXLMPreTrainedModel",
"TFXLMWithLMHeadModel",
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
__UpperCamelCase : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
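# Hedged note (illustrative): with the _LazyModule pattern above, importing the
# package is cheap; heavy framework symbols resolve on first attribute access.
#
#   from transformers.models import xlm   # no torch/tf import happens here
#   _ = xlm.XLMModel                      # first access triggers the torch import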
| 357
|
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def __A ( __lowerCamelCase , __lowerCamelCase ) -> Optional[int]:
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
# expert layer
a = flax_key_tuple[:-1] + ("""weight""",)
a = torch.permute(__lowerCamelCase , (0, 2, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(__lowerCamelCase ):
# linear layer
a = flax_key_tuple[:-1] + ("""weight""",)
a = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
a = flax_key_tuple[:-1] + ("""weight""",)
return flax_key_tuple, flax_tensor
def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> List[Any]:
if "metadata" in layer:
a = layer.split("""metadata""" )
a = """""".join(split_layer[0] )[:-1]
a = [tuple(("""metadata""" + split_layer[1]).split("""/""" ) )]
elif "kvstore" in layer:
a = layer.split("""kvstore""" )
a = """""".join(split_layer[0] )[:-1]
a = [tuple(("""kvstore""" + split_layer[1]).split("""/""" ) )]
else:
a = layer.split("""/""" )
a = """/""".join(split_layer[:-1] )
a = (split_layer[-1],)
if "kvstore/path" in layer:
a = f'{switch_checkpoint_path}/{checkpoint_info[layer]}'
elif "kvstore/driver" in layer:
a = """file"""
else:
a = checkpoint_info[layer]
return curr_real_layer_name, split_layer, content
def __A ( __lowerCamelCase , __lowerCamelCase ) -> Union[str, Any]:
a = rename_keys(__lowerCamelCase )
a = {}
for k, v in current_block.items():
a = v
a = new_current_block
torch.save(__lowerCamelCase , __lowerCamelCase )
def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = WEIGHTS_NAME ) -> List[str]:
a = convert_file_size_to_int(__lowerCamelCase )
a = []
a = {}
a = 0
a = 0
os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase )
with gfile.GFile(switch_checkpoint_path + """/checkpoint""" , """rb""" ) as fp:
a = serialization.msgpack_restore(fp.read() )["""optimizer"""]["""target"""]
a = flatten_dict(__lowerCamelCase , sep="""/""" )
a = {}
for layer in checkpoint_info.keys():
a , a , a = get_key_and_tensorstore_dict(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
if curr_real_layer_name in all_layers:
a = content
else:
a = {split_layer[-1]: content}
for key in all_layers.keys():
# open tensorstore file
a = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result()
a = torch.tensor(__lowerCamelCase )
a = raw_weights.numel() * dtype_byte_size(raw_weights.dtype )
# use the renaming pattern from the small conversion scripts
a , a = rename_base_flax_keys(tuple(key.split("""/""" ) ) , __lowerCamelCase )
a = """/""".join(__lowerCamelCase )
# If this weight is going to tip up over the maximal size, we split.
if current_block_size + weight_size > max_shard_size:
a = os.path.join(
__lowerCamelCase , weights_name.replace(""".bin""" , f'-{len(__lowerCamelCase )+1:05d}-of-???.bin' ) )
rename_and_save_block(__lowerCamelCase , __lowerCamelCase )
sharded_state_dicts.append(current_block.keys() )
del current_block
a = {}
a = 0
a = raw_weights.to(getattr(__lowerCamelCase , __lowerCamelCase ) )
current_block_size += weight_size
total_size += weight_size
# Add the last block
a = os.path.join(__lowerCamelCase , weights_name.replace(""".bin""" , f'-{len(__lowerCamelCase )+1:05d}-of-???.bin' ) )
rename_and_save_block(__lowerCamelCase , __lowerCamelCase )
sharded_state_dicts.append(current_block.keys() )
# If we only have one shard, we return it
if len(__lowerCamelCase ) == 1:
return {weights_name: sharded_state_dicts[0]}, None
# Otherwise, let's build the index
a = {}
a = {}
for idx, shard in enumerate(__lowerCamelCase ):
        a = weights_name.replace(
            """.bin""" , f'-{idx+1:05d}-of-{len(__lowerCamelCase ):05d}.bin' )
a = os.path.join(__lowerCamelCase , weights_name.replace(""".bin""" , f'-{idx+1:05d}-of-???.bin' ) )
os.rename(__lowerCamelCase , os.path.join(__lowerCamelCase , __lowerCamelCase ) )
a = shard
for key in shard:
a = shard_file
# Add the metadata
a = {"""total_size""": total_size}
a = {"""metadata""": metadata, """weight_map""": weight_map}
with open(os.path.join(__lowerCamelCase , __lowerCamelCase ) , """w""" , encoding="""utf-8""" ) as f:
a = json.dumps(__lowerCamelCase , indent=2 , sort_keys=__lowerCamelCase ) + """\n"""
f.write(__lowerCamelCase )
return metadata, index
if __name__ == "__main__":
__UpperCamelCase : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--switch_t5x_checkpoint_path",
default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600",
type=str,
required=False,
help="Path to a directory containing a folder per layer. Follows the original Google format.",
)
parser.add_argument("--max_shard_size", default="10GB", required=False, help="Max shard size")
parser.add_argument("--dtype", default="bfloat16", type=str, required=False, help="dtype of the saved model")
parser.add_argument(
"--pytorch_dump_folder_path",
default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted",
type=str,
required=False,
help="Path to the output pytorch model.",
)
__UpperCamelCase : Any = parser.parse_args()
shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def __A ( ) -> Tuple:
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, T5Tokenizer
a = SwitchTransformersConfig.from_pretrained("""google/switch-base-8""" )
config.save_pretrained("""/home/arthur_huggingface_co/transformers/switch_converted""" )
a = SwitchTransformersForConditionalGeneration.from_pretrained(
"""/home/arthur_huggingface_co/transformers/switch_converted""" , device_map="""auto""" )
    a = T5Tokenizer.from_pretrained("""t5-small""" )
a = """A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."""
a = tokenizer(__lowerCamelCase , return_tensors="""pt""" ).input_ids
a = model.generate(__lowerCamelCase , decoder_start_token_id=0 )
print(tokenizer.decode(out[0] ) )
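# Hedged illustration (all values invented): the sharder above writes one .bin
# file per shard plus a WEIGHTS_INDEX_NAME json of roughly this shape.
#
#   {
#     "metadata": {"total_size": 26953662464},
#     "weight_map": {
#       "shared.weight": "pytorch_model-00001-of-00072.bin",
#       "encoder.block.0.layer.0.SelfAttention.q.weight": "pytorch_model-00001-of-00072.bin"
#     }
#   }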
| 347
| 0
|
'''simple docstring'''
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
_UpperCamelCase = '''src/transformers'''
# This is to make sure the transformers module imported is the one in the repo.
_UpperCamelCase = direct_transformers_import(PATH_TO_TRANSFORMERS)
_UpperCamelCase = transformers.models.auto.configuration_auto.CONFIG_MAPPING
_UpperCamelCase = {
# used to compute the property `self.chunk_length`
'''EncodecConfig''': ['''overlap'''],
# used as `self.bert_model = BertModel(config, ...)`
'''DPRConfig''': True,
# not used in modeling files, but it's an important information
'''FSMTConfig''': ['''langs'''],
# used internally in the configuration class file
'''GPTNeoConfig''': ['''attention_types'''],
# used internally in the configuration class file
'''EsmConfig''': ['''is_folding_model'''],
# used during training (despite we don't have training script for these models yet)
'''Mask2FormerConfig''': ['''ignore_value'''],
# `ignore_value` used during training (despite we don't have training script for these models yet)
# `norm` used in conversion script (despite not using in the modeling file)
'''OneFormerConfig''': ['''ignore_value''', '''norm'''],
# used during preprocessing and collation, see `collating_graphormer.py`
'''GraphormerConfig''': ['''spatial_pos_max'''],
# used internally in the configuration class file
'''T5Config''': ['''feed_forward_proj'''],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
'''MT5Config''': ['''feed_forward_proj''', '''tokenizer_class'''],
'''UMT5Config''': ['''feed_forward_proj''', '''tokenizer_class'''],
# used internally in the configuration class file
'''LongT5Config''': ['''feed_forward_proj'''],
# used internally in the configuration class file
'''SwitchTransformersConfig''': ['''feed_forward_proj'''],
# having default values other than `1e-5` - we can't fix them without breaking
'''BioGptConfig''': ['''layer_norm_eps'''],
# having default values other than `1e-5` - we can't fix them without breaking
'''GLPNConfig''': ['''layer_norm_eps'''],
# having default values other than `1e-5` - we can't fix them without breaking
'''SegformerConfig''': ['''layer_norm_eps'''],
# having default values other than `1e-5` - we can't fix them without breaking
'''CvtConfig''': ['''layer_norm_eps'''],
# having default values other than `1e-5` - we can't fix them without breaking
'''PerceiverConfig''': ['''layer_norm_eps'''],
# used internally to calculate the feature size
'''InformerConfig''': ['''num_static_real_features''', '''num_time_features'''],
# used internally to calculate the feature size
'''TimeSeriesTransformerConfig''': ['''num_static_real_features''', '''num_time_features'''],
# used internally to calculate the feature size
'''AutoformerConfig''': ['''num_static_real_features''', '''num_time_features'''],
# used internally to calculate `mlp_dim`
'''SamVisionConfig''': ['''mlp_ratio'''],
# For (head) training, but so far not implemented
'''ClapAudioConfig''': ['''num_classes'''],
# Not used, but providing useful information to users
'''SpeechT5HifiGanConfig''': ['''sampling_rate'''],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
'''CLIPSegConfig''': True,
'''DeformableDetrConfig''': True,
'''DetaConfig''': True,
'''DinatConfig''': True,
'''DonutSwinConfig''': True,
'''EfficientFormerConfig''': True,
'''FSMTConfig''': True,
'''JukeboxConfig''': True,
'''LayoutLMv2Config''': True,
'''MaskFormerSwinConfig''': True,
'''MT5Config''': True,
'''NatConfig''': True,
'''OneFormerConfig''': True,
'''PerceiverConfig''': True,
'''RagConfig''': True,
'''SpeechT5Config''': True,
'''SwinConfig''': True,
'''Swin2SRConfig''': True,
'''Swinv2Config''': True,
'''SwitchTransformersConfig''': True,
'''TableTransformerConfig''': True,
'''TapasConfig''': True,
'''TransfoXLConfig''': True,
'''UniSpeechConfig''': True,
'''UniSpeechSatConfig''': True,
'''WavLMConfig''': True,
'''WhisperConfig''': True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
'''JukeboxPriorConfig''': True,
# TODO: @Younes (for `is_decoder`)
'''Pix2StructTextConfig''': True,
}
)
def lowercase_ ( lowerCAmelCase__ : int , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Any , lowerCAmelCase__ : int ):
"""simple docstring"""
__UpperCAmelCase : str = False
for attribute in attributes:
for modeling_source in source_strings:
# check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
if (
f'config.{attribute}' in modeling_source
or f'getattr(config, "{attribute}"' in modeling_source
or f'getattr(self.config, "{attribute}"' in modeling_source
):
__UpperCAmelCase : Union[str, Any] = True
# Deal with multi-line cases
elif (
re.search(
rf'getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"{attribute}"' , lowerCAmelCase__ , )
is not None
):
__UpperCAmelCase : int = True
# `SequenceSummary` is called with `SequenceSummary(config)`
elif attribute in [
"summary_type",
"summary_use_proj",
"summary_activation",
"summary_last_dropout",
"summary_proj_to_labels",
"summary_first_dropout",
]:
if "SequenceSummary" in modeling_source:
__UpperCAmelCase : List[Any] = True
if attribute_used:
break
if attribute_used:
break
# common and important attributes, even if they do not always appear in the modeling files
__UpperCAmelCase : List[str] = [
"""bos_index""",
"""eos_index""",
"""pad_index""",
"""unk_index""",
"""mask_index""",
"""image_size""",
"""use_cache""",
"""out_features""",
"""out_indices""",
]
__UpperCAmelCase : Dict = ["""encoder_no_repeat_ngram_size"""]
# Special cases to be allowed
__UpperCAmelCase : str = True
if not attribute_used:
__UpperCAmelCase : List[str] = False
for attribute in attributes:
# Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
if attribute in ["is_encoder_decoder"] and default_value is True:
__UpperCAmelCase : Optional[int] = True
elif attribute in ["tie_word_embeddings"] and default_value is False:
__UpperCAmelCase : int = True
# Allow cases without checking the default value in the configuration class
elif attribute in attributes_to_allow + attributes_used_in_generation:
__UpperCAmelCase : Optional[Any] = True
elif attribute.endswith("""_token_id""" ):
__UpperCAmelCase : int = True
# configuration class specific cases
if not case_allowed:
__UpperCAmelCase : Dict = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__ , [] )
__UpperCAmelCase : Union[str, Any] = allowed_cases is True or attribute in allowed_cases
return attribute_used or case_allowed
def lowercase_ ( lowerCAmelCase__ : Union[str, Any] ):
"""simple docstring"""
__UpperCAmelCase : Any = dict(inspect.signature(config_class.__init__ ).parameters )
__UpperCAmelCase : Optional[int] = [x for x in list(signature.keys() ) if x not in ["""self""", """kwargs"""]]
__UpperCAmelCase : Optional[Any] = [signature[param].default for param in parameter_names]
# If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
# as one variant is used, the test should pass
__UpperCAmelCase : Optional[int] = {}
if len(config_class.attribute_map ) > 0:
__UpperCAmelCase : List[Any] = {v: k for k, v in config_class.attribute_map.items()}
# Get the path to modeling source files
__UpperCAmelCase : Any = inspect.getsourcefile(lowerCAmelCase__ )
__UpperCAmelCase : str = os.path.dirname(lowerCAmelCase__ )
# Let's check against all frameworks: as long as one framework uses an attribute, we are good.
__UpperCAmelCase : Union[str, Any] = [os.path.join(lowerCAmelCase__ , lowerCAmelCase__ ) for fn in os.listdir(lowerCAmelCase__ ) if fn.startswith("""modeling_""" )]
# Get the source code strings
__UpperCAmelCase : Union[str, Any] = []
for path in modeling_paths:
if os.path.isfile(lowerCAmelCase__ ):
with open(lowerCAmelCase__ ) as fp:
modeling_sources.append(fp.read() )
__UpperCAmelCase : Optional[int] = []
for config_param, default_value in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
# `attributes` here is all the variant names for `config_param`
__UpperCAmelCase : int = [config_param]
# some configuration classes have non-empty `attribute_map`, and both names could be used in the
# corresponding modeling files. As long as one of them appears, it is fine.
if config_param in reversed_attribute_map:
attributes.append(reversed_attribute_map[config_param] )
if not check_attribute_being_used(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
unused_attributes.append(attributes[0] )
return sorted(lowerCAmelCase__ )
def lowercase_ ( ):
"""simple docstring"""
__UpperCAmelCase : List[str] = {}
for _config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in _config_class.__module__:
continue
# Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
__UpperCAmelCase : List[Any] = [
cls
for name, cls in inspect.getmembers(
inspect.getmodule(_config_class ) , lambda lowerCAmelCase__ : inspect.isclass(lowerCAmelCase__ )
and issubclass(lowerCAmelCase__ , lowerCAmelCase__ )
and inspect.getmodule(lowerCAmelCase__ ) == inspect.getmodule(_config_class ) , )
]
for config_class in config_classes_in_module:
__UpperCAmelCase : Tuple = check_config_attributes_being_used(lowerCAmelCase__ )
if len(lowerCAmelCase__ ) > 0:
__UpperCAmelCase : Dict = unused_attributes
if len(lowerCAmelCase__ ) > 0:
__UpperCAmelCase : Optional[Any] = """The following configuration classes contain unused attributes in the corresponding modeling files:\n"""
for name, attributes in configs_with_unused_attributes.items():
error += f'{name}: {attributes}\n'
raise ValueError(lowerCAmelCase__ )
if __name__ == "__main__":
check_config_attributes()
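# Hedged example of the failure this script reports (config and attribute names
# invented): a configuration argument that never surfaces in any modeling_*.py
# file aborts the run with something like
#
#   ValueError: The following configuration classes contain unused attributes
#   in the corresponding modeling files:
#   MyConfig: ['my_unused_flag']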
| 254
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_UpperCamelCase = {
'''configuration_pegasus_x''': ['''PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''PegasusXConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase = [
'''PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PegasusXForConditionalGeneration''',
'''PegasusXModel''',
'''PegasusXPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
_UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 254
| 1
|
import copy
import json
import os
import tempfile
from transformers import is_torch_available
from .test_configuration_utils import config_common_kwargs
class UpperCAmelCase ( object ):
def __init__(self : Tuple , snake_case__ : Any , snake_case__ : str=None , snake_case__ : Tuple=True , snake_case__ : Union[str, Any]=None , **snake_case__ : Union[str, Any] ) -> Dict:
'''simple docstring'''
snake_case : Optional[int] = parent
snake_case : Union[str, Any] = config_class
snake_case : Dict = has_text_modality
snake_case : int = kwargs
snake_case : Any = common_properties
def _SCREAMING_SNAKE_CASE (self : int ) -> Dict:
'''simple docstring'''
snake_case : str = self.config_class(**self.inputs_dict )
snake_case : int = (
["hidden_size", "num_attention_heads", "num_hidden_layers"]
if self.common_properties is None
else self.common_properties
)
# Add common fields for text models
if self.has_text_modality:
common_properties.extend(["vocab_size"] )
# Test that config has the common properties as getters
for prop in common_properties:
self.parent.assertTrue(hasattr(snake_case__ , snake_case__ ) , msg=f"""`{prop}` does not exist""" )
# Test that config has the common properties as setter
for idx, name in enumerate(snake_case__ ):
try:
setattr(snake_case__ , snake_case__ , snake_case__ )
self.parent.assertEqual(
getattr(snake_case__ , snake_case__ ) , snake_case__ , msg=f"""`{name} value {idx} expected, but was {getattr(snake_case__ , snake_case__ )}""" )
except NotImplementedError:
# Some models might not be able to implement setters for common_properties
# In that case, a NotImplementedError is raised
pass
# Test if config class can be called with Config(prop_name=..)
for idx, name in enumerate(snake_case__ ):
try:
snake_case : Dict = self.config_class(**{name: idx} )
self.parent.assertEqual(
getattr(snake_case__ , snake_case__ ) , snake_case__ , msg=f"""`{name} value {idx} expected, but was {getattr(snake_case__ , snake_case__ )}""" )
except NotImplementedError:
# Some models might not be able to implement setters for common_properties
# In that case, a NotImplementedError is raised
pass
def _SCREAMING_SNAKE_CASE (self : str ) -> Optional[int]:
'''simple docstring'''
snake_case : int = self.config_class(**self.inputs_dict )
snake_case : Optional[Any] = json.loads(config.to_json_string() )
for key, value in self.inputs_dict.items():
self.parent.assertEqual(obj[key] , snake_case__ )
def _SCREAMING_SNAKE_CASE (self : List[Any] ) -> Dict:
'''simple docstring'''
snake_case : Dict = self.config_class(**self.inputs_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case : str = os.path.join(snake_case__ , "config.json" )
config_first.to_json_file(snake_case__ )
snake_case : Any = self.config_class.from_json_file(snake_case__ )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def _SCREAMING_SNAKE_CASE (self : str ) -> List[str]:
'''simple docstring'''
snake_case : int = self.config_class(**self.inputs_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
config_first.save_pretrained(snake_case__ )
snake_case : Tuple = self.config_class.from_pretrained(snake_case__ )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def _SCREAMING_SNAKE_CASE (self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
snake_case : Tuple = self.config_class(**self.inputs_dict )
snake_case : List[str] = "test"
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case : Tuple = os.path.join(snake_case__ , snake_case__ )
config_first.save_pretrained(snake_case__ )
snake_case : Any = self.config_class.from_pretrained(snake_case__ , subfolder=snake_case__ )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def _SCREAMING_SNAKE_CASE (self : Optional[Any] ) -> int:
'''simple docstring'''
snake_case : int = self.config_class(**self.inputs_dict , num_labels=5 )
self.parent.assertEqual(len(config.idalabel ) , 5 )
self.parent.assertEqual(len(config.labelaid ) , 5 )
snake_case : Union[str, Any] = 3
self.parent.assertEqual(len(config.idalabel ) , 3 )
self.parent.assertEqual(len(config.labelaid ) , 3 )
def _SCREAMING_SNAKE_CASE (self : Tuple ) -> List[Any]:
'''simple docstring'''
if self.config_class.is_composition:
return
snake_case : Dict = self.config_class()
self.parent.assertIsNotNone(snake_case__ )
def _SCREAMING_SNAKE_CASE (self : int ) -> Optional[int]:
'''simple docstring'''
snake_case : Optional[Any] = copy.deepcopy(snake_case__ )
snake_case : List[str] = self.config_class(**snake_case__ )
snake_case : Dict = []
for key, value in config_common_kwargs.items():
if key == "torch_dtype":
if not is_torch_available():
continue
else:
import torch
if config.torch_dtype != torch.floataa:
wrong_values.append(("torch_dtype", config.torch_dtype, torch.floataa) )
elif getattr(snake_case__ , snake_case__ ) != value:
wrong_values.append((key, getattr(snake_case__ , snake_case__ ), value) )
if len(snake_case__ ) > 0:
snake_case : Tuple = "\n".join([f"""- {v[0]}: got {v[1]} instead of {v[2]}""" for v in wrong_values] )
raise ValueError(f"""The following keys were not properly set in the config:\n{errors}""" )
def _SCREAMING_SNAKE_CASE (self : int ) -> List[Any]:
'''simple docstring'''
self.create_and_test_config_common_properties()
self.create_and_test_config_to_json_string()
self.create_and_test_config_to_json_file()
self.create_and_test_config_from_and_save_pretrained()
self.create_and_test_config_from_and_save_pretrained_subfolder()
self.create_and_test_config_with_num_labels()
self.check_config_can_be_init_without_params()
self.check_config_arguments_init()
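# --- Usage sketch (not part of the original file) ---
# How a model-specific test might drive ConfigTester; `MyConfig` is a
# hypothetical PretrainedConfig subclass, not something defined above.
#
#   class MyConfigTest(unittest.TestCase):
#       def setUp(self):
#           self.config_tester = ConfigTester(self, config_class=MyConfig, hidden_size=37)
#
#       def test_config(self):
#           self.config_tester.run_common_tests()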
import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
__lowerCamelCase = logging.get_logger(__name__)
class PerceiverFeatureExtractor(PerceiverImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PerceiverImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
logger = logging.get_logger(__name__)
@dataclass
class TensorFlowBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        # Legacy __init__: maps deprecated --no_* flags onto their positive counterparts.
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                kwargs[positive_arg] = not kwargs.pop(deprecated_arg)
                logger.warning(
                    f"{deprecated_arg} is depreciated. Please use --no-{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )
        self.tpu_name = kwargs.pop("tpu_name", self.tpu_name)
        self.device_idx = kwargs.pop("device_idx", self.device_idx)
        self.eager_mode = kwargs.pop("eager_mode", self.eager_mode)
        self.use_xla = kwargs.pop("use_xla", self.use_xla)
        super().__init__(**kwargs)

    tpu_name: str = field(
        default=None,
        metadata={"help": "Name of TPU"},
    )
    device_idx: int = field(
        default=0,
        metadata={"help": "CPU / GPU device index. Defaults to 0."},
    )
    eager_mode: bool = field(default=False, metadata={"help": "Benchmark models in eager model."})
    use_xla: bool = field(
        default=False,
        metadata={
            "help": "Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`."
        },
    )

    @cached_property
    def _setup_tpu(self) -> Tuple["tf.distribute.cluster_resolver.TPUClusterResolver"]:
        requires_backends(self, ["tf"])
        tpu = None
        if self.tpu:
            try:
                if self.tpu_name:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name)
                else:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
            except ValueError:
                tpu = None
        return tpu

    @cached_property
    def _setup_strategy(self) -> Tuple["tf.distribute.Strategy", "tf.distribute.cluster_resolver.TPUClusterResolver"]:
        requires_backends(self, ["tf"])
        if self.is_tpu:
            tf.config.experimental_connect_to_cluster(self._setup_tpu)
            tf.tpu.experimental.initialize_tpu_system(self._setup_tpu)

            strategy = tf.distribute.TPUStrategy(self._setup_tpu)
        else:
            # currently no multi gpu is allowed
            if self.is_gpu:
                # TODO: Currently only single GPU is supported
                tf.config.set_visible_devices(self.gpu_list[self.device_idx], "GPU")
                strategy = tf.distribute.OneDeviceStrategy(device=f"/gpu:{self.device_idx}")
            else:
                tf.config.set_visible_devices([], "GPU")  # disable GPU
                strategy = tf.distribute.OneDeviceStrategy(device=f"/cpu:{self.device_idx}")
        return strategy

    @property
    def is_tpu(self) -> bool:
        requires_backends(self, ["tf"])
        return self._setup_tpu is not None

    @property
    def strategy(self) -> "tf.distribute.Strategy":
        requires_backends(self, ["tf"])
        return self._setup_strategy

    @property
    def gpu_list(self):
        requires_backends(self, ["tf"])
        return tf.config.list_physical_devices("GPU")

    @property
    def n_gpu(self) -> int:
        requires_backends(self, ["tf"])
        if self.cuda:
            return len(self.gpu_list)
        return 0

    @property
    def is_gpu(self) -> bool:
        return self.n_gpu > 0
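# --- Usage sketch (not part of the original file) ---
# Resolving a distribution strategy from the arguments; the flag values are
# illustrative and TensorFlow must be installed for this to run.
#
#   args = TensorFlowBenchmarkArguments(models=["bert-base-uncased"], eager_mode=True)
#   with args.strategy.scope():
#       x = tf.ones((2, 2))          # placed on the device the strategy chose
#       print(tf.reduce_sum(x))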
from math import sqrt
import numpy as np
from sympy import symbols
# Coefficient
# Speed of light (m/s)
c = 299_792_458

# Symbols
ct, x, y, z = symbols("ct x y z")


def beta(velocity: float) -> float:
    """Calculate beta = velocity / speed of light."""
    if velocity > c:
        raise ValueError("Speed must not exceed light speed 299,792,458 [m/s]!")
    elif velocity < 1:
        # Usually the speed should be much higher than 1 (c order of magnitude)
        raise ValueError("Speed must be greater than or equal to 1!")
    return velocity / c


def gamma(velocity: float) -> float:
    """Calculate the Lorentz factor gamma = 1 / sqrt(1 - beta^2)."""
    return 1 / sqrt(1 - beta(velocity) ** 2)


def transformation_matrix(velocity: float) -> np.ndarray:
    """Build the Lorentz transformation matrix for a boost along the x-axis."""
    return np.array(
        [
            [gamma(velocity), -gamma(velocity) * beta(velocity), 0, 0],
            [-gamma(velocity) * beta(velocity), gamma(velocity), 0, 0],
            [0, 0, 1, 0],
            [0, 0, 0, 1],
        ]
    )


def transform(velocity: float, event: np.ndarray = None) -> np.ndarray:
    """Apply the Lorentz transformation to a four-vector (symbolic by default)."""
    # Ensure event is not empty
    if event is None:
        event = np.array([ct, x, y, z])  # Symbolic four vector
    else:
        event[0] *= c  # x0 is ct (speed of light * time)
    return transformation_matrix(velocity) @ event


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Example of symbolic vector:
    four_vector = transform(29_979_245)
    print("Example of four vector: ")
    print(f"ct' = {four_vector[0]}")
    print(f"x' = {four_vector[1]}")
    print(f"y' = {four_vector[2]}")
    print(f"z' = {four_vector[3]}")

    # Substitute symbols with numerical values
    sub_dict = {ct: c, x: 1, y: 1, z: 1}
    numerical_vector = [four_vector[i].subs(sub_dict) for i in range(4)]
    print(f"\n{numerical_vector}")
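if __name__ == "__main__":
    # Usage sketch (not part of the original script): a numeric boost at one
    # tenth of light speed as a sanity check of beta/gamma above.
    velocity = 0.1 * c                 # 29_979_245.8 m/s, an arbitrary choice
    print(beta(velocity))              # 0.1
    print(gamma(velocity))             # ~1.00504 = 1 / sqrt(1 - 0.01)
    print(transform(velocity, np.array([1.0, 0.0, 0.0, 0.0])))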
from scipy.stats import pearsonr
import datasets
_DESCRIPTION = '\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n predictions (`list` of `int`): Predicted class labels, as returned by a model.\n references (`list` of `int`): Ground truth labels.\n return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.\n\nExamples:\n\n Example 1-A simple example using only predictions and references.\n >>> pearsonr_metric = datasets.load_metric(\"pearsonr\")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n\n Example 2-The same as Example 1, but that also returns the `p-value`.\n >>> pearsonr_metric = datasets.load_metric(\"pearsonr\")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n >>> print(sorted(list(results.keys())))\n [\'p-value\', \'pearsonr\']\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n >>> print(round(results[\'p-value\'], 2))\n 0.15\n'
_CITATION = '\n@article{2020SciPy-NMeth,\nauthor = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Antonio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\njournal = {Nature Methods},\nyear = {2020},\nvolume = {17},\npages = {261--272},\nadsurl = {https://rdcu.be/b08Wh},\ndoi = {10.1038/s41592-019-0686-2},\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Pearsonr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        if return_pvalue:
            results = pearsonr(references, predictions)
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references, predictions)[0])}
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = AutoModelForSeq2SeqLM.from_pretrained("google/mt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="pt").input_ids
        labels = tokenizer("Hi I am", return_tensors="pt").input_ids

        loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
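# Note (not part of the original test): the score above rearranges the
# token-averaged cross-entropy into a total sequence log-likelihood, matching
# the Mesh-TensorFlow evaluation convention. With illustrative numbers:
#
#   mean_loss = 16.98254          # assumed per-token loss from the model
#   num_label_tokens = 5          # assumed length of the label sequence
#   score = -(num_label_tokens * mean_loss)   # = -84.9127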
from transformers import BertTokenizerFast
from .custom_tokenization import CustomTokenizer
class CustomTokenizerFast(BertTokenizerFast):
    slow_tokenizer_class = CustomTokenizer

    pass
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/pegasus-large": "https://huggingface.co/google/pegasus-large/resolve/main/config.json",
    # See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}
class PegasusConfig(PretrainedConfig):
    model_type = "pegasus"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=0,
        scale_embedding=False,
        pad_token_id=0,
        eos_token_id=1,
        forced_eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model
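if __name__ == "__main__":
    # Usage sketch (not part of the original file): a deliberately small
    # config, e.g. for unit tests; all sizes are illustrative.
    config = PegasusConfig(d_model=64, encoder_layers=2, decoder_layers=2,
                           encoder_attention_heads=4, decoder_attention_heads=4)
    print(config.hidden_size)          # 64, via the property alias
    print(config.num_attention_heads)  # 4, aliased to encoder_attention_heads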
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {
"configuration_gpt_neox_japanese": ["GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXJapaneseConfig"],
"tokenization_gpt_neox_japanese": ["GPTNeoXJapaneseTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neox_japanese"] = [
"GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTNeoXJapaneseForCausalLM",
"GPTNeoXJapaneseLayer",
"GPTNeoXJapaneseModel",
"GPTNeoXJapanesePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNet2DModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class LDMPipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    @property
    def dummy_vq_model(self):
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=3,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config)

    def test_inference_uncond(self):
        unet = self.dummy_uncond_unet
        scheduler = DDIMScheduler()
        vae = self.dummy_vq_model

        ldm = LDMPipeline(unet=unet, vqvae=vae, scheduler=scheduler)
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=2, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = ldm(generator=generator, num_inference_steps=2, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172])
        tolerance = 1e-2 if torch_device != "mps" else 3e-2

        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < tolerance


@slow
@require_torch
class LDMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_uncond(self):
        ldm = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=5, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.4399, 0.44975, 0.46825, 0.474, 0.4359, 0.4581, 0.45095, 0.4341, 0.4447])
        tolerance = 1e-2 if torch_device != "mps" else 3e-2

        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "tiiuae/falcon-40b": "https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json",
    "tiiuae/falcon-7b": "https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json",
}
class FalconConfig(PretrainedConfig):
    model_type = "falcon"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=65024,
        hidden_size=4544,
        num_hidden_layers=32,
        num_attention_heads=71,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        num_kv_heads=None,
        alibi=False,
        new_decoder_architecture=False,
        multi_query=True,
        parallel_attn=True,
        bias=False,
        bos_token_id=11,
        eos_token_id=11,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads
        self.alibi = alibi
        self.new_decoder_architecture = new_decoder_architecture
        self.multi_query = multi_query  # Ignored when new_decoder_architecture is True
        self.parallel_attn = parallel_attn
        self.bias = bias

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def head_dim(self):
        return self.hidden_size // self.num_attention_heads

    @property
    def rotary(self):
        return not self.alibi
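if __name__ == "__main__":
    # Usage sketch (not part of the original file): a tiny Falcon-style
    # config; the values are illustrative only.
    config = FalconConfig(hidden_size=128, num_attention_heads=4, num_hidden_layers=2)
    print(config.head_dim)  # 32 == hidden_size // num_attention_heads
    print(config.rotary)    # True, because alibi defaults to False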
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
    AutoTokenizer,
    Blip2Config,
    Blip2ForConditionalGeneration,
    Blip2Processor,
    Blip2VisionConfig,
    BlipImageProcessor,
    OPTConfig,
    T5Config,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image() -> Image.Image:
    url = "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return image
def create_rename_keys(config) -> list:
    rename_keys = []
# fmt: off
# vision encoder
rename_keys.append(('visual_encoder.cls_token', 'vision_model.embeddings.class_embedding') )
rename_keys.append(('visual_encoder.pos_embed', 'vision_model.embeddings.position_embedding') )
rename_keys.append(('visual_encoder.patch_embed.proj.weight', 'vision_model.embeddings.patch_embedding.weight') )
rename_keys.append(('visual_encoder.patch_embed.proj.bias', 'vision_model.embeddings.patch_embedding.bias') )
rename_keys.append(('ln_vision.weight', 'vision_model.post_layernorm.weight') )
rename_keys.append(('ln_vision.bias', 'vision_model.post_layernorm.bias') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f"""visual_encoder.blocks.{i}.norm1.weight""", f"""vision_model.encoder.layers.{i}.layer_norm1.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.norm1.bias""", f"""vision_model.encoder.layers.{i}.layer_norm1.bias""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.norm2.weight""", f"""vision_model.encoder.layers.{i}.layer_norm2.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.norm2.bias""", f"""vision_model.encoder.layers.{i}.layer_norm2.bias""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.attn.qkv.weight""", f"""vision_model.encoder.layers.{i}.self_attn.qkv.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.attn.proj.weight""", f"""vision_model.encoder.layers.{i}.self_attn.projection.weight""",) )
rename_keys.append((f"""visual_encoder.blocks.{i}.attn.proj.bias""", f"""vision_model.encoder.layers.{i}.self_attn.projection.bias""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.mlp.fc1.weight""", f"""vision_model.encoder.layers.{i}.mlp.fc1.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.mlp.fc1.bias""", f"""vision_model.encoder.layers.{i}.mlp.fc1.bias""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.mlp.fc2.weight""", f"""vision_model.encoder.layers.{i}.mlp.fc2.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.mlp.fc2.bias""", f"""vision_model.encoder.layers.{i}.mlp.fc2.bias""") )
# QFormer
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.weight', 'qformer.layernorm.weight') )
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.bias', 'qformer.layernorm.bias') )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_q_v_bias(state_dict, config):
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias")

        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
        state_dict[f"visual_encoder.blocks.{i}.attn.qkv.bias"] = qkv_bias
def get_blip2_config(model_name, eos_token_id):
    image_size = 364 if "coco" in model_name else 224
    vision_config = Blip2VisionConfig(image_size=image_size).to_dict()

    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-2.7b", eos_token_id=eos_token_id).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-6.7b", eos_token_id=eos_token_id).to_dict()
    elif "t5-xl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xxl", dense_act_fn="gelu", bos_token_id=1).to_dict()

    config = Blip2Config(vision_config=vision_config, text_config=text_config)

    return config, image_size
@torch.no_grad()
def convert_blip2_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    tokenizer = (
        AutoTokenizer.from_pretrained("facebook/opt-2.7b")
        if "opt" in model_name
        else AutoTokenizer.from_pretrained("google/flan-t5-xl")
    )
    eos_token_id = tokenizer("\n", add_special_tokens=False).input_ids[0]
    config, image_size = get_blip2_config(model_name, eos_token_id=eos_token_id)

    hf_model = Blip2ForConditionalGeneration(config).eval()

    model_name_to_original = {
        "blip2-opt-2.7b": ("blip2_opt", "pretrain_opt2.7b"),
        "blip2-opt-6.7b": ("blip2_opt", "pretrain_opt6.7b"),
        "blip2-opt-2.7b-coco": ("blip2_opt", "caption_coco_opt2.7b"),
        "blip2-opt-6.7b-coco": ("blip2_opt", "caption_coco_opt6.7b"),
        "blip2-flan-t5-xl": ("blip2_t5", "pretrain_flant5xl"),
        "blip2-flan-t5-xl-coco": ("blip2_t5", "caption_coco_flant5xl"),
        "blip2-flan-t5-xxl": ("blip2_t5", "pretrain_flant5xxl"),
    }

    name, model_type = model_name_to_original[model_name]

    # load original model
    print("Loading original model...")
    device = "cuda" if torch.cuda.is_available() else "cpu"
    original_model, vis_processors, _ = load_model_and_preprocess(
        name=name, model_type=model_type, is_eval=True, device=device
    )
    original_model.eval()
    print("Done!")

    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)

    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("Qformer.bert"):
            key = key.replace("Qformer.bert", "qformer")
        if "attention.self" in key:
            key = key.replace("self", "attention")
        if "opt_proj" in key:
            key = key.replace("opt_proj", "language_projection")
        if "t5_proj" in key:
            key = key.replace("t5_proj", "language_projection")
        if key.startswith("opt"):
            key = key.replace("opt", "language")
        if key.startswith("t5"):
            key = key.replace("t5", "language")
        state_dict[key] = val

    # read in qv biases
    read_in_q_v_bias(state_dict, config)

    missing_keys, unexpected_keys = hf_model.load_state_dict(state_dict, strict=False)
    assert len(missing_keys) == 0
    assert unexpected_keys == ["qformer.embeddings.position_ids"]

    image = load_demo_image()
    original_pixel_values = vis_processors["eval"](image).unsqueeze(0).to(device)
    input_ids = tokenizer(["\n"], return_tensors="pt").input_ids.to(device)

    # create processor
    image_processor = BlipImageProcessor(
        size={"height": image_size, "width": image_size}, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD
    )
    processor = Blip2Processor(image_processor=image_processor, tokenizer=tokenizer)
    pixel_values = processor(images=image, return_tensors="pt").pixel_values.to(device)

    # make sure processor creates exact same pixel values
    assert torch.allclose(pixel_values, original_pixel_values)

    original_model.to(device)
    hf_model.to(device)
    with torch.no_grad():
        if "opt" in model_name:
            original_logits = original_model({"image": original_pixel_values, "text_input": [""]}).logits
            logits = hf_model(pixel_values, input_ids).logits
        else:
            original_logits = original_model(
                {"image": original_pixel_values, "text_input": ["\n"], "text_output": ["\n"]}
            ).logits
            labels = input_ids.masked_fill(input_ids == tokenizer.pad_token_id, -100)
            logits = hf_model(pixel_values, input_ids, labels=labels).logits

    assert original_logits.shape == logits.shape
    print("First values of original logits:", original_logits[0, :3, :3])
    print("First values of HF logits:", logits[0, :3, :3])

    # assert values
    if model_name == "blip2-flan-t5-xl":
        expected_slice_logits = torch.tensor(
            [[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]], device=device
        )
        assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    elif model_name == "blip2-flan-t5-xl-coco":
        expected_slice_logits = torch.tensor(
            [[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]], device=device
        )
    else:
        # cast to same type
        target_dtype = logits.dtype
        assert torch.allclose(original_logits.to(target_dtype), logits, atol=1e-2)
    print("Looks ok!")

    print("Generating a caption...")
    prompt = ""
    input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(device)

    original_outputs = original_model.generate({"image": original_pixel_values})
    outputs = hf_model.generate(
        pixel_values,
        input_ids,
        do_sample=False,
        num_beams=5,
        max_length=30,
        min_length=1,
        top_p=0.9,
        repetition_penalty=1.0,
        length_penalty=1.0,
        temperature=1,
    )
    print("Original generation:", original_outputs)
    prompt_length = input_ids.shape[1]
    output_text = processor.batch_decode(outputs[:, prompt_length:], skip_special_tokens=True)
    output_text = [text.strip() for text in output_text]
    print("HF generation:", output_text)

    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path)
        hf_model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        processor.push_to_hub(f"nielsr/{model_name}")
        hf_model.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = [
        "blip2-opt-2.7b",
        "blip2-opt-6.7b",
        "blip2-opt-2.7b-coco",
        "blip2-opt-6.7b-coco",
        "blip2-flan-t5-xl",
        "blip2-flan-t5-xl-coco",
        "blip2-flan-t5-xxl",
    ]
    parser.add_argument(
        "--model_name",
        default="blip2-opt-2.7b",
        choices=choices,
        type=str,
        help="Path to hf config.json of model to convert",
    )
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model and processor to the hub after converting",
    )

    args = parser.parse_args()

    convert_blip2_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
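# --- Usage sketch (not part of the original script) ---
# A plausible invocation, assuming the file is saved as
# convert_blip_2_original_to_pytorch.py; the output directory is a placeholder.
#
#   python convert_blip_2_original_to_pytorch.py \
#       --model_name blip2-opt-2.7b \
#       --pytorch_dump_folder_path ./blip2-opt-2.7b-converted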
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class BarthezTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BarthezTokenizer
    rust_tokenizer_class = BarthezTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        tokenizer = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez")
        tokenizer.save_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname, legacy_format=False)
        self.tokenizer = tokenizer
    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 101122)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 101122)
    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 57, 3018, 70307, 91, 2]

        batch = self.tokenizer(
            src_text, max_length=len(expected_src_tokens), padding=True, truncation=True, return_tensors="pt"
        )
        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 6), batch.input_ids.shape)
        self.assertEqual((2, 6), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(expected_src_tokens, result)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {'input_ids': [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
# fmt: on
        # moussaKam/mbarthez is a french model. So we also use french texts.
        sequences = [
            "Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
            "utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
            "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
            "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
            "telles que la traduction et la synthèse de texte.",
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="moussaKam/mbarthez",
            revision="c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6",
            sequences=sequences,
        )
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def freeze_module(module):
    """Disable gradients for every parameter of the given module."""
    for param in module.parameters():
        param.requires_grad = False


def get_device():
    """Pick the best available torch device (cuda > mps > cpu), with an MPS caveat."""
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations."
        )
    return device


def show_image(image):
    """Display an image without axis ticks."""
    fig = plt.imshow(image)
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.show()


def get_timestamp():
    """Return the current time formatted as HH:MM:SS."""
    current_time = datetime.now()
    timestamp = current_time.strftime("%H:%M:%S")
    return timestamp
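if __name__ == "__main__":
    # Usage sketch (not part of the original file): freeze a toy module and
    # query the helpers. The function names above were reconstructed, so treat
    # them as assumptions.
    import torch.nn as nn

    layer = nn.Linear(4, 2)
    freeze_module(layer)
    print(all(not p.requires_grad for p in layer.parameters()))  # True
    print(get_device(), get_timestamp())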
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
logger = get_logger(__name__)

LOGITS_PROCESSOR_INPUTS_DOCSTRING = r"\n Args:\n input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):\n Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam\n search or log softmax for each vocabulary token when using beam search\n kwargs (`Dict[str, Any]`, *optional*):\n Additional logits processor specific kwargs.\n\n Return:\n `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.\n\n"
class FlaxLogitsProcessor:
    """Abstract base class for all logit processors that can be applied during generation."""

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray) -> jnp.ndarray:
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )


class FlaxLogitsWarper:
    """Abstract base class for all logit warpers that can be applied during generation with sampling."""

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray) -> jnp.ndarray:
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )


class FlaxLogitsProcessorList(list):
    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int, **kwargs) -> jnp.ndarray:
        for processor in self:
            function_args = inspect.signature(processor.__call__).parameters
            if len(function_args) > 3:
                if not all(arg in kwargs for arg in list(function_args.keys())[2:]):
                    raise ValueError(
                        f"Make sure that all the required parameters: {list(function_args.keys())} for "
                        f"{processor.__class__} are passed to the logits processor."
                    )
                scores = processor(input_ids, scores, cur_len, **kwargs)
            else:
                scores = processor(input_ids, scores, cur_len)
        return scores
class FlaxTemperatureLogitsWarper(FlaxLogitsWarper):
    def __init__(self, temperature: float):
        if not isinstance(temperature, float) or not (temperature > 0):
            raise ValueError(f"`temperature` has to be a strictly positive float, but is {temperature}")

        self.temperature = temperature

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        scores = scores / self.temperature
        return scores


class FlaxTopPLogitsWarper(FlaxLogitsWarper):
    def __init__(self, top_p: float, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
        if not isinstance(top_p, float) or (top_p < 0 or top_p > 1.0):
            raise ValueError(f"`top_p` has to be a float > 0 and < 1, but is {top_p}")
        if not isinstance(min_tokens_to_keep, int) or (min_tokens_to_keep < 1):
            raise ValueError(f"`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}")

        self.top_p = top_p
        self.filter_value = filter_value
        self.min_tokens_to_keep = min_tokens_to_keep

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        topk_scores, topk_indices = lax.top_k(scores, scores.shape[-1])

        mask_scores = jnp.full_like(scores, self.filter_value)
        cumulative_probs = jax.nn.softmax(topk_scores, axis=-1).cumsum(axis=-1)
        score_mask = cumulative_probs < self.top_p

        # include the token that is higher than top_p as well
        score_mask = jnp.roll(score_mask, 1)
        score_mask |= score_mask.at[:, 0].set(True)

        # min tokens to keep
        score_mask = score_mask.at[:, : self.min_tokens_to_keep].set(True)

        topk_next_scores = jnp.where(score_mask, topk_scores, mask_scores)
        next_scores = jax.lax.sort_key_val(topk_indices, topk_next_scores)[-1]

        return next_scores


class FlaxTopKLogitsWarper(FlaxLogitsWarper):
    def __init__(self, top_k: int, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
        if not isinstance(top_k, int) or top_k <= 0:
            raise ValueError(f"`top_k` has to be a strictly positive integer, but is {top_k}")

        self.top_k = max(top_k, min_tokens_to_keep)
        self.filter_value = filter_value

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        batch_size, vocab_size = scores.shape
        next_scores_flat = jnp.full(batch_size * vocab_size, self.filter_value)

        topk = min(self.top_k, scores.shape[-1])  # Safety check
        topk_scores, topk_indices = lax.top_k(scores, topk)
        shift = jnp.broadcast_to((jnp.arange(batch_size) * vocab_size)[:, None], (batch_size, topk)).flatten()
        topk_scores_flat = topk_scores.flatten()
        topk_indices_flat = topk_indices.flatten() + shift

        next_scores_flat = next_scores_flat.at[topk_indices_flat].set(topk_scores_flat)
        next_scores = next_scores_flat.reshape(batch_size, vocab_size)
        return next_scores
class FlaxForcedBOSTokenLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, bos_token_id: int):
        self.bos_token_id = bos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        new_scores = jnp.full(scores.shape, -float("inf"))

        apply_penalty = 1 - jnp.bool_(cur_len - 1)
        scores = jnp.where(apply_penalty, new_scores.at[:, self.bos_token_id].set(0), scores)
        return scores


class FlaxForcedEOSTokenLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, max_length: int, eos_token_id: int):
        self.max_length = max_length
        self.eos_token_id = eos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        new_scores = jnp.full(scores.shape, -float("inf"))

        apply_penalty = 1 - jnp.bool_(cur_len - self.max_length + 1)
        scores = jnp.where(apply_penalty, new_scores.at[:, self.eos_token_id].set(0), scores)
        return scores


class FlaxMinLengthLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, min_length: int, eos_token_id: int):
        if not isinstance(min_length, int) or min_length < 0:
            raise ValueError(f"`min_length` has to be a positive integer, but is {min_length}")
        if not isinstance(eos_token_id, int) or eos_token_id < 0:
            raise ValueError(f"`eos_token_id` has to be a positive integer, but is {eos_token_id}")

        self.min_length = min_length
        self.eos_token_id = eos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        # create boolean flag to decide if min length penalty should be applied
        apply_penalty = 1 - jnp.clip(cur_len - self.min_length, 0, 1)
        scores = jnp.where(apply_penalty, scores.at[:, self.eos_token_id].set(-float("inf")), scores)
        return scores


class FlaxSuppressTokensAtBeginLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, begin_suppress_tokens, begin_index):
        self.begin_suppress_tokens = list(begin_suppress_tokens)
        self.begin_index = begin_index

    def __call__(self, input_ids, scores, cur_len: int):
        apply_penalty = 1 - jnp.bool_(cur_len - self.begin_index)
        scores = jnp.where(apply_penalty, scores.at[:, self.begin_suppress_tokens].set(-float("inf")), scores)
        return scores


class FlaxSuppressTokensLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, suppress_tokens: list):
        self.suppress_tokens = list(suppress_tokens)

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        scores = scores.at[..., self.suppress_tokens].set(-float("inf"))
        return scores


class FlaxForceTokensLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, force_token_map):
        force_token_map = dict(force_token_map)
        # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
        # index of the array corresponds to the index of the token to be forced, for XLA compatibility.
        # Indexes without forced tokens will have a negative value.
        force_token_array = jnp.ones((max(force_token_map.keys()) + 1), dtype=jnp.int32) * -1
        for index, token in force_token_map.items():
            if token is not None:
                force_token_array = force_token_array.at[index].set(token)
        self.force_token_array = jnp.int32(force_token_array)

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        def _force_token(generation_idx):
            batch_size = scores.shape[0]
            current_token = self.force_token_array[generation_idx]

            new_scores = jnp.ones_like(scores, dtype=scores.dtype) * -float("inf")
            updates = jnp.zeros((batch_size, 1), dtype=scores.dtype)
            new_scores = lax.dynamic_update_slice(new_scores, updates, (0, current_token))
            return new_scores

        scores = lax.cond(
            cur_len >= self.force_token_array.shape[0],
            # If the current length is geq than the length of force_token_array, the processor does nothing.
            lambda: scores,
            # Otherwise, it may force a certain token.
            lambda: lax.cond(
                self.force_token_array[cur_len] >= 0,
                # Only valid (positive) tokens are forced
                lambda: _force_token(cur_len),
                # Otherwise, the processor does nothing.
                lambda: scores,
            ),
        )
        return scores
class FlaxWhisperTimeStampLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, generate_config, model_config, decoder_input_length):
        self.eos_token_id = generate_config.eos_token_id
        self.no_timestamps_token_id = generate_config.no_timestamps_token_id
        self.timestamp_begin = generate_config.no_timestamps_token_id + 1

        self.begin_index = decoder_input_length + 1

        if generate_config.is_multilingual:
            # room for language token and task token
            self.begin_index += 2
        if hasattr(generate_config, "max_initial_timestamp_index"):
            self.max_initial_timestamp_index = generate_config.max_initial_timestamp_index
        else:
            self.max_initial_timestamp_index = model_config.vocab_size
        if self.max_initial_timestamp_index is None:
            self.max_initial_timestamp_index = model_config.vocab_size

    def __call__(self, input_ids, scores, cur_len):
        # suppress <|notimestamps|> which is handled by without_timestamps
        scores = scores.at[:, self.no_timestamps_token_id].set(-float("inf"))

        def handle_pairs(input_ids_k, scores_k):
            last_was_timestamp = jnp.where((cur_len - self.begin_index) >= 1, True, False)
            last_was_timestamp = jnp.where(
                input_ids_k[cur_len - 1] >= self.timestamp_begin,
                True and last_was_timestamp,
                False,
            )

            penultimate_was_timestamp = jnp.where((cur_len - self.begin_index) < 2, True, False)
            penultimate_was_timestamp = jnp.where(
                input_ids_k[cur_len - 2] >= self.timestamp_begin,
                True,
                penultimate_was_timestamp,
            )

            return jnp.where(
                last_was_timestamp,
                jnp.where(
                    penultimate_was_timestamp > 0,
                    scores_k.at[self.timestamp_begin :].set(-float("inf")),
                    scores_k.at[: self.eos_token_id].set(-float("inf")),
                ),
                scores_k,
            )

        scores = jax.vmap(handle_pairs)(input_ids, scores)

        apply_max_initial_timestamp = jnp.where(cur_len == self.begin_index, True, False)
        apply_max_initial_timestamp = jnp.where(
            self.max_initial_timestamp_index is not None,
            True and apply_max_initial_timestamp,
            False,
        )

        last_allowed = self.timestamp_begin + self.max_initial_timestamp_index

        scores = jnp.where(
            apply_max_initial_timestamp,
            scores.at[:, last_allowed + 1 :].set(-float("inf")),
            scores,
        )

        # if sum of probability over timestamps is above any other token, sample timestamp
        logprobs = jax.nn.log_softmax(scores, axis=-1)

        def handle_cumulative_probs(logprobs_k, scores_k):
            timestamp_logprob = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :], axis=-1)
            max_text_token_logprob = jnp.max(logprobs_k[: self.timestamp_begin])
            return jnp.where(
                timestamp_logprob > max_text_token_logprob,
                scores_k.at[: self.timestamp_begin].set(-float("inf")),
                scores_k,
            )

        scores = jax.vmap(handle_cumulative_probs)(logprobs, scores)

        return scores
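if __name__ == "__main__":
    # Usage sketch (not part of the original file): chain two processors and
    # apply them to dummy scores; shapes and token ids are illustrative.
    processors = FlaxLogitsProcessorList(
        [FlaxTemperatureLogitsWarper(0.7), FlaxMinLengthLogitsProcessor(min_length=5, eos_token_id=2)]
    )
    dummy_input_ids = jnp.zeros((1, 4), dtype=jnp.int32)
    dummy_scores = jnp.zeros((1, 10))
    processed = processors(dummy_input_ids, dummy_scores, cur_len=4)
    print(processed[0, 2])  # -inf: EOS stays masked until min_length is reached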
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"configuration_blip": [
"BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlipConfig",
"BlipTextConfig",
"BlipVisionConfig",
],
"processing_blip": ["BlipProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_blip"] = ["BlipImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blip"] = [
"BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlipModel",
"BlipPreTrainedModel",
"BlipForConditionalGeneration",
"BlipForQuestionAnswering",
"BlipVisionModel",
"BlipTextModel",
"BlipForImageTextRetrieval",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blip"] = [
"TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFBlipModel",
"TFBlipPreTrainedModel",
"TFBlipForConditionalGeneration",
"TFBlipForQuestionAnswering",
"TFBlipVisionModel",
"TFBlipTextModel",
"TFBlipForImageTextRetrieval",
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
import math
import sys
def read_file_binary(file_path: str) -> str:
    """Read a file and return its contents as a string of bits."""
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f"{dat:08b}"
            result += curr_byte
        return result
    except OSError:
        print("File not accessible")
        sys.exit()


def decompress_data(data_bits: str) -> str:
    """Decompress the given bit string with the LZW algorithm."""
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)
    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue
        last_match_id = lexicon[curr_string]
        result += last_match_id
        lexicon[curr_string] = last_match_id + "0"
        if math.log2(index).is_integer():
            # code length grows past a power of two: prefix every key with "0"
            new_lex = {}
            for curr_key in list(lexicon):
                new_lex["0" + curr_key] = lexicon.pop(curr_key)
            lexicon = new_lex
        lexicon[bin(index)[2:]] = last_match_id + "1"
        index += 1
        curr_string = ""
    return result


def write_file_binary(file_path: str, to_write: str) -> None:
    """Pad the bit string to whole bytes and write it to a file."""
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0, len(to_write), byte_length)
            ]
            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )
            for elem in result_byte_array[:-1]:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()


def remove_prefix(data_bits: str) -> str:
    """Strip the size prefix that a compressed file carries."""
    counter = 0
    for letter in data_bits:
        if letter == "1":
            break
        counter += 1
    data_bits = data_bits[counter:]
    data_bits = data_bits[counter + 1 :]
    return data_bits


def compress(source_path: str, destination_path: str) -> None:
    """Read a compressed file, decompress it and write out the result."""
    data_bits = read_file_binary(source_path)
    data_bits = remove_prefix(data_bits)
    decompressed = decompress_data(data_bits)
    write_file_binary(destination_path, decompressed)
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
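To see the dictionary growth in a friendlier setting, here is classic byte-level LZW on text; a self-contained sketch of the same idea, not the bit-stream variant above:

def lzw_compress(text: str) -> list[int]:
    codes = {chr(i): i for i in range(256)}
    w, out = "", []
    for ch in text:
        if w + ch in codes:
            w += ch
        else:
            out.append(codes[w])
            codes[w + ch] = len(codes)  # register the new phrase
            w = ch
    if w:
        out.append(codes[w])
    return out

def lzw_decompress(stream: list[int]) -> str:
    codes = {i: chr(i) for i in range(256)}
    w = codes[stream[0]]
    out = [w]
    for k in stream[1:]:
        entry = codes.get(k, w + w[0])  # k may be the phrase just being defined
        out.append(entry)
        codes[len(codes)] = w + entry[0]
        w = entry
    return "".join(out)

assert lzw_decompress(lzw_compress("TOBEORNOTTOBEORTOBEORNOT")) == "TOBEORNOTTOBEORTOBEORNOT"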
| 364
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class lowerCamelCase__ :
'''simple docstring'''
lowerCamelCase = XGLMConfig
lowerCamelCase = {}
lowerCamelCase = '''gelu'''
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=14 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=32 , __UpperCAmelCase=2 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=5_12 , __UpperCAmelCase=0.0_2 , ) -> List[str]:
_lowerCAmelCase =parent
_lowerCAmelCase =batch_size
_lowerCAmelCase =seq_length
_lowerCAmelCase =is_training
_lowerCAmelCase =use_input_mask
_lowerCAmelCase =use_labels
_lowerCAmelCase =vocab_size
_lowerCAmelCase =d_model
_lowerCAmelCase =num_hidden_layers
_lowerCAmelCase =num_attention_heads
_lowerCAmelCase =ffn_dim
_lowerCAmelCase =activation_function
_lowerCAmelCase =activation_dropout
_lowerCAmelCase =attention_dropout
_lowerCAmelCase =max_position_embeddings
_lowerCAmelCase =initializer_range
_lowerCAmelCase =None
_lowerCAmelCase =0
_lowerCAmelCase =2
_lowerCAmelCase =1
def _lowerCAmelCase ( self ) -> Dict:
return XGLMConfig.from_pretrained("""facebook/xglm-564M""" )
def _lowerCAmelCase ( self ) -> str:
_lowerCAmelCase =tf.clip_by_value(
ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) , clip_value_min=0 , clip_value_max=3 )
_lowerCAmelCase =None
if self.use_input_mask:
_lowerCAmelCase =random_attention_mask([self.batch_size, self.seq_length] )
_lowerCAmelCase =self.get_config()
_lowerCAmelCase =floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
)
def _lowerCAmelCase ( self ) -> str:
return XGLMConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=__UpperCAmelCase , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=__UpperCAmelCase , )
def _lowerCAmelCase ( self ) -> Dict:
_lowerCAmelCase =self.prepare_config_and_inputs()
        config , input_ids , input_mask , head_mask = config_and_inputs
_lowerCAmelCase ={
"""input_ids""": input_ids,
"""head_mask""": head_mask,
}
return config, inputs_dict
@require_tf
class lowerCamelCase__ ( __magic_name__ , __magic_name__ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
lowerCamelCase = (TFXGLMForCausalLM,) if is_tf_available() else ()
lowerCamelCase = (
{'''feature-extraction''': TFXGLMModel, '''text-generation''': TFXGLMForCausalLM} if is_tf_available() else {}
)
lowerCamelCase = False
lowerCamelCase = False
lowerCamelCase = False
def _lowerCAmelCase ( self ) -> Tuple:
_lowerCAmelCase =TFXGLMModelTester(self )
_lowerCAmelCase =ConfigTester(self , config_class=__UpperCAmelCase , n_embd=37 )
def _lowerCAmelCase ( self ) -> int:
self.config_tester.run_common_tests()
@slow
def _lowerCAmelCase ( self ) -> Union[str, Any]:
for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase =TFXGLMModel.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
@unittest.skip(reason="""Currently, model embeddings are going to undergo a major refactor.""" )
def _lowerCAmelCase ( self ) -> Union[str, Any]:
super().test_resize_token_embeddings()
@require_tf
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def _lowerCAmelCase ( self , __UpperCAmelCase=True ) -> str:
_lowerCAmelCase =TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""" )
_lowerCAmelCase =tf.convert_to_tensor([[2, 2_68, 98_65]] , dtype=tf.intaa ) # The dog
# </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
# fmt: off
_lowerCAmelCase =[2, 2_68, 98_65, 67, 11, 19_88, 5_72_52, 98_65, 5, 9_84, 67, 19_88, 21_38_38, 16_58, 53, 7_04_46, 33, 66_57, 2_78, 15_81]
# fmt: on
_lowerCAmelCase =model.generate(__UpperCAmelCase , do_sample=__UpperCAmelCase , num_beams=1 )
if verify_outputs:
self.assertListEqual(output_ids[0].numpy().tolist() , __UpperCAmelCase )
@slow
def _lowerCAmelCase ( self ) -> Optional[Any]:
_lowerCAmelCase =XGLMTokenizer.from_pretrained("""facebook/xglm-564M""" )
_lowerCAmelCase =TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""" )
tf.random.set_seed(0 )
_lowerCAmelCase =tokenizer("""Today is a nice day and""" , return_tensors="""tf""" )
_lowerCAmelCase =tokenized.input_ids
# forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
with tf.device(""":/CPU:0""" ):
_lowerCAmelCase =model.generate(__UpperCAmelCase , do_sample=__UpperCAmelCase , seed=[7, 0] )
_lowerCAmelCase =tokenizer.decode(output_ids[0] , skip_special_tokens=__UpperCAmelCase )
_lowerCAmelCase =(
"""Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due"""
)
self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )
@slow
def _lowerCAmelCase ( self ) -> Union[str, Any]:
_lowerCAmelCase =TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""" )
_lowerCAmelCase =XGLMTokenizer.from_pretrained("""facebook/xglm-564M""" )
_lowerCAmelCase ="""left"""
# use different length sentences to test batching
_lowerCAmelCase =[
"""This is an extremelly long sentence that only exists to test the ability of the model to cope with """
"""left-padding, such as in batched generation. The output for the sequence below should be the same """
"""regardless of whether left padding is applied or not. When""",
"""Hello, my dog is a little""",
]
_lowerCAmelCase =tokenizer(__UpperCAmelCase , return_tensors="""tf""" , padding=__UpperCAmelCase )
_lowerCAmelCase =inputs["""input_ids"""]
_lowerCAmelCase =model.generate(input_ids=__UpperCAmelCase , attention_mask=inputs["""attention_mask"""] , max_new_tokens=12 )
_lowerCAmelCase =tokenizer(sentences[0] , return_tensors="""tf""" ).input_ids
_lowerCAmelCase =model.generate(input_ids=__UpperCAmelCase , max_new_tokens=12 )
_lowerCAmelCase =tokenizer(sentences[1] , return_tensors="""tf""" ).input_ids
_lowerCAmelCase =model.generate(input_ids=__UpperCAmelCase , max_new_tokens=12 )
_lowerCAmelCase =tokenizer.batch_decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase )
_lowerCAmelCase =tokenizer.decode(output_non_padded[0] , skip_special_tokens=__UpperCAmelCase )
_lowerCAmelCase =tokenizer.decode(output_padded[0] , skip_special_tokens=__UpperCAmelCase )
_lowerCAmelCase =[
"""This is an extremelly long sentence that only exists to test the ability of the model to cope with """
"""left-padding, such as in batched generation. The output for the sequence below should be the same """
"""regardless of whether left padding is applied or not. When left padding is applied, the sequence will be """
"""a single""",
"""Hello, my dog is a little bit of a shy one, but he is very friendly""",
]
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , [non_padded_sentence, padded_sentence] )
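The reason the test sets the tokenizer's padding side to "left": decoder-only generation continues from the last position of each row, which must hold a real token. A model-free illustration of the difference:

pad = 0
seqs = [[5, 6, 7, 8], [9, 9]]
width = max(len(s) for s in seqs)
left_padded = [[pad] * (width - len(s)) + s for s in seqs]
right_padded = [s + [pad] * (width - len(s)) for s in seqs]
assert [row[-1] for row in left_padded] == [8, 9]   # last slot is always a real token
assert [row[-1] for row in right_padded] == [8, 0]  # the short sequence would continue from padding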
| 341
| 0
|
'''simple docstring'''
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class __A ( UpperCamelCase__ ):
a__ : Any = (UniPCMultistepScheduler,)
a__ : Tuple = (("""num_inference_steps""", 25),)
def _lowercase (self : str , **__a : Tuple ):
UpperCAmelCase_ = {
"num_train_timesteps": 1000,
"beta_start": 0.00_01,
"beta_end": 0.02,
"beta_schedule": "linear",
"solver_order": 2,
"solver_type": "bh2",
}
config.update(**__a )
return config
def _lowercase (self : List[str] , __a : Dict=0 , **__a : Dict ):
UpperCAmelCase_ = dict(self.forward_default_kwargs )
UpperCAmelCase_ = kwargs.pop("num_inference_steps" , __a )
UpperCAmelCase_ = self.dummy_sample
UpperCAmelCase_ = 0.1 * sample
UpperCAmelCase_ = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ = self.get_scheduler_config(**__a )
UpperCAmelCase_ = scheduler_class(**__a )
scheduler.set_timesteps(__a )
# copy over dummy past residuals
UpperCAmelCase_ = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__a )
UpperCAmelCase_ = scheduler_class.from_pretrained(__a )
new_scheduler.set_timesteps(__a )
# copy over dummy past residuals
UpperCAmelCase_ = dummy_past_residuals[: new_scheduler.config.solver_order]
UpperCAmelCase_ , UpperCAmelCase_ = sample, sample
for t in range(__a , time_step + scheduler.config.solver_order + 1 ):
UpperCAmelCase_ = scheduler.step(__a , __a , __a , **__a ).prev_sample
UpperCAmelCase_ = new_scheduler.step(__a , __a , __a , **__a ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def _lowercase (self : Any , __a : Optional[int]=0 , **__a : Dict ):
UpperCAmelCase_ = dict(self.forward_default_kwargs )
UpperCAmelCase_ = kwargs.pop("num_inference_steps" , __a )
UpperCAmelCase_ = self.dummy_sample
UpperCAmelCase_ = 0.1 * sample
UpperCAmelCase_ = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ = self.get_scheduler_config()
UpperCAmelCase_ = scheduler_class(**__a )
scheduler.set_timesteps(__a )
# copy over dummy past residuals (must be after setting timesteps)
UpperCAmelCase_ = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__a )
UpperCAmelCase_ = scheduler_class.from_pretrained(__a )
# copy over dummy past residuals
new_scheduler.set_timesteps(__a )
# copy over dummy past residual (must be after setting timesteps)
UpperCAmelCase_ = dummy_past_residuals[: new_scheduler.config.solver_order]
UpperCAmelCase_ = scheduler.step(__a , __a , __a , **__a ).prev_sample
UpperCAmelCase_ = new_scheduler.step(__a , __a , __a , **__a ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def _lowercase (self : Optional[Any] , __a : Union[str, Any]=None , **__a : Optional[int] ):
if scheduler is None:
UpperCAmelCase_ = self.scheduler_classes[0]
UpperCAmelCase_ = self.get_scheduler_config(**__a )
UpperCAmelCase_ = scheduler_class(**__a )
UpperCAmelCase_ = self.scheduler_classes[0]
UpperCAmelCase_ = self.get_scheduler_config(**__a )
UpperCAmelCase_ = scheduler_class(**__a )
UpperCAmelCase_ = 10
UpperCAmelCase_ = self.dummy_model()
UpperCAmelCase_ = self.dummy_sample_deter
scheduler.set_timesteps(__a )
for i, t in enumerate(scheduler.timesteps ):
UpperCAmelCase_ = model(__a , __a )
UpperCAmelCase_ = scheduler.step(__a , __a , __a ).prev_sample
return sample
def _lowercase (self : List[Any] ):
UpperCAmelCase_ = dict(self.forward_default_kwargs )
UpperCAmelCase_ = kwargs.pop("num_inference_steps" , __a )
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ = self.get_scheduler_config()
UpperCAmelCase_ = scheduler_class(**__a )
UpperCAmelCase_ = self.dummy_sample
UpperCAmelCase_ = 0.1 * sample
if num_inference_steps is not None and hasattr(__a , "set_timesteps" ):
scheduler.set_timesteps(__a )
elif num_inference_steps is not None and not hasattr(__a , "set_timesteps" ):
UpperCAmelCase_ = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
UpperCAmelCase_ = [residual + 0.2, residual + 0.15, residual + 0.10]
UpperCAmelCase_ = dummy_past_residuals[: scheduler.config.solver_order]
UpperCAmelCase_ = scheduler.timesteps[5]
UpperCAmelCase_ = scheduler.timesteps[6]
UpperCAmelCase_ = scheduler.step(__a , __a , __a , **__a ).prev_sample
UpperCAmelCase_ = scheduler.step(__a , __a , __a , **__a ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def _lowercase (self : List[Any] ):
# make sure that iterating over schedulers with same config names gives same results
# for defaults
UpperCAmelCase_ = UniPCMultistepScheduler(**self.get_scheduler_config() )
UpperCAmelCase_ = self.full_loop(scheduler=__a )
UpperCAmelCase_ = torch.mean(torch.abs(__a ) )
assert abs(result_mean.item() - 0.24_64 ) < 1E-3
UpperCAmelCase_ = DPMSolverSinglestepScheduler.from_config(scheduler.config )
UpperCAmelCase_ = DEISMultistepScheduler.from_config(scheduler.config )
UpperCAmelCase_ = DPMSolverMultistepScheduler.from_config(scheduler.config )
UpperCAmelCase_ = UniPCMultistepScheduler.from_config(scheduler.config )
UpperCAmelCase_ = self.full_loop(scheduler=__a )
UpperCAmelCase_ = torch.mean(torch.abs(__a ) )
assert abs(result_mean.item() - 0.24_64 ) < 1E-3
def _lowercase (self : Union[str, Any] ):
for timesteps in [25, 50, 100, 999, 1000]:
self.check_over_configs(num_train_timesteps=__a )
def _lowercase (self : Optional[int] ):
self.check_over_configs(thresholding=__a )
for order in [1, 2, 3]:
for solver_type in ["bh1", "bh2"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=__a , prediction_type=__a , sample_max_value=__a , solver_order=__a , solver_type=__a , )
def _lowercase (self : str ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__a )
def _lowercase (self : List[Any] ):
for solver_type in ["bh1", "bh2"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=__a , solver_type=__a , prediction_type=__a , )
UpperCAmelCase_ = self.full_loop(
solver_order=__a , solver_type=__a , prediction_type=__a , )
assert not torch.isnan(__a ).any(), "Samples have nan numbers"
def _lowercase (self : int ):
self.check_over_configs(lower_order_final=__a )
self.check_over_configs(lower_order_final=__a )
def _lowercase (self : Any ):
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
self.check_over_forward(num_inference_steps=__a , time_step=0 )
def _lowercase (self : Union[str, Any] ):
UpperCAmelCase_ = self.full_loop()
UpperCAmelCase_ = torch.mean(torch.abs(__a ) )
assert abs(result_mean.item() - 0.24_64 ) < 1E-3
def _lowercase (self : Tuple ):
UpperCAmelCase_ = self.full_loop(prediction_type="v_prediction" )
UpperCAmelCase_ = torch.mean(torch.abs(__a ) )
assert abs(result_mean.item() - 0.10_14 ) < 1E-3
def _lowercase (self : Optional[Any] ):
UpperCAmelCase_ = self.scheduler_classes[0]
UpperCAmelCase_ = self.get_scheduler_config(thresholding=__a , dynamic_thresholding_ratio=0 )
UpperCAmelCase_ = scheduler_class(**__a )
UpperCAmelCase_ = 10
UpperCAmelCase_ = self.dummy_model()
UpperCAmelCase_ = self.dummy_sample_deter.half()
scheduler.set_timesteps(__a )
for i, t in enumerate(scheduler.timesteps ):
UpperCAmelCase_ = model(__a , __a )
UpperCAmelCase_ = scheduler.step(__a , __a , __a ).prev_sample
assert sample.dtype == torch.floataa
def _lowercase (self : Dict , **__a : Optional[int] ):
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ = self.get_scheduler_config(**__a )
UpperCAmelCase_ = scheduler_class(**__a )
scheduler.set_timesteps(scheduler.config.num_train_timesteps )
assert len(scheduler.timesteps.unique() ) == scheduler.num_inference_steps
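The serialization round-trip the checks above depend on, in isolation (requires `diffusers` installed; the kwargs are just examples):

import tempfile
from diffusers import UniPCMultistepScheduler

scheduler = UniPCMultistepScheduler(solver_order=2, solver_type="bh2")
with tempfile.TemporaryDirectory() as tmpdir:
    scheduler.save_config(tmpdir)                           # writes scheduler_config.json
    reloaded = UniPCMultistepScheduler.from_pretrained(tmpdir)
assert reloaded.config.solver_order == scheduler.config.solver_order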
| 1
|
'''simple docstring'''
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class __A ( unittest.TestCase ):
def __init__(self : str , __a : Optional[Any] , __a : Optional[Any]=13 , __a : int=30 , __a : Union[str, Any]=2 , __a : Dict=3 , __a : List[Any]=True , __a : Optional[Any]=True , __a : List[Any]=32 , __a : Any=5 , __a : str=4 , __a : Optional[int]=37 , __a : Optional[int]="gelu" , __a : List[str]=0.1 , __a : Tuple=0.1 , __a : List[str]=10 , __a : Optional[int]=0.02 , ):
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = image_size
UpperCAmelCase_ = patch_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = is_training
UpperCAmelCase_ = use_labels
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = type_sequence_label_size
UpperCAmelCase_ = initializer_range
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
UpperCAmelCase_ = (image_size // patch_size) ** 2
UpperCAmelCase_ = num_patches + 1
def _lowercase (self : Any ):
UpperCAmelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase_ = ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__a , initializer_range=self.initializer_range , )
return config, pixel_values
def _lowercase (self : Dict , __a : Any , __a : List[Any] ):
UpperCAmelCase_ = FlaxViTModel(config=__a )
UpperCAmelCase_ = model(__a )
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
UpperCAmelCase_ = (self.image_size, self.image_size)
UpperCAmelCase_ = (self.patch_size, self.patch_size)
UpperCAmelCase_ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, num_patches + 1, self.hidden_size) )
def _lowercase (self : Tuple , __a : str , __a : Any ):
UpperCAmelCase_ = self.type_sequence_label_size
UpperCAmelCase_ = FlaxViTForImageClassification(config=__a )
UpperCAmelCase_ = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCAmelCase_ = 1
UpperCAmelCase_ = FlaxViTForImageClassification(__a )
UpperCAmelCase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase_ = model(__a )
def _lowercase (self : Optional[Any] ):
UpperCAmelCase_ = self.prepare_config_and_inputs()
        config , pixel_values = config_and_inputs
UpperCAmelCase_ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_flax
class __A ( UpperCamelCase__ , unittest.TestCase ):
a__ : Tuple = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()
def _lowercase (self : Any ):
UpperCAmelCase_ = FlaxViTModelTester(self )
UpperCAmelCase_ = ConfigTester(self , config_class=__a , has_text_modality=__a , hidden_size=37 )
def _lowercase (self : Tuple ):
self.config_tester.run_common_tests()
def _lowercase (self : str ):
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def _lowercase (self : str ):
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__a )
def _lowercase (self : Tuple ):
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(__a )
UpperCAmelCase_ = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ = [*signature.parameters.keys()]
UpperCAmelCase_ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __a )
def _lowercase (self : Optional[Any] ):
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
UpperCAmelCase_ = self._prepare_for_class(__a , __a )
UpperCAmelCase_ = model_class(__a )
@jax.jit
def model_jitted(__a : Tuple , **__a : List[Any] ):
return model(pixel_values=__a , **__a )
with self.subTest("JIT Enabled" ):
UpperCAmelCase_ = model_jitted(**__a ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
UpperCAmelCase_ = model_jitted(**__a ).to_tuple()
self.assertEqual(len(__a ) , len(__a ) )
for jitted_output, output in zip(__a , __a ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def _lowercase (self : Tuple ):
for model_class_name in self.all_model_classes:
UpperCAmelCase_ = model_class_name.from_pretrained("google/vit-base-patch16-224" )
UpperCAmelCase_ = model(np.ones((1, 3, 224, 224) ) )
self.assertIsNotNone(__a )
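The sequence-length arithmetic the tester encodes, with the stock ViT-Base numbers: an HxW image cut into PxP patches gives (H // P) * (W // P) tokens, plus one [CLS] token.

image_size, patch_size = 224, 16
num_patches = (image_size // patch_size) ** 2
assert num_patches == 196
assert num_patches + 1 == 197  # sequence length seen by the transformer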
| 1
| 1
|
from math import isclose, sqrt
def next_point(
    point_x: float, point_y: float, incoming_gradient: float
) -> tuple[float, float, float]:
    """simple docstring"""
    normal_gradient = point_y / 4 / point_x
    sa = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
    ca = (1 - normal_gradient * normal_gradient) / (
        1 + normal_gradient * normal_gradient
    )
    outgoing_gradient = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient)
    # to find the next point, solve the simultaneous equations:
    # y^2 + 4x^2 = 100
    # y - b = m * (x - a)
    # ==> A x^2 + B x + C = 0
    quadratic_term = outgoing_gradient**2 + 4
    linear_term = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
    constant_term = (point_y - outgoing_gradient * point_x) ** 2 - 100
    x_minus = (
        -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)
    x_plus = (
        -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)
    # two solutions, one of which is our input point
    next_x = x_minus if isclose(x_plus, point_x) else x_plus
    next_y = point_y + outgoing_gradient * (next_x - point_x)
    return next_x, next_y, outgoing_gradient


def solution(first_x_coord: float = 1.4, first_y_coord: float = -9.6) -> int:
    """simple docstring"""
    num_reflections: int = 0
    point_x = first_x_coord
    point_y = first_y_coord
    gradient = (10.1 - point_y) / (0.0 - point_x)
    while not (-0.01 <= point_x <= 0.01 and point_y > 0):
        point_x, point_y, gradient = next_point(point_x, point_y, gradient)
        num_reflections += 1
    return num_reflections
if __name__ == "__main__":
print(F'''{solution() = }''')
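The gradient algebra in next_point is the tangent of a reflected angle: with the normal at gradient tan(theta) and the incoming ray at tan(phi), the outgoing ray has gradient tan(2*theta - phi). A quick numeric check of that identity:

from math import isclose, radians, tan

theta, phi = radians(40), radians(10)
t, m = tan(theta), tan(phi)
sa = 2 * t / (1 + t * t)        # sin(2*theta)
ca = (1 - t * t) / (1 + t * t)  # cos(2*theta)
assert isclose((sa - ca * m) / (ca + sa * m), tan(2 * theta - phi))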
| 356
|
from __future__ import annotations
from random import random
class UpperCamelCase :
    def __init__( self , value = None ):
        self.value = value
        self.prior = random()
        self.left = None
        self.right = None
def __repr__( self ):
from pprint import pformat
if self.left is None and self.right is None:
return F"""'{self.value}: {self.prior:.5}'"""
else:
return pformat(
{F"""{self.value}: {self.prior:.5}""": (self.left, self.right)} , indent=1 )
def __str__( self ):
A__ = str(self.value ) + " "
A__ = str(self.left or "" )
A__ = str(self.right or "" )
return value + left + right
def split(root: Node | None, value: int) -> tuple[Node | None, Node | None]:
    """simple docstring"""
    if root is None:  # None tree is split into 2 Nones
        return None, None
    elif root.value is None:
        return None, None
    else:
        if value < root.value:
            left, root.left = split(root.left, value)
            return left, root
        else:
            root.right, right = split(root.right, value)
            return root, right


def merge(left: Node | None, right: Node | None) -> Node | None:
    """simple docstring"""
    if (not left) or (not right):  # If one node is None, return the other
        return left or right
    elif left.prior < right.prior:
        left.right = merge(left.right, right)
        return left
    else:
        right.left = merge(left, right.left)
        return right


def insert(root: Node | None, value: int) -> Node | None:
    """simple docstring"""
    node = Node(value)
    left, right = split(root, value)
    return merge(merge(left, node), right)


def erase(root: Node | None, value: int) -> Node | None:
    """simple docstring"""
    left, right = split(root, value - 1)
    _, right = split(right, value)
    return merge(left, right)


def inorder(root: Node | None) -> None:
    """simple docstring"""
    if not root:  # None
        return
    else:
        inorder(root.left)
        print(root.value, end=",")
        inorder(root.right)


def interact_treap(root: Node | None, args: str) -> Node | None:
    """simple docstring"""
    for arg in args.split():
        if arg[0] == "+":
            root = insert(root, int(arg[1:]))
        elif arg[0] == "-":
            root = erase(root, int(arg[1:]))
        else:
            print("Unknown command")
    return root


def main() -> None:
    """simple docstring"""
    root = None
    print(
        "enter numbers to create a tree, + value to add value into treap, "
        "- value to erase all nodes with value. 'q' to quit. ")
    args = input()
    while args != "q":
        root = interact_treap(root, args)
        print(root)
        args = input()
    print("good by!")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
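Expected behaviour of the routines above, reusing this module's own functions: whatever the random priorities turn out to be, the in-order traversal stays sorted.

root = None
for value in [5, 3, 8, 1]:
    root = insert(root, value)
inorder(root)  # prints 1,3,5,8,
root = erase(root, 8)
inorder(root)  # prints 1,3,5,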
| 198
| 0
|
import math
def insertion_sort(array: list, start: int = 0, end: int = 0) -> list:
    end = end or len(array)
    for i in range(start, end):
        temp_index = i
        temp_index_value = array[i]
        while temp_index != start and temp_index_value < array[temp_index - 1]:
            array[temp_index] = array[temp_index - 1]
            temp_index -= 1
        array[temp_index] = temp_index_value
    return array


def heapify(array: list, index: int, heap_size: int) -> None:  # Max Heap
    largest = index
    left_index = 2 * index + 1  # Left Node
    right_index = 2 * index + 2  # Right Node
    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index
    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index
    if largest != index:
        array[index], array[largest] = array[largest], array[index]
        heapify(array, largest, heap_size)


def heap_sort(array: list) -> list:
    n = len(array)
    for i in range(n // 2, -1, -1):
        heapify(array, i, n)
    for i in range(n - 1, 0, -1):
        array[i], array[0] = array[0], array[i]
        heapify(array, 0, i)
    return array


def median_of_3(array: list, first_index: int, middle_index: int, last_index: int) -> int:
    if (array[first_index] > array[middle_index]) != (
        array[first_index] > array[last_index]
    ):
        return array[first_index]
    elif (array[middle_index] > array[first_index]) != (
        array[middle_index] > array[last_index]
    ):
        return array[middle_index]
    else:
        return array[last_index]


def partition(array: list, low: int, high: int, pivot: int) -> int:
    i = low
    j = high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i], array[j] = array[j], array[i]
        i += 1


def sort(array: list) -> list:
    if len(array) == 0:
        return array
    max_depth = 2 * math.ceil(math.log2(len(array)))
    size_threshold = 16
    return intro_sort(array, 0, len(array), size_threshold, max_depth)


def intro_sort(array: list, start: int, end: int, size_threshold: int, max_depth: int) -> list:
    while end - start > size_threshold:
        if max_depth == 0:
            return heap_sort(array)
        max_depth -= 1
        pivot = median_of_3(array, start, start + ((end - start) // 2) + 1, end - 1)
        p = partition(array, start, end, pivot)
        intro_sort(array, p, end, size_threshold, max_depth)
        end = p
    return insertion_sort(array, start, end)
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by a comma : ").strip()
    unsorted = [float(item) for item in user_input.split(",")]
print(sort(unsorted))
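The two cutoffs above in numbers: the quicksort recursion budget is 2*ceil(log2(n)), after which heapsort takes over, while ranges shorter than 16 elements are finished by insertion sort.

import math

n = 1000
assert 2 * math.ceil(math.log2(n)) == 20          # depth budget before the heapsort fallback
assert sort([3.0, 1.0, 2.0]) == [1.0, 2.0, 3.0]   # tiny inputs go straight to insertion_sort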
| 111
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase : Optional[Any] = logging.get_logger(__name__)
UpperCAmelCase : Optional[int] = {
"""google/canine-s""": """https://huggingface.co/google/canine-s/resolve/main/config.json""",
# See all CANINE models at https://huggingface.co/models?filter=canine
}
class __lowerCAmelCase ( UpperCamelCase__):
_lowercase : List[Any] = """canine"""
def __init__( self , lowerCAmelCase__=7_6_8 , lowerCAmelCase__=1_2 , lowerCAmelCase__=1_2 , lowerCAmelCase__=3_0_7_2 , lowerCAmelCase__="gelu" , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=1_6_3_8_4 , lowerCAmelCase__=1_6 , lowerCAmelCase__=0.02 , lowerCAmelCase__=1E-12 , lowerCAmelCase__=0 , lowerCAmelCase__=0XE0_00 , lowerCAmelCase__=0XE0_01 , lowerCAmelCase__=4 , lowerCAmelCase__=4 , lowerCAmelCase__=8 , lowerCAmelCase__=1_6_3_8_4 , lowerCAmelCase__=1_2_8 , **lowerCAmelCase__ , ) -> Dict:
'''simple docstring'''
super().__init__(pad_token_id=lowerCAmelCase__ , bos_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ , **lowerCAmelCase__ )
a__ : Optional[int] =max_position_embeddings
a__ : str =hidden_size
a__ : Optional[Any] =num_hidden_layers
a__ : Tuple =num_attention_heads
a__ : Optional[Any] =intermediate_size
a__ : Optional[int] =hidden_act
a__ : List[Any] =hidden_dropout_prob
a__ : Union[str, Any] =attention_probs_dropout_prob
a__ : Optional[Any] =initializer_range
a__ : Union[str, Any] =type_vocab_size
a__ : Optional[int] =layer_norm_eps
# Character config:
a__ : int =downsampling_rate
a__ : Optional[Any] =upsampling_kernel_size
a__ : Union[str, Any] =num_hash_functions
a__ : Any =num_hash_buckets
a__ : int =local_transformer_stride
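The pattern the class above follows, reduced to its skeleton (a made-up config class, shown only to make the mechanics explicit; requires `transformers`): hyperparameters become attributes, and special token ids are forwarded to the base __init__.

from transformers import PretrainedConfig

class ToyConfig(PretrainedConfig):
    model_type = "toy"

    def __init__(self, hidden_size=64, num_layers=2, pad_token_id=0, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.hidden_size = hidden_size
        self.num_layers = num_layers

cfg = ToyConfig(num_layers=4)
assert cfg.num_layers == 4 and cfg.pad_token_id == 0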
| 95
| 0
|
"""simple docstring"""
from __future__ import annotations
A__ : List[str] = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def search(
    grid: list[list[int]],
    init: list[int],
    goal: list[int],
    cost: int,
    heuristic: list[list[int]],
) -> tuple[list[list[int]], list[list[int]]]:
    closed = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the reference grid
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the action grid
    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # cost from starting cell to destination cell
    cell = [[f, g, x, y]]
    found = False  # flag that is set when search is complete
    resign = False  # flag set if we can't find expand
    while not found and not resign:
        if len(cell) == 0:
            raise ValueError("Algorithm is unable to find solution")
        else:  # to choose the least costliest action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]
            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(DIRECTIONS)):  # to try out different valid actions
                    x2 = x + DIRECTIONS[i][0]
                    y2 = y + DIRECTIONS[i][1]
                    if x2 >= 0 and x2 < len(grid) and y2 >= 0 and y2 < len(grid[0]):
                        if closed[x2][y2] == 0 and grid[x2][y2] == 0:
                            g2 = g + cost
                            f2 = g2 + heuristic[x2][y2]
                            cell.append([f2, g2, x2, y2])
                            closed[x2][y2] = 1
                            action[x2][y2] = i
    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y])  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        x2 = x - DIRECTIONS[action[x][y]][0]
        y2 = y - DIRECTIONS[action[x][y]][1]
        x = x2
        y = y2
        invpath.append([x, y])
    path = []
    for i in range(len(invpath)):
        path.append(invpath[len(invpath) - 1 - i])
    return path, action
if __name__ == "__main__":
    grid = [
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 1, 0],
        [0, 0, 0, 0, 1, 0],
    ]
    init = [0, 0]
    # all coordinates are given in format [y,x]
    goal = [len(grid) - 1, len(grid[0]) - 1]
    cost = 1
    # the cost map which pushes the path closer to the goal
    heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1])
            if grid[i][j] == 1:
                # added extra penalty in the heuristic map
                heuristic[i][j] = 99
    path, action = search(grid, init, goal, cost, heuristic)
    print('ACTION MAP')
    for i in range(len(action)):
        print(action[i])
    for i in range(len(path)):
        print(path[i])
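The priority driving the search above is f = g + h: accumulated step cost plus the Manhattan heuristic. For the free cell (2, 0) of the grid above, reached by two unit-cost moves, with goal (4, 5):

g, cell, goal_cell = 2, (2, 0), (4, 5)
h = abs(cell[0] - goal_cell[0]) + abs(cell[1] - goal_cell[1])
assert h == 7 and g + h == 9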
| 209
|
"""simple docstring"""
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class lowercase__ :
def __init__( self : List[Any] , snake_case__ : Optional[Any] , snake_case__ : Tuple=13 , snake_case__ : str=7 , snake_case__ : Union[str, Any]=6 , snake_case__ : str=17 , snake_case__ : Any=23 , snake_case__ : int=11 , snake_case__ : Tuple=True , ):
lowerCamelCase_ : str =parent
lowerCamelCase_ : Union[str, Any] =batch_size
lowerCamelCase_ : List[Any] =seq_length
lowerCamelCase_ : Union[str, Any] =act_dim
lowerCamelCase_ : Optional[Any] =state_dim
lowerCamelCase_ : Optional[Any] =hidden_size
lowerCamelCase_ : Tuple =max_length
lowerCamelCase_ : List[Any] =is_training
def UpperCAmelCase__ ( self : Dict ):
lowerCamelCase_ : Optional[Any] =floats_tensor((self.batch_size, self.seq_length, self.state_dim) )
lowerCamelCase_ : Optional[Any] =floats_tensor((self.batch_size, self.seq_length, self.act_dim) )
lowerCamelCase_ : List[Any] =floats_tensor((self.batch_size, self.seq_length, 1) )
lowerCamelCase_ : Optional[Any] =floats_tensor((self.batch_size, self.seq_length, 1) )
lowerCamelCase_ : List[Any] =ids_tensor((self.batch_size, self.seq_length) , vocab_size=1000 )
lowerCamelCase_ : Optional[int] =random_attention_mask((self.batch_size, self.seq_length) )
lowerCamelCase_ : List[str] =self.get_config()
return (
config,
states,
actions,
rewards,
returns_to_go,
timesteps,
attention_mask,
)
def UpperCAmelCase__ ( self : Any ):
return DecisionTransformerConfig(
batch_size=self.batch_size , seq_length=self.seq_length , act_dim=self.act_dim , state_dim=self.state_dim , hidden_size=self.hidden_size , max_length=self.max_length , )
def UpperCAmelCase__ ( self : Optional[Any] , snake_case__ : Tuple , snake_case__ : Dict , snake_case__ : int , snake_case__ : Dict , snake_case__ : Optional[int] , snake_case__ : List[str] , snake_case__ : List[str] , ):
lowerCamelCase_ : Tuple =DecisionTransformerModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
lowerCamelCase_ : str =model(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
self.parent.assertEqual(result.state_preds.shape , states.shape )
self.parent.assertEqual(result.action_preds.shape , actions.shape )
self.parent.assertEqual(result.return_preds.shape , returns_to_go.shape )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.seq_length * 3, self.hidden_size) ) # seq length *3 as there are 3 modelities: states, returns and actions
def UpperCAmelCase__ ( self : List[str] ):
lowerCamelCase_ : List[str] =self.prepare_config_and_inputs()
        config , states , actions , rewards , returns_to_go , timesteps , attention_mask = config_and_inputs
lowerCamelCase_ : Optional[int] ={
"states": states,
"actions": actions,
"rewards": rewards,
"returns_to_go": returns_to_go,
"timesteps": timesteps,
"attention_mask": attention_mask,
}
return config, inputs_dict
@require_torch
class lowercase__ ( snake_case__, snake_case__, snake_case__, unittest.TestCase ):
_UpperCAmelCase :Optional[Any] = (DecisionTransformerModel,) if is_torch_available() else ()
_UpperCAmelCase :int = ()
_UpperCAmelCase :int = {"feature-extraction": DecisionTransformerModel} if is_torch_available() else {}
# Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids
_UpperCAmelCase :Union[str, Any] = False
# Ignoring of a failing tests from ModelTesterMixin, as the model does not implement these features
_UpperCAmelCase :Optional[Any] = False
_UpperCAmelCase :Tuple = False
_UpperCAmelCase :Tuple = False
_UpperCAmelCase :List[Any] = False
_UpperCAmelCase :Dict = False
_UpperCAmelCase :Any = False
_UpperCAmelCase :List[Any] = False
_UpperCAmelCase :int = False
_UpperCAmelCase :str = False
def UpperCAmelCase__ ( self : str ):
lowerCamelCase_ : Dict =DecisionTransformerModelTester(self )
lowerCamelCase_ : str =ConfigTester(self , config_class=snake_case__ , hidden_size=37 )
def UpperCAmelCase__ ( self : Union[str, Any] ):
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self : int ):
lowerCamelCase_ : Tuple =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
@slow
def UpperCAmelCase__ ( self : List[str] ):
for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ : str =DecisionTransformerModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
def UpperCAmelCase__ ( self : str ):
lowerCamelCase_ , lowerCamelCase_ : Dict =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase_ : List[Any] =model_class(snake_case__ )
lowerCamelCase_ : int =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase_ : List[Any] =[*signature.parameters.keys()]
lowerCamelCase_ : List[str] =[
"states",
"actions",
"rewards",
"returns_to_go",
"timesteps",
"attention_mask",
]
self.assertListEqual(arg_names[: len(snake_case__ )] , snake_case__ )
@require_torch
class lowercase__ ( unittest.TestCase ):
@slow
def UpperCAmelCase__ ( self : Any ):
lowerCamelCase_ : Optional[int] =2 # number of steps of autoregressive prediction we will perform
lowerCamelCase_ : int =10 # defined by the RL environment, may be normalized
lowerCamelCase_ : List[Any] =DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-expert" )
lowerCamelCase_ : Union[str, Any] =model.to(snake_case__ )
lowerCamelCase_ : Any =model.config
torch.manual_seed(0 )
lowerCamelCase_ : Optional[Any] =torch.randn(1 , 1 , config.state_dim ).to(device=snake_case__ , dtype=torch.floataa ) # env.reset()
lowerCamelCase_ : Optional[Any] =torch.tensor(
[[0.242_793, -0.28_693_074, 0.8_742_613], [0.67_815_274, -0.08_101_085, -0.12_952_147]] , device=snake_case__ )
lowerCamelCase_ : int =torch.tensor(snake_case__ , device=snake_case__ , dtype=torch.floataa ).reshape(1 , 1 , 1 )
lowerCamelCase_ : str =state
lowerCamelCase_ : Optional[int] =torch.zeros(1 , 0 , config.act_dim , device=snake_case__ , dtype=torch.floataa )
lowerCamelCase_ : int =torch.zeros(1 , 0 , device=snake_case__ , dtype=torch.floataa )
lowerCamelCase_ : Tuple =torch.tensor(0 , device=snake_case__ , dtype=torch.long ).reshape(1 , 1 )
for step in range(snake_case__ ):
lowerCamelCase_ : str =torch.cat([actions, torch.zeros(1 , 1 , config.act_dim , device=snake_case__ )] , dim=1 )
lowerCamelCase_ : Union[str, Any] =torch.cat([rewards, torch.zeros(1 , 1 , device=snake_case__ )] , dim=1 )
lowerCamelCase_ : Optional[int] =torch.ones(1 , states.shape[1] ).to(dtype=torch.long , device=states.device )
with torch.no_grad():
                state_pred , action_pred , return_pred = model(
states=snake_case__ , actions=snake_case__ , rewards=snake_case__ , returns_to_go=snake_case__ , timesteps=snake_case__ , attention_mask=snake_case__ , return_dict=snake_case__ , )
self.assertEqual(action_pred.shape , actions.shape )
self.assertTrue(torch.allclose(action_pred[0, -1] , expected_outputs[step] , atol=1E-4 ) )
            state , reward , done , _ = (  # env.step(action)
torch.randn(1 , 1 , config.state_dim ).to(device=snake_case__ , dtype=torch.floataa ),
1.0,
False,
{},
)
lowerCamelCase_ : str =action_pred[0, -1]
lowerCamelCase_ : Optional[int] =torch.cat([states, state] , dim=1 )
lowerCamelCase_ : Optional[Any] =returns_to_go[0, -1] - reward
lowerCamelCase_ : str =torch.cat([returns_to_go, pred_return.reshape(1 , 1 , 1 )] , dim=1 )
lowerCamelCase_ : int =torch.cat(
[timesteps, torch.ones((1, 1) , device=snake_case__ , dtype=torch.long ) * (step + 1)] , dim=1 )
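The returns-to-go bookkeeping inside the loop above, in isolation: each new conditioning target is the previous target minus the reward just collected.

target_return, rewards = 10.0, [1.0, 1.0]
returns_to_go = [target_return]
for reward in rewards:
    returns_to_go.append(returns_to_go[-1] - reward)
assert returns_to_go == [10.0, 9.0, 8.0]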
| 209
| 1
|
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class a ( _SCREAMING_SNAKE_CASE ):
_lowerCAmelCase = 42
_lowerCAmelCase = 42
_lowerCAmelCase = None
class a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
_lowerCAmelCase = 2
@register_to_config
def __init__( self , __magic_name__ = 0.0_2 , __magic_name__ = 1_00 , __magic_name__ = 1.0_0_7 , __magic_name__ = 80 , __magic_name__ = 0.0_5 , __magic_name__ = 50 , ) -> List[str]:
# standard deviation of the initial noise distribution
_a = sigma_max
# setable values
_a = None
_a = None
_a = None # sigma(t_i)
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ = None ) -> torch.FloatTensor:
return sample
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ = None ) -> Optional[int]:
_a = num_inference_steps
_a = np.arange(0 , self.num_inference_steps )[::-1].copy()
_a = torch.from_numpy(__magic_name__ ).to(__magic_name__ )
_a = [
(
self.config.sigma_max**2
* (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
)
for i in self.timesteps
]
_a = torch.tensor(__magic_name__ , dtype=torch.floataa , device=__magic_name__ )
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ , __magic_name__ = None ) -> Tuple[torch.FloatTensor, float]:
if self.config.s_min <= sigma <= self.config.s_max:
_a = min(self.config.s_churn / self.num_inference_steps , 2**0.5 - 1 )
else:
_a = 0
# sample eps ~ N(0, S_noise^2 * I)
_a = self.config.s_noise * randn_tensor(sample.shape , generator=__magic_name__ ).to(sample.device )
_a = sigma + gamma * sigma
_a = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
return sample_hat, sigma_hat
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = True , ) -> Union[KarrasVeOutput, Tuple]:
_a = sample_hat + sigma_hat * model_output
_a = (sample_hat - pred_original_sample) / sigma_hat
_a = sample_hat + (sigma_prev - sigma_hat) * derivative
if not return_dict:
return (sample_prev, derivative)
return KarrasVeOutput(
prev_sample=__magic_name__ , derivative=__magic_name__ , pred_original_sample=__magic_name__ )
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = True , ) -> Union[KarrasVeOutput, Tuple]:
_a = sample_prev + sigma_prev * model_output
_a = (sample_prev - pred_original_sample) / sigma_prev
_a = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
if not return_dict:
return (sample_prev, derivative)
return KarrasVeOutput(
prev_sample=__magic_name__ , derivative=__magic_name__ , pred_original_sample=__magic_name__ )
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ , __magic_name__ ) -> List[str]:
raise NotImplementedError()
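The schedule built in set_timesteps above is a geometric interpolation between sigma_max**2 and sigma_min**2, indexed by the reversed timesteps. With illustrative endpoints (not the configured defaults):

sigma_max, sigma_min, n = 100.0, 0.02, 4
timesteps = list(range(n))[::-1]  # [3, 2, 1, 0], as in set_timesteps
schedule = [sigma_max**2 * (sigma_min**2 / sigma_max**2) ** (i / (n - 1)) for i in timesteps]
assert abs(schedule[0] - sigma_min**2) < 1e-12  # i = n-1 lands on sigma_min**2
assert schedule[-1] == sigma_max**2             # i = 0 lands on sigma_max**2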
| 168
|
'''simple docstring'''
import sys
N = (  # the 1000-digit number from Project Euler problem 8
"73167176531330624919225119674426574742355349194934"
"96983520312774506326239578318016984801869478851843"
"85861560789112949495459501737958331952853208805511"
"12540698747158523863050715693290963295227443043557"
"66896648950445244523161731856403098711121722383113"
"62229893423380308135336276614282806444486645238749"
"30358907296290491560440772390713810515859307960866"
"70172427121883998797908792274921901699720888093776"
"65727333001053367881220235421809751254540594752243"
"52584907711670556013604839586446706324415722155397"
"53697817977846174064955149290862569321978468622482"
"83972241375657056057490261407972968652414535100474"
"82166370484403199890008895243450658541227588666881"
"16427171479924442928230863465674813919123162824586"
"17866458359124566529476545682848912883142607690042"
"24219022671055626321111109370544217506941658960408"
"07198403850962455444362981230987879927244284909188"
"84580156166097919133875499200524063689912560717606"
"05886116467109405077541002256983155200055935729725"
"71636269561882670428252483600823257530420752963450"
)
def solution(n: str = N) -> int:
    """simple docstring"""
    largest_product = -sys.maxsize - 1
    for i in range(len(n) - 12):
        product = 1
        for j in range(13):
            product *= int(n[i + j])
        if product > largest_product:
            largest_product = product
    return largest_product
if __name__ == "__main__":
print(f'''{solution() = }''')
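The same scan on a toy input, to make the windowing explicit: the largest product of 3 adjacent digits of "123456" is 4 * 5 * 6.

from math import prod

digits = "123456"
window = 3
best = max(prod(int(d) for d in digits[i : i + window]) for i in range(len(digits) - window + 1))
assert best == 120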
| 168
| 1
|
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from t5x import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
__UpperCAmelCase = {
'/attention/': '/0/SelfAttention/',
'/self_attention/': '/0/SelfAttention/',
'/encoder_decoder_attention/': '/1/EncDecAttention/',
'value': 'v',
'query': 'q',
'key': 'k',
'out': 'o',
'pre_self_attention_layer_norm': '0/layer_norm',
'pre_cross_attention_layer_norm': '1/layer_norm',
'pre_attention_layer_norm': '0/layer_norm', # previously 1, but seems wrong
'token_embedder': 'shared',
'encoder_norm': 'final_layer_norm',
'decoder_norm': 'final_layer_norm',
'relpos_bias/rel_embedding': 'block/0/layer/0/SelfAttention/relative_attention_bias/weight',
'router/router_weights/w/': 'router/classifier/',
'roer/roer_weights/w/': 'router/classifier/',
'logits_dense': 'lm_head',
}
def rename_keys( lowercase__ : Tuple ) -> Dict:
'''simple docstring'''
lowerCAmelCase_ : Optional[Any] = list(s_dict.keys() )
for key in keys:
lowerCAmelCase_ : Tuple = R""".*/layers_(\d+)"""
lowerCAmelCase_ : Tuple = key
if re.match(lowercase__ , lowercase__ ):
lowerCAmelCase_ : Union[str, Any] = re.sub(R"""layers_(\d+)""" , R"""block/\1/layer""" , lowercase__ )
lowerCAmelCase_ : Any = R"""(encoder|decoder)\/"""
if re.match(lowercase__ , lowercase__ ):
lowerCAmelCase_ : List[Any] = re.match(lowercase__ , lowercase__ ).groups()
if groups[0] == "encoder":
lowerCAmelCase_ : Tuple = re.sub(R"""/mlp/""" , R"""/1/mlp/""" , lowercase__ )
lowerCAmelCase_ : Tuple = re.sub(R"""/pre_mlp_layer_norm/""" , R"""/1/layer_norm/""" , lowercase__ )
elif groups[0] == "decoder":
lowerCAmelCase_ : List[str] = re.sub(R"""/mlp/""" , R"""/2/mlp/""" , lowercase__ )
lowerCAmelCase_ : Optional[int] = re.sub(R"""/pre_mlp_layer_norm/""" , R"""/2/layer_norm/""" , lowercase__ )
# 2. Convert other classic mappings
for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
if old_key in new_key:
lowerCAmelCase_ : List[Any] = new_key.replace(lowercase__ , lowercase__ )
print(f'{key} -> {new_key}' )
lowerCAmelCase_ : Dict = s_dict.pop(lowercase__ )
if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
lowerCAmelCase_ : int = s_dict[
"""encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"""
].T
if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
lowerCAmelCase_ : Any = s_dict[
"""decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"""
].T
# 3. Take extra care of the EXPERTS layer
for key in list(s_dict.keys() ):
if "expert" in key:
lowerCAmelCase_ : str = s_dict[key].shape[0]
lowerCAmelCase_ : List[Any] = s_dict[key]
for idx in range(lowercase__ ):
                s_dict[key.replace("""expert/""" , f"experts/expert_{idx}/" )] = expert_weihts[idx]  # assumed layout of the per-expert keys
                print(f'{key} -> {key.replace("expert/" , f"experts/expert_{idx}/" )}' )
s_dict.pop(lowercase__ )
return s_dict
__UpperCAmelCase = {
'NUM_ENCODER_LAYERS': 'num_layers',
'NUM_DECODER_LAYERS': 'num_decoder_layers',
'NUM_HEADS': 'num_heads',
'HEAD_DIM': 'd_kv',
'EMBED_DIM': 'd_model',
'MLP_DIM': 'd_ff',
'NUM_SELECTED_EXPERTS': 'num_selected_experts',
'NUM_ENCODER_SPARSE_LAYERS': 'num_sparse_encoder_layers',
'NUM_DECODER_SPARSE_LAYERS': 'num_sparse_decoder_layers',
'dense.MlpBlock.activations': 'feed_forward_proj',
}
def convert_gin_to_config( lowercase__ : List[str] , lowercase__ : Dict ) -> int:
'''simple docstring'''
import regex as re
with open(lowercase__ , """r""" ) as f:
lowerCAmelCase_ : Optional[Any] = f.read()
lowerCAmelCase_ : Tuple = re.findall(R"""(.*) = ([0-9.]*)""" , lowercase__ )
lowerCAmelCase_ : Optional[int] = {}
for param, value in regex_match:
if param in GIN_TO_CONFIG_MAPPING and value != "":
lowerCAmelCase_ : Any = float(lowercase__ ) if """.""" in value else int(lowercase__ )
lowerCAmelCase_ : Optional[int] = re.findall(R"""(.*activations) = \(\'(.*)\',\)""" , lowercase__ )[0]
lowerCAmelCase_ : Optional[Any] = str(activation[1] )
lowerCAmelCase_ : int = num_experts
lowerCAmelCase_ : Optional[int] = SwitchTransformersConfig(**lowercase__ )
return config
def convert_flax_checkpoint_to_pytorch( lowercase__ : Tuple , lowercase__ : List[Any] , lowercase__ : Dict=None , lowercase__ : Any="./" , lowercase__ : List[Any]=8 ) -> List[Any]:
'''simple docstring'''
print(f'Loading flax weights from : {flax_checkpoint_path}' )
    lowerCAmelCase_ : Optional[int] = checkpoints.load_t5x_checkpoint(lowercase__ )
if gin_file is not None:
lowerCAmelCase_ : Any = convert_gin_to_config(lowercase__ , lowercase__ )
else:
lowerCAmelCase_ : Any = SwitchTransformersConfig.from_pretrained(lowercase__ )
lowerCAmelCase_ : List[str] = SwitchTransformersForConditionalGeneration(lowercase__ )
lowerCAmelCase_ : str = flax_params["""target"""]
lowerCAmelCase_ : int = flatten_dict(lowercase__ , sep="""/""" )
lowerCAmelCase_ : int = rename_keys(lowercase__ )
lowerCAmelCase_ : int = unflatten_dict(lowercase__ , sep="""/""" )
# Load the flax params in the PT model
load_flax_weights_in_pytorch_model(lowercase__ , lowercase__ )
print(f'Save PyTorch model to {pytorch_dump_path}' )
pt_model.save_pretrained(lowercase__ )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--switch_t5x_checkpoint_path',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the'
' model architecture. If not provided, a `gin_file` has to be provided.'
),
)
parser.add_argument(
'--gin_file',
default=None,
type=str,
required=False,
help='Path to the gin config file. If not provided, a `config_file` has to be passed ',
)
parser.add_argument(
'--config_name', default=None, type=str, required=False, help='Config name of SwitchTransformers model.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output pytorch model.'
)
parser.add_argument('--num_experts', default=8, type=int, required=False, help='Number of experts')
__UpperCAmelCase = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
        args.switch_t5x_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
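The core renaming trick used by rename_keys above, in isolation: a capture group carries the layer index across the substitution.

import re

key = "encoder/layers_3/mlp/wi/kernel"
new_key = re.sub(r"layers_(\d+)", r"block/\1/layer", key)
assert new_key == "encoder/block/3/layer/mlp/wi/kernel"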
| 28
|
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class __a ( __UpperCamelCase ,__UpperCamelCase ):
__snake_case : Union[str, Any] = """pixel_values"""
__snake_case : Optional[Any] = False
__snake_case : Dict = TimmBackboneConfig
def __init__( self : List[str] , UpperCAmelCase : int , **UpperCAmelCase : List[str] ):
requires_backends(self , """timm""" )
super().__init__(UpperCAmelCase )
lowerCAmelCase_ : List[Any] = config
if config.backbone is None:
raise ValueError("""backbone is not set in the config. Please set it to a timm model name.""" )
if config.backbone not in timm.list_models():
raise ValueError(F'backbone {config.backbone} is not supported by timm.' )
if hasattr(UpperCAmelCase , """out_features""" ) and config.out_features is not None:
raise ValueError("""out_features is not supported by TimmBackbone. Please use out_indices instead.""" )
        pretrained = getattr(config , """use_pretrained_backbone""" , None )
if pretrained is None:
raise ValueError("""use_pretrained_backbone is not set in the config. Please set it to True or False.""" )
# We just take the final layer by default. This matches the default for the transformers models.
lowerCAmelCase_ : str = config.out_indices if getattr(UpperCAmelCase , """out_indices""" , UpperCAmelCase ) is not None else (-1,)
lowerCAmelCase_ : Optional[int] = timm.create_model(
config.backbone , pretrained=UpperCAmelCase , features_only=config.features_only , in_chans=config.num_channels , out_indices=UpperCAmelCase , **UpperCAmelCase , )
# These are used to control the output of the model when called. If output_hidden_states is True, then
# return_layers is modified to include all layers.
lowerCAmelCase_ : Union[str, Any] = self._backbone.return_layers
lowerCAmelCase_ : Dict = {layer["""module"""]: str(UpperCAmelCase ) for i, layer in enumerate(self._backbone.feature_info.info )}
super()._init_backbone(UpperCAmelCase )
@classmethod
def A ( cls : Dict , UpperCAmelCase : Union[str, Any] , *UpperCAmelCase : List[Any] , **UpperCAmelCase : Dict ):
requires_backends(cls , ["""vision""", """timm"""] )
from ...models.timm_backbone import TimmBackboneConfig
lowerCAmelCase_ : Optional[Any] = kwargs.pop("""config""" , TimmBackboneConfig() )
lowerCAmelCase_ : Union[str, Any] = kwargs.pop("""use_timm_backbone""" , UpperCAmelCase )
if not use_timm:
raise ValueError("""use_timm_backbone must be True for timm backbones""" )
lowerCAmelCase_ : Union[str, Any] = kwargs.pop("""num_channels""" , config.num_channels )
lowerCAmelCase_ : Tuple = kwargs.pop("""features_only""" , config.features_only )
lowerCAmelCase_ : List[str] = kwargs.pop("""use_pretrained_backbone""" , config.use_pretrained_backbone )
lowerCAmelCase_ : Optional[Any] = kwargs.pop("""out_indices""" , config.out_indices )
lowerCAmelCase_ : Optional[Any] = TimmBackboneConfig(
backbone=UpperCAmelCase , num_channels=UpperCAmelCase , features_only=UpperCAmelCase , use_pretrained_backbone=UpperCAmelCase , out_indices=UpperCAmelCase , )
return super()._from_config(UpperCAmelCase , **UpperCAmelCase )
def A ( self : Union[str, Any] , UpperCAmelCase : Union[str, Any] ):
pass
def A ( self : Union[str, Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : Optional[Any]=None , UpperCAmelCase : List[Any]=None , UpperCAmelCase : int=None , **UpperCAmelCase : Any ):
lowerCAmelCase_ : int = return_dict if return_dict is not None else self.config.use_return_dict
lowerCAmelCase_ : Dict = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowerCAmelCase_ : Any = output_attentions if output_attentions is not None else self.config.output_attentions
if output_attentions:
raise ValueError("""Cannot output attentions for timm backbones at the moment""" )
if output_hidden_states:
# We modify the return layers to include all the stages of the backbone
lowerCAmelCase_ : Optional[Any] = self._all_layers
lowerCAmelCase_ : List[Any] = self._backbone(UpperCAmelCase , **UpperCAmelCase )
lowerCAmelCase_ : str = self._return_layers
lowerCAmelCase_ : Any = tuple(hidden_states[i] for i in self.out_indices )
else:
lowerCAmelCase_ : Tuple = self._backbone(UpperCAmelCase , **UpperCAmelCase )
lowerCAmelCase_ : Optional[int] = None
lowerCAmelCase_ : List[str] = tuple(UpperCAmelCase )
lowerCAmelCase_ : int = tuple(UpperCAmelCase ) if hidden_states is not None else None
if not return_dict:
lowerCAmelCase_ : Optional[Any] = (feature_maps,)
if output_hidden_states:
lowerCAmelCase_ : Tuple = output + (hidden_states,)
return output
return BackboneOutput(feature_maps=UpperCAmelCase , hidden_states=UpperCAmelCase , attentions=UpperCAmelCase )
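# Minimal usage sketch. The backbone name "resnet18" is an illustrative assumption
# (any timm model name works) and pretrained weights may be downloaded on first use:
#
#   import torch
#   from transformers import TimmBackboneConfig
#
#   config = TimmBackboneConfig(backbone="resnet18", out_indices=(2, 3))
#   backbone = TimmBackbone(config)
#   outputs = backbone(torch.randn(1, 3, 224, 224))
#   print([fm.shape for fm in outputs.feature_maps])  # one feature map per requested stage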
| 28
| 1
|
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe.model")


class BartphoTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BartphoTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        vocab = ["▁This", "▁is", "▁a", "▁t", "est"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.monolingual_vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["monolingual_vocab_file"])
        with open(self.monolingual_vocab_file, "w", encoding="utf-8") as fp:
            for token in vocab_tokens:
                fp.write(f"{token} {vocab_tokens[token]}\n")

        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BartphoTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "This is a là test"
        output_text = "This is a<unk><unk> test"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        text = "This is a là test"
        bpe_tokens = "▁This ▁is ▁a ▁l à ▁t est".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 5, 6, 3, 3, 7, 8, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
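# A quick round-trip sketch using the fixture above ("dict.txt" is a placeholder
# monolingual vocab path; actual ids depend on the fixture sentencepiece model):
#
#   tok = BartphoTokenizer(SAMPLE_VOCAB, "dict.txt", unk_token="<unk>")
#   tok.tokenize("This is a test")  # -> ["▁This", "▁is", "▁a", "▁t", "est"]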
| 326
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_distilbert": [
"DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"DistilBertConfig",
"DistilBertOnnxConfig",
],
"tokenization_distilbert": ["DistilBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ["DistilBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_distilbert"] = [
"DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DistilBertForMaskedLM",
"DistilBertForMultipleChoice",
"DistilBertForQuestionAnswering",
"DistilBertForSequenceClassification",
"DistilBertForTokenClassification",
"DistilBertModel",
"DistilBertPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_distilbert"] = [
"TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFDistilBertForMaskedLM",
"TFDistilBertForMultipleChoice",
"TFDistilBertForQuestionAnswering",
"TFDistilBertForSequenceClassification",
"TFDistilBertForTokenClassification",
"TFDistilBertMainLayer",
"TFDistilBertModel",
"TFDistilBertPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_distilbert"] = [
"FlaxDistilBertForMaskedLM",
"FlaxDistilBertForMultipleChoice",
"FlaxDistilBertForQuestionAnswering",
"FlaxDistilBertForSequenceClassification",
"FlaxDistilBertForTokenClassification",
"FlaxDistilBertModel",
"FlaxDistilBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
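# What the lazy module buys you, as a sketch: importing the package is cheap, and a
# heavy backend (torch/tf/flax) is only imported when the matching symbol is accessed.
#
#   from transformers.models import distilbert
#   distilbert.DistilBertConfig   # resolves from configuration_distilbert, no torch yet
#   distilbert.DistilBertModel    # first access triggers the torch-backed import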
| 10
| 0
|
'''simple docstring'''
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
default_cache_path = HUGGINGFACE_HUB_CACHE

CONFIG_NAME = "config.json"
WEIGHTS_NAME = "diffusion_pytorch_model.bin"
FLAX_WEIGHTS_NAME = "diffusion_flax_model.msgpack"
ONNX_WEIGHTS_NAME = "model.onnx"
SAFETENSORS_WEIGHTS_NAME = "diffusion_pytorch_model.safetensors"
ONNX_EXTERNAL_WEIGHTS_NAME = "weights.pb"
HUGGINGFACE_CO_RESOLVE_ENDPOINT = "https://huggingface.co"
DIFFUSERS_CACHE = default_cache_path
DIFFUSERS_DYNAMIC_MODULE_NAME = "diffusers_modules"
HF_MODULES_CACHE = os.getenv("HF_MODULES_CACHE", os.path.join(hf_cache_home, "modules"))
DEPRECATED_REVISION_ARGS = ["fp16", "non-ema"]
TEXT_ENCODER_ATTN_MODULE = ".self_attn"
| 355
|
'''simple docstring'''
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModel4XPU
from .scripts import test_script, test_sync, test_ops # isort: skip
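# Usage sketch: the markers re-exported above decorate tests so they are skipped when
# the required hardware or dependency is absent.
#
#   from accelerate.test_utils import require_cuda, slow
#
#   @require_cuda
#   @slow
#   def test_big_model_on_gpu():
#       ...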
| 13
| 0
|
"""simple docstring"""
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
_unmatched = object()

# For specifying empty leaf dict `{}`
empty_dict = object()


def _match(qs, ks):
    """Return True if regexes in qs match any window of strings in tuple ks."""
    # compile regexes and force complete match
    qts = tuple(re.compile(x + "$") for x in qs)
    # slide a window of len(qs) over ks
    for i in range(len(ks) - len(qs) + 1):
        matches = [x.match(y) for x, y in zip(qts, ks[i:])]
        if matches and all(matches):
            return True
    return False


def _replacement_rules(rules):
    def replace(key, val):
        for rule, replacement in rules:
            if _match(rule, key):
                return replacement
        return val

    return replace


def _get_partition_rules():
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P("mp", None)),
        (("transformer", "wte", "embedding"), P("mp", None)),
        # attention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None, "mp")),
        (("attention", "out_proj", "kernel"), P("mp", None)),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None, "mp")),
        (("mlp", "c_fc", "bias"), P("mp")),
        (("mlp", "c_proj", "kernel"), P("mp", None)),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]


def set_partitions(in_dict):
    rules = _get_partition_rules()
    replace = _replacement_rules(rules)

    initd = {k: _unmatched for k in flatten_dict(in_dict)}

    result = {k: replace(k, v) for k, v in initd.items()}

    assert _unmatched not in result.values(), "Incomplete partition spec."

    return freeze(unflatten_dict(result))
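# Minimal usage sketch (assumes a GPT-2-style flax parameter tree; names such as
# ("transformer", "wte", "embedding") are the ones the rules above expect):
#
#   from flax.core.frozen_dict import unfreeze
#
#   param_spec = set_partitions(unfreeze(model.params))
#   # every leaf is now either a PartitionSpec or None (replicated)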
| 105
|
import os
import numpy
import onnx
def _is_equal_tensor_proto(a, b):
    name_a = a.name
    name_b = b.name
    a.name = ""
    b.name = ""
    res = a == b  # compare the full protos once the names are blanked out
    a.name = name_a
    b.name = name_b
    return res


def _node_replace_input_with(node_proto, name, new_name):
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)

    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)


def _graph_replace_input_with(graph_proto, name, new_name):
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)


def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i

        name_i = inits[i].name
        name_ref = inits[ref_i].name

        model_without_ext.graph.initializer.remove(inits[i])

        # point every node that consumed the removed initializer at the kept one
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)


def remove_dup_initializers(fp):
    """
    fp: path to the onnx file
    """
    model_file_folder = os.path.dirname(fp)
    model_file_name = os.path.basename(fp)

    model = onnx.load(os.path.join(model_file_folder, model_file_name))

    inits = list(model.graph.initializer)

    dup_set = set()
    dup_map = {}
    ind_to_replace = []

    total_reduced_size = 0

    for i in range(len(inits)):
        if i in dup_set:
            continue

        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)

                # TensorProto data types: 1 = FLOAT, 6 = INT32, 7 = INT64, 11 = DOUBLE
                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:
                    mem_size *= 4
                elif dtype == 6:
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8
                else:
                    print("unexpected data type: ", dtype)
                total_reduced_size += mem_size

                name_i = inits[i].name
                name_j = inits[j].name

                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))

    print("total reduced size: ", total_reduced_size / 1024 / 1024 / 1024, "GB")

    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)

    optimized_model_file_name = "optimized_" + model_file_name
    new_model = os.path.join(model_file_folder, optimized_model_file_name)
    onnx.save(model, new_model)

    return new_model
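# Usage sketch ("/path/to/model.onnx" is a placeholder):
#
#   optimized = remove_dup_initializers("/path/to/model.onnx")
#   # the deduplicated graph is saved next to the input as optimized_model.onnx,
#   # and the returned value is that new path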
| 253
| 0
|
"""simple docstring"""
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
logger = logging.get_logger(__name__)
logging.set_verbosity_info()
def convert_prophetnet_checkpoint_to_pytorch(prophetnet_checkpoint_path: str, pytorch_dump_folder_path: str):
    if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )

    special_keys = ["key_proj", "value_proj", "query_proj"]

    mapping = {
        "self_attn": "ngram_self_attn",
        "cross_attn": "encoder_attn",
        "cross_attn_layer_norm": "encoder_attn_layer_norm",
        "feed_forward_layer_norm": "final_layer_norm",
        "feed_forward": "",
        "intermediate": "fc1",
        "output": "fc2",
        "key_proj": "k_proj",
        "query_proj": "q_proj",
        "value_proj": "v_proj",
        "word_embeddings": "embed_tokens",
        "embeddings_layer_norm": "emb_layer_norm",
        "relative_pos_embeddings": "relative_linear",
        "ngram_embeddings": "ngram_input_embed",
        "position_embeddings": "embed_positions",
    }

    for key in loading_info["missing_keys"]:
        attributes = key.split(".")

        if attributes[0] == "lm_head":
            model = prophet
            old_model = prophet_old
        else:
            model = prophet.prophetnet
            old_model = prophet_old.model

        is_key_init = False
        for attribute in attributes:
            if attribute in mapping:
                old_attribute = mapping[attribute]
                if not hasattr(old_model, old_attribute) and len(old_attribute) > 0:
                    old_attribute = attribute
            elif hasattr(old_model, attribute):
                old_attribute = attribute

            if attribute == "weight":
                assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
                model.weight = old_model.weight
                logger.info(f"{attribute} is initialized.")
                is_key_init = True
                break
            elif attribute == "bias":
                assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
                model.bias = old_model.bias
                logger.info(f"{attribute} is initialized")
                is_key_init = True
                break
            elif attribute in special_keys and hasattr(old_model, "in_proj_weight"):
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model, attribute)
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
                if attribute == "query_proj":
                    model.query_proj.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :])
                    model.query_proj.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim])
                elif attribute == "key_proj":
                    model.key_proj.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :])
                    model.key_proj.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim])
                elif attribute == "value_proj":
                    model.value_proj.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :])
                    model.value_proj.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :])
                is_key_init = True
                break
            elif attribute == "position_embeddings":
                assert (
                    model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
                ), "Hidden size has to match"
                assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
                model.position_embeddings.weight = nn.Parameter(old_model.embed_positions.weight[:512, :])
                is_key_init = True
                break

            if attribute.isdigit():
                model = model[int(attribute)]
                old_model = old_model[int(attribute)]
            else:
                model = getattr(model, attribute)

                if old_attribute == "":
                    old_model = old_model
                else:
                    if not hasattr(old_model, old_attribute):
                        raise ValueError(f"{old_model} does not have {old_attribute}")
                    old_model = getattr(old_model, old_attribute)

        if not is_key_init:
            raise ValueError(f"{key} was not correctly initialized!")

    print(f"Saving model to {pytorch_dump_folder_path}")
    prophet.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--prophetnet_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
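# Usage sketch (the "_old" checkpoint id is a placeholder following the note above
# about checkpoints saved under patrickvonplaten/..._old):
#
#   python convert_prophetnet_original_pytorch_checkpoint_to_pytorch.py \
#       --prophetnet_checkpoint_path patrickvonplaten/prophetnet-large-uncased_old \
#       --pytorch_dump_folder_path ./prophetnet-large-uncased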
| 356
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
    XLNetTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlnet-base-cased": None,
    "xlnet-large-cased": None,
}

SPIECE_UNDERLINE = "▁"

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4


class XLNetTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"
    slow_tokenizer_class = XLNetTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls_segment_id = [2]

        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
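# A small sketch of the XLNet sequence-pair layout built above: unlike BERT, the
# special tokens go at the END of the sequence (A + <sep> + B + <sep> + <cls>).
#
#   tok = XLNetTokenizerFast.from_pretrained("xlnet-base-cased")
#   tok.build_inputs_with_special_tokens([10, 11], [20, 21])
#   # -> [10, 11, sep_id, 20, 21, sep_id, cls_id]
#   tok.create_token_type_ids_from_sequences([10, 11], [20, 21])
#   # -> [0, 0, 0, 1, 1, 1, 2]   (2 is the CLS segment id)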
| 65
| 0
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyInpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyInpaintPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image", "mask_image"]
    batch_params = [
        "prompt",
        "negative_prompt",
        "image_embeds",
        "negative_image_embeds",
        "image",
        "mask_image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base")
        return tokenizer
    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim,
            transformerDimensions=self.text_embedder_hidden_size,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=1005,
        )

        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()

        return text_encoder

    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 9,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "text_image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "text_image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )

        components = {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }

        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create mask
        mask = np.ones((64, 64), dtype=np.float32)
        mask[:32, :32] = 0

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "image": init_image,
            "mask_image": mask,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 2,
            "guidance_scale": 4.0,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_inpaint(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        print(f"image.shape {image.shape}")

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.8326919, 0.73790467, 0.20918581, 0.9309612, 0.5511791, 0.43713328, 0.5513321, 0.49922934, 0.59497786]
        )

        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class KandinskyInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_inpaint(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        mask = np.ones((768, 768), dtype=np.float32)
        mask[:250, 250:-250] = 0

        prompt = "a hat"

        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyInpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-inpaint", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            prompt,
            image=init_image,
            mask_image=mask,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
| 40
|
"""simple docstring"""
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
EXCLUDE_EXAMPLES = [
"cross_validation.py",
"gradient_accumulation.py",
"local_sgd.py",
"multi_process_metrics.py",
"memory.py",
"automatic_gradient_accumulation.py",
"fsdp_with_peak_mem_tracking.py",
"deepspeed_with_config_support.py",
"megatron_lm_gpt_pretraining.py",
]
class ExampleDifferenceTests(unittest.TestCase):
    """
    This TestCase checks that all of the `complete_*` scripts contain all of the
    information found in the `by_feature` scripts, line for line.
    """

    def one_complete_example(
        self, complete_file_name: str, parser_only: bool, secondary_filename: str = None, special_strings: list = None
    ):
        self.maxDiff = None
        by_feature_path = os.path.abspath(os.path.join("examples", "by_feature"))
        examples_path = os.path.abspath("examples")
        for item in os.listdir(by_feature_path):
            if item not in EXCLUDE_EXAMPLES:
                item_path = os.path.join(by_feature_path, item)
                if os.path.isfile(item_path) and ".py" in item_path:
                    with self.subTest(
                        tested_script=complete_file_name,
                        feature_script=item,
                        tested_section="main()" if parser_only else "training_function()",
                    ):
                        diff = compare_against_test(
                            os.path.join(examples_path, complete_file_name), item_path, parser_only, secondary_filename
                        )
                        diff = "\n".join(diff)
                        if special_strings is not None:
                            for string in special_strings:
                                diff = diff.replace(string, "")
                        self.assertEqual(diff, "")

    def test_nlp_examples(self):
        self.one_complete_example("complete_nlp_example.py", True)
        self.one_complete_example("complete_nlp_example.py", False)

    def test_cv_examples(self):
        cv_path = os.path.abspath(os.path.join("examples", "cv_example.py"))
        special_strings = [
            " " * 16 + "{\n\n",
            " " * 20 + '"accuracy": eval_metric["accuracy"],\n\n',
            " " * 20 + '"f1": eval_metric["f1"],\n\n',
            " " * 20 + '"train_loss": total_loss.item() / len(train_dataloader),\n\n',
            " " * 20 + '"epoch": epoch,\n\n',
            " " * 16 + "},\n\n",
            " " * 16 + "step=epoch,\n",
            " " * 12,
            " " * 8 + "for step, batch in enumerate(active_dataloader):\n",
        ]
        self.one_complete_example("complete_cv_example.py", True, cv_path, special_strings)
        self.one_complete_example("complete_cv_example.py", False, cv_path, special_strings)
@mock.patch.dict(os.environ, {"TESTING_MOCKED_DATALOADERS": "1"})
class FeatureExamplesTests(TempDirTestCase):
    clear_on_setup = False

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls._tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls._tmpdir, "default_config.yml")

        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        shutil.rmtree(cls._tmpdir)
    def test_checkpointing_by_epoch(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --checkpointing_steps epoch
        --output_dir {self.tmpdir}
        """.split()
        run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "epoch_0")))

    def test_checkpointing_by_steps(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --checkpointing_steps 1
        --output_dir {self.tmpdir}
        """.split()
        _ = run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "step_2")))

    def test_load_states_by_epoch(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir, "epoch_0")}
        """.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        self.assertNotIn("epoch 0:", output)
        self.assertIn("epoch 1:", output)

    def test_load_states_by_steps(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir, "step_2")}
        """.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        if torch.cuda.is_available():
            num_processes = torch.cuda.device_count()
        else:
            num_processes = 1
        if num_processes > 1:
            self.assertNotIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)
        else:
            self.assertIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)

    @slow
    def test_cross_validation(self):
        testargs = """
        examples/by_feature/cross_validation.py
        --num_folds 2
        """.split()
        with mock.patch.dict(os.environ, {"TESTING_MOCKED_DATALOADERS": "0"}):
            output = run_command(self._launch_args + testargs, return_stdout=True)
            results = re.findall("({.+})", output)
            results = [r for r in results if "accuracy" in r][-1]
            results = ast.literal_eval(results)
            self.assertGreaterEqual(results["accuracy"], 0.75)

    def test_multi_process_metrics(self):
        testargs = ["examples/by_feature/multi_process_metrics.py"]
        run_command(self._launch_args + testargs)

    @require_trackers
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_tracking(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            testargs = f"""
            examples/by_feature/tracking.py
            --with_tracking
            --project_dir {tmpdir}
            """.split()
            run_command(self._launch_args + testargs)
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "tracking")))

    def test_gradient_accumulation(self):
        testargs = ["examples/by_feature/gradient_accumulation.py"]
        run_command(self._launch_args + testargs)

    def test_local_sgd(self):
        testargs = ["examples/by_feature/local_sgd.py"]
        run_command(self._launch_args + testargs)
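# Usage sketch for running this suite from an accelerate checkout (path assumed
# from the repo layout; the slow cases need TESTING_MOCKED_DATALOADERS unset):
#
#   python -m pytest tests/test_examples.py -k "checkpointing" -s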
| 247
| 0
|
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_audio_spectrogram_transformer_config(model_name):
    config = ASTConfig()

    if "10-10" in model_name:
        pass
    elif "speech-commands" in model_name:
        config.max_length = 128
    elif "12-12" in model_name:
        config.time_stride = 12
        config.frequency_stride = 12
    elif "14-14" in model_name:
        config.time_stride = 14
        config.frequency_stride = 14
    elif "16-16" in model_name:
        config.time_stride = 16
        config.frequency_stride = 16
    else:
        raise ValueError("Model not supported")

    repo_id = "huggingface/label-files"
    if "speech-commands" in model_name:
        config.num_labels = 35
        filename = "speech-commands-v2-id2label.json"
    else:
        config.num_labels = 527
        filename = "audioset-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def rename_key(name):
    if "module.v" in name:
        name = name.replace("module.v", "audio_spectrogram_transformer")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "dist_token" in name:
        name = name.replace("dist_token", "embeddings.distillation_token")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    # transformer blocks
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    # final layernorm
    if "audio_spectrogram_transformer.norm" in name:
        name = name.replace("audio_spectrogram_transformer.norm", "audio_spectrogram_transformer.layernorm")
    # classifier head
    if "module.mlp_head.0" in name:
        name = name.replace("module.mlp_head.0", "classifier.layernorm")
    if "module.mlp_head.1" in name:
        name = name.replace("module.mlp_head.1", "classifier.dense")
    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            # fused qkv projections are split into separate query/key/value weights
            key_split = key.split(".")
            layer_num = int(key_split[3])
            dim = config.hidden_size
            if "weight" in key:
                prefix = f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention"
                orig_state_dict[f"{prefix}.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.value.weight"] = val[-dim:, :]
            else:
                prefix = f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention"
                orig_state_dict[f"{prefix}.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def remove_keys(state_dict):
    ignore_keys = [
        "module.v.head.weight",
        "module.v.head.bias",
        "module.v.head_dist.weight",
        "module.v.head_dist.bias",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
@torch.no_grad()
def convert_audio_spectrogram_transformer_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    config = get_audio_spectrogram_transformer_config(model_name)

    model_name_to_url = {
        "ast-finetuned-audioset-10-10-0.4593": (
            "https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.450": (
            "https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.448": (
            "https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.448-v2": (
            "https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1"
        ),
        "ast-finetuned-audioset-12-12-0.447": (
            "https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1"
        ),
        "ast-finetuned-audioset-14-14-0.443": (
            "https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1"
        ),
        "ast-finetuned-audioset-16-16-0.442": (
            "https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1"
        ),
        "ast-finetuned-speech-commands-v2": (
            "https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1"
        ),
    }

    # load original state_dict
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # remove some keys
    remove_keys(state_dict)
    # rename some keys
    new_state_dict = convert_state_dict(state_dict, config)

    # load 🤗 model
    model = ASTForAudioClassification(config)
    model.eval()

    model.load_state_dict(new_state_dict)

    # verify outputs on dummy input
    # source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
    mean = -4.2677393 if "speech-commands" not in model_name else -6.845978
    std = 4.5689974 if "speech-commands" not in model_name else 5.5654526
    max_length = 1024 if "speech-commands" not in model_name else 128
    feature_extractor = ASTFeatureExtractor(mean=mean, std=std, max_length=max_length)

    if "speech-commands" in model_name:
        dataset = load_dataset("speech_commands", "v0.02", split="validation")
        waveform = dataset[0]["audio"]["array"]
    else:
        filepath = hf_hub_download(
            repo_id="nielsr/audio-spectogram-transformer-checkpoint",
            filename="sample_audio.flac",
            repo_type="dataset",
        )

        waveform, _ = torchaudio.load(filepath)
        waveform = waveform.squeeze().numpy()

    inputs = feature_extractor(waveform, sampling_rate=16000, return_tensors="pt")

    # forward pass
    outputs = model(**inputs)
    logits = outputs.logits

    if model_name == "ast-finetuned-audioset-10-10-0.4593":
        expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602])
    elif model_name == "ast-finetuned-audioset-10-10-0.450":
        expected_slice = torch.tensor([-1.1986, -7.0903, -8.2718])
    elif model_name == "ast-finetuned-audioset-10-10-0.448":
        expected_slice = torch.tensor([-2.6128, -8.0080, -9.4344])
    elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
        expected_slice = torch.tensor([-1.5080, -7.4534, -8.8917])
    elif model_name == "ast-finetuned-audioset-12-12-0.447":
        expected_slice = torch.tensor([-0.5050, -6.5833, -8.0843])
    elif model_name == "ast-finetuned-audioset-14-14-0.443":
        expected_slice = torch.tensor([-0.3826, -7.0336, -8.2413])
    elif model_name == "ast-finetuned-audioset-16-16-0.442":
        expected_slice = torch.tensor([-1.2113, -6.9101, -8.3470])
    elif model_name == "ast-finetuned-speech-commands-v2":
        expected_slice = torch.tensor([6.1589, -8.0566, -8.7984])
    else:
        raise ValueError("Unknown model name")
    if not torch.allclose(logits[0, :3], expected_slice, atol=1e-4):
        raise ValueError("Logits don't match")
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving feature extractor to {pytorch_dump_folder_path}")
        feature_extractor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model and feature extractor to the hub...")
        model.push_to_hub(f"MIT/{model_name}")
        feature_extractor.push_to_hub(f"MIT/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="ast-finetuned-audioset-10-10-0.4593",
type=str,
help="Name of the Audio Spectrogram Transformer model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
    args = parser.parse_args()
convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
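# Usage sketch (the script filename is an assumption; weights are pulled from the
# Dropbox URLs above and the logits check runs on a sample audio clip):
#
#   python convert_audio_spectrogram_transformer_original_to_pytorch.py \
#       --model_name ast-finetuned-audioset-10-10-0.4593 \
#       --pytorch_dump_folder_path ./ast-finetuned-audioset-10-10-0.4593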
| 356
|
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
bindir = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(f'''{bindir}/../../examples/pytorch/translation'''):
from run_translation import main # noqa
set_seed(42)
_UpperCAmelCase : Union[str, Any] = "sshleifer/student_marian_en_ro_6_1"
_UpperCAmelCase : Any = "sshleifer/tiny-mbart"
@require_torch
class TestTrainerExt(TestCasePlus):
    def run_seq2seq_quick(
        self,
        distributed=False,
        extra_args_str=None,
        predict_with_generate=True,
        do_train=True,
        do_eval=True,
        do_predict=True,
    ):
        output_dir = self.run_trainer(
            eval_steps=1,
            max_len=12,
            model_name=MBART_TINY,
            num_train_epochs=1,
            distributed=distributed,
            extra_args_str=extra_args_str,
            predict_with_generate=predict_with_generate,
            do_train=do_train,
            do_eval=do_eval,
            do_predict=do_predict,
        )
        logs = TrainerState.load_from_json(os.path.join(output_dir, "trainer_state.json")).log_history

        if not do_eval:
            return

        eval_metrics = [log for log in logs if "eval_loss" in log.keys()]

        first_step_stats = eval_metrics[0]
        if predict_with_generate:
            assert "eval_bleu" in first_step_stats

            last_step_stats = eval_metrics[-1]
            assert isinstance(last_step_stats["eval_bleu"], float)
            assert not math.isnan(float(last_step_stats["eval_loss"])), "eval_loss must not be `nan`"

    @require_torch_non_multi_gpu
    def test_run_seq2seq_no_dist(self):
        self.run_seq2seq_quick()

    # verify that the trainer can handle non-distributed with n_gpu > 1
    @require_torch_multi_gpu
    def test_run_seq2seq_dp(self):
        self.run_seq2seq_quick(distributed=False)

    # verify that the trainer can handle distributed with n_gpu > 1
    @require_torch_multi_gpu
    def test_run_seq2seq_ddp(self):
        self.run_seq2seq_quick(distributed=True)

    @unittest.skip("Requires an update of the env running those tests")
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seq2seq_sharded_ddp(self):
        self.run_seq2seq_quick(distributed=True, extra_args_str="--sharded_ddp simple")

    @unittest.skip("Requires an update of the env running those tests")
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seq2seq_sharded_ddp_fp16(self):
        self.run_seq2seq_quick(distributed=True, extra_args_str="--sharded_ddp simple --fp16")

    @unittest.skip("Requires an update of the env running those tests")
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seq2seq_fully_sharded_ddp(self):
        self.run_seq2seq_quick(
            distributed=True, extra_args_str="--sharded_ddp zero_dp_2", predict_with_generate=False
        )

    @unittest.skip("Requires an update of the env running those tests")
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seq2seq_fully_sharded_ddp_fp16(self):
        self.run_seq2seq_quick(
            distributed=True, extra_args_str="--sharded_ddp zero_dp_2 --fp16", predict_with_generate=False
        )

    @require_apex
    @require_torch_gpu
    def test_run_seq2seq_apex(self):
        # XXX: apex breaks the trainer if it's run twice e.g. run_seq2seq.main() from the same
        # program and it breaks other tests that run from the same pytest worker, therefore until this is
        # sorted out it must be run only in an external program, that is distributed=True in this
        # test and only under one or more gpus - if we want cpu will need to make a special test
        #
        # specifically to the problem traced it to self.optimizer.step() - if it's run 2nd time via
        # 2nd main() call it botches the future eval.
        #
        self.run_seq2seq_quick(distributed=True, extra_args_str="--fp16 --fp16_backend=apex")

        # test 2nd time - was getting eval_loss': nan'
        # to reproduce the problem set distributed=False
        self.run_seq2seq_quick(distributed=True, extra_args_str="--fp16 --fp16_backend=apex")
    @parameterized.expand(["base", "low", "high", "mixed"])
    @require_torch_multi_gpu
    def test_trainer_log_level_replica(self, experiment_id):
        # as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout
        experiments = {
            # test with the default log_level - should be info and thus log info once
            "base": {"extra_args_str": "", "n_matches": 1},
            # test with low log_level and log_level_replica - should be noisy on all processes
            # now the info string should appear twice on 2 processes
            "low": {"extra_args_str": "--log_level debug --log_level_replica debug", "n_matches": 2},
            # test with high log_level and low log_level_replica
            # now the info string should appear once only on the replica
            "high": {"extra_args_str": "--log_level error --log_level_replica debug", "n_matches": 1},
            # test with high log_level and log_level_replica - should be quiet on all processes
            "mixed": {"extra_args_str": "--log_level error --log_level_replica error", "n_matches": 0},
        }

        data = experiments[experiment_id]
        kwargs = {"distributed": True, "predict_with_generate": False, "do_eval": False, "do_predict": False}
        log_info_string = "Running training"
        with CaptureStderr() as cl:
            self.run_seq2seq_quick(**kwargs, extra_args_str=data["extra_args_str"])
        n_matches = len(re.findall(log_info_string, cl.err))
        self.assertEqual(n_matches, data["n_matches"])
    @slow
    def test_run_seq2seq(self):
        output_dir = self.run_trainer(
            eval_steps=2,
            max_len=128,
            model_name=MARIAN_MODEL,
            learning_rate=3e-4,
            num_train_epochs=10,
            distributed=False,
        )

        # Check metrics
        logs = TrainerState.load_from_json(os.path.join(output_dir, "trainer_state.json")).log_history
        eval_metrics = [log for log in logs if "eval_loss" in log.keys()]
        first_step_stats = eval_metrics[0]
        last_step_stats = eval_metrics[-1]

        assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
        assert isinstance(last_step_stats["eval_bleu"], float)

        # test if do_predict saves generations and metrics
        contents = os.listdir(output_dir)
        contents = {os.path.basename(p) for p in contents}
        assert "generated_predictions.txt" in contents
        assert "predict_results.json" in contents
@slow
@require_bitsandbytes
def SCREAMING_SNAKE_CASE ( self: Tuple ):
from transformers.training_args import OptimizerNames
def train_and_return_metrics(_lowerCAmelCase: str ) -> Tuple[int, float]:
lowercase :Tuple = "--skip_memory_metrics 0"
lowercase :List[str] = self.run_trainer(
max_len=1_28 , model_name=_lowerCAmelCase , learning_rate=3e-4 , num_train_epochs=1 , optim=_lowerCAmelCase , distributed=_lowerCAmelCase , extra_args_str=_lowerCAmelCase , do_eval=_lowerCAmelCase , do_predict=_lowerCAmelCase , n_gpus_to_use=1 , )
# Check metrics
lowercase :List[str] = TrainerState.load_from_json(Path(_lowerCAmelCase , "trainer_state.json" ) ).log_history
lowercase :Dict = int(logs[0]["train_mem_gpu_peaked_delta"] / 2**20 )
lowercase :Any = int(logs[0]["train_mem_gpu_alloc_delta"] / 2**20 )
lowercase :List[str] = logs[0]["train_loss"]
return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss
lowercase , lowercase , lowercase :Optional[Any] = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value )
lowercase , lowercase , lowercase :List[str] = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value )
lowercase :List[Any] = gpu_alloc_mem_orig - gpu_alloc_mem_bnb
lowercase :List[str] = gpu_peak_mem_orig + gpu_alloc_mem_orig
lowercase :List[str] = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
lowercase :Tuple = gpu_total_mem_orig - gpu_total_mem_bnb
# sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which
# doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
# in 2 bytes and the diff in optim memory usage is derived as so:
#
# - normal 25*8=~200MB (8 bytes per param)
# - bnb 25*2= ~50MB (2 bytes per param)
#
# Thus we should expect ~150MB total memory saved.
#
# Peak memory should be the same - the total should be different by about that same margin
#
# After leaving a small margin to accommodate for differences between gpus let's check
# that we have at least 120MB in savings
lowercase :Union[str, Any] = 1_20
# uncomment the following if this test starts failing - requires py38 for a new print feature
# gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
# print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
# print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
# print(f"{gpu_alloc_mem_diff=}MB")
# print(f"{gpu_peak_mem_diff=}MB")
# print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
# print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
        self.assertGreater(
            gpu_alloc_mem_diff,
            expected_savings,
            "should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got"
            f" a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and"
            f" gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB",
        )

        self.assertGreater(
            gpu_total_mem_diff,
            expected_savings,
            "should use ~150MB less total gpu memory with BNB, compared to without it for this model but got"
            f" a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and"
            f" gpu_total_mem_bnb={gpu_total_mem_bnb}MB",
        )

        self.assertEqual(
            loss_orig, loss_bnb, f"loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}"
        )
    def run_trainer(self, max_len: int, model_name: str, num_train_epochs: int, learning_rate: float = 3e-3, optim: str = "adafactor", distributed: bool = False, extra_args_str: str = None, eval_steps: int = 0, predict_with_generate: bool = True, do_train: bool = True, do_eval: bool = True, do_predict: bool = True, n_gpus_to_use: int = None):
        data_dir = self.test_file_dir / "../fixtures/tests_samples/wmt_en_ro"
        output_dir = self.get_auto_remove_tmp_dir()
        args_train = f"\n            --model_name_or_path {model_name}\n            --train_file {data_dir}/train.json\n            --validation_file {data_dir}/val.json\n            --test_file {data_dir}/test.json\n            --output_dir {output_dir}\n            --overwrite_output_dir\n            --max_train_samples 8\n            --max_source_length {max_len}\n            --max_target_length {max_len}\n            --do_train\n            --num_train_epochs {str(num_train_epochs)}\n            --per_device_train_batch_size 4\n            --learning_rate {learning_rate}\n            --warmup_steps 8\n            --logging_steps 0\n            --logging_strategy no\n            --save_steps {str(eval_steps)}\n            --group_by_length\n            --label_smoothing_factor 0.1\n            --target_lang ro_RO\n            --source_lang en_XX\n        ".split()
        args_eval = f"\n            --do_eval\n            --per_device_eval_batch_size 4\n            --max_eval_samples 8\n            --val_max_target_length {max_len}\n            --evaluation_strategy steps\n            --eval_steps {str(eval_steps)}\n        ".split()
        args_predict = "\n            --do_predict\n        ".split()

        args = []
        if do_train:
            args += args_train
        if do_eval:
            args += args_eval
        if do_predict:
            args += args_predict
        if predict_with_generate:
            args += "--predict_with_generate".split()

        if do_train:
            if optim == "adafactor":
                args += "--adafactor".split()
            else:
                args += f"--optim {optim}".split()

        if extra_args_str is not None:
            args += extra_args_str.split()

        if distributed:
            if n_gpus_to_use is None:
                n_gpus_to_use = get_gpu_count()
            master_port = get_torch_dist_unique_port()
            distributed_args = f"\n                -m torch.distributed.run\n                --nproc_per_node={n_gpus_to_use}\n                --master_port={master_port}\n                {self.examples_dir_str}/pytorch/translation/run_translation.py\n            ".split()
            cmd = [sys.executable] + distributed_args + args
            # keep for quick debug
            # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] + cmd)); die
            execute_subprocess_async(cmd, env=self.get_env())
        else:
            testargs = ["run_translation.py"] + args
            with patch.object(sys, "argv", testargs):
                main()

        return output_dir
| 158
| 0
|
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class BioGptModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return BioGptConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=True, initializer_range=self.initializer_range,
        )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = BioGptModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        model = BioGptForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_biogpt_model_attention_mask_past(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        model = BioGptModel(config=config)
        model.to(torch_device)
        model.eval()

        # create attention mask
        attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)
        half_seq_length = self.seq_length // 2
        attn_mask[:, half_seq_length:] = 0

        # first forward pass
        output, past_key_values = model(input_ids, attention_mask=attn_mask).to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)

        # change a random masked slice from input_ids
        random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1
        random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1)
        input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens

        # append to next input_ids and attn_mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        attn_mask = torch.cat(
            [attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)], dim=1,
        )

        # get two different outputs
        output_from_no_past = model(next_input_ids, attention_mask=attn_mask)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values, attention_mask=attn_mask)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def create_and_check_biogpt_model_past_large_inputs(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        model = BioGptModel(config=config).to(torch_device).eval()

        attention_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = ids_tensor((self.batch_size, 3), 2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[
            "last_hidden_state"
        ]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1e-3))
    def create_and_check_forward_and_backwards(self, config, input_ids, input_mask, head_mask, token_type_ids, *args, gradient_checkpointing=False):
        model = BioGptForCausalLM(config)
        model.to(torch_device)
        if gradient_checkpointing:
            model.gradient_checkpointing_enable()

        result = model(input_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        result.loss.backward()

    def create_and_check_biogpt_weight_initialization(self, config, *args):
        model = BioGptModel(config)
        model_std = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers)
        for key in model.state_dict().keys():
            if "c_proj" in key and "weight" in key:
                self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key]) - model_std), 0.001)
                self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key]) - 0.0), 0.01)

    def create_and_check_biogpt_for_token_classification(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        config.num_labels = self.num_labels
        model = BioGptForTokenClassification(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class BioGptModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (BioGptForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": BioGptModel,
            "text-classification": BioGptForSequenceClassification,
            "text-generation": BioGptForCausalLM,
            "token-classification": BioGptForTokenClassification,
            "zero-shot": BioGptForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False

    def setUp(self):
        self.model_tester = BioGptModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BioGptConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_biogpt_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_biogpt_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_biogpt_model_att_mask_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_attention_mask_past(*config_and_inputs)

    def test_biogpt_gradient_checkpointing(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_forward_and_backwards(*config_and_inputs, gradient_checkpointing=True)

    def test_biogpt_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_past_large_inputs(*config_and_inputs)

    def test_biogpt_weight_initialization(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_weight_initialization(*config_and_inputs)

    def test_biogpt_token_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_for_token_classification(*config_and_inputs)
@slow
    def test_batch_generation(self):
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        model.to(torch_device)
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")

        tokenizer.padding_side = "left"

        # Define PAD Token = EOS Token = 50256
        tokenizer.pad_token = tokenizer.eos_token
        model.config.pad_token_id = model.config.eos_token_id

        # use different length sentences to test batching
        sentences = [
            "Hello, my dog is a little",
            "Today, I",
        ]

        inputs = tokenizer(sentences, return_tensors="pt", padding=True)
        input_ids = inputs["input_ids"].to(torch_device)

        outputs = model.generate(
            input_ids=input_ids, attention_mask=inputs["attention_mask"].to(torch_device),
        )

        inputs_non_padded = tokenizer(sentences[0], return_tensors="pt").input_ids.to(torch_device)
        output_non_padded = model.generate(input_ids=inputs_non_padded)

        num_paddings = inputs_non_padded.shape[-1] - inputs["attention_mask"][-1].long().sum().cpu().item()
        inputs_padded = tokenizer(sentences[1], return_tensors="pt").input_ids.to(torch_device)
        output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "Hello, my dog is a little bit bigger than a little bit.",
            "Today, I have a good idea of how to use the information",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
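    # Note on the test above: decoder-only LMs must be padded on the left, so that each
    # prompt's last real token sits directly before the generated continuation; right
    # padding would insert pad tokens between the prompt and the generation.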
    @slow
    def test_model_from_pretrained(self):
        for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BioGptModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_biogpt_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = BioGptForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_biogpt_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = BioGptForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
@require_torch
class BioGptModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_lm_head_model(self):
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        input_ids = torch.tensor([[2, 4805, 9, 656, 21]])
        output = model(input_ids)[0]

        vocab_size = 42384

        expected_shape = torch.Size((1, 5, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-9.5236, -9.8918, 10.4557], [-11.0469, -9.6423, 8.1022], [-8.8664, -7.8826, 5.5325]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_biogpt_generation(self):
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        model.to(torch_device)
        torch.manual_seed(0)
        tokenized = tokenizer("COVID-19 is", return_tensors="pt").to(torch_device)
        output_ids = model.generate(
            **tokenized, min_length=100, max_length=1024, num_beams=5, early_stopping=True,
        )
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)

        EXPECTED_OUTPUT_STR = (
            "COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the"
            " causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and"
            " territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),"
            " and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and"
            " more than 800,000 deaths."
        )
        self.assertEqual(output_str, EXPECTED_OUTPUT_STR)
| 62
|
"""simple docstring"""
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 500_000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))


@get_duration
def map(dataset: datasets.Dataset, **kwargs):
    _ = dataset.map(**kwargs)


@get_duration
def filter(dataset: datasets.Dataset, **kwargs):
    _ = dataset.filter(**kwargs)
def benchmark_map_filter():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features({"text": datasets.Value("string"), "numbers": datasets.Value("float32")})
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"), features, num_examples=SPEED_TEST_N_EXAMPLES
        )

        tokenizer = transformers.AutoTokenizer.from_pretrained("bert-base-cased", use_fast=True)

        def tokenize(examples):
            return tokenizer(examples["text"])

        times["map identity"] = map(dataset)

        times["map identity batched"] = map(dataset, batched=True)

        times["map no-op batched"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="numpy"):
            times["map no-op batched numpy"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="pandas"):
            times["map no-op batched pandas"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="torch", columns="numbers"):
            times["map no-op batched pytorch"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="tensorflow", columns="numbers"):
            times["map no-op batched tensorflow"] = map(dataset, function=lambda x: None, batched=True)

        times["map fast-tokenizer batched"] = map(dataset, function=tokenize, batched=True)

        times["filter"] = filter(dataset)

        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
| 173
| 0
|
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "speechbrain/m-ctc-t-large": "https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json",
    # See all M-CTC-T models at https://huggingface.co/models?filter=mctct
}
class MCTCTConfig(PretrainedConfig):
    model_type = "mctct"

    def __init__(self, vocab_size=8065, hidden_size=1536, num_hidden_layers=36, intermediate_size=6144, num_attention_heads=4, attention_head_dim=384, max_position_embeddings=920, layer_norm_eps=1e-5, layerdrop=0.3, hidden_act="relu", initializer_range=0.02, hidden_dropout_prob=0.3, attention_probs_dropout_prob=0.3, pad_token_id=1, bos_token_id=0, eos_token_id=2, conv_glu_dim=1, conv_dropout=0.3, num_conv_layers=1, conv_kernel=(7,), conv_stride=(3,), input_feat_per_channel=80, input_channels=1, conv_channels=None, ctc_loss_reduction="sum", ctc_zero_infinity=False, **kwargs):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        self.max_position_embeddings = max_position_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.layerdrop = layerdrop
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.conv_glu_dim = conv_glu_dim
        self.conv_dropout = conv_dropout
        self.num_conv_layers = num_conv_layers
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels
        self.conv_channels = conv_channels
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # prevents config testing fail with exporting to json
        self.conv_kernel = list(conv_kernel)
        self.conv_stride = list(conv_stride)

        if len(self.conv_kernel) != self.num_conv_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.conv_kernel)` == `config.num_conv_layers` "
                f"but is `len(config.conv_kernel) = {len(self.conv_kernel)}`, "
                f"`config.num_conv_layers = {self.num_conv_layers}`."
            )
| 353
|
def exchange_sort(numbers):
    """Sort the list in place by repeatedly exchanging out-of-order pairs, then return it."""
    numbers_length = len(numbers)
    for i in range(numbers_length):
        for j in range(i + 1, numbers_length):
            if numbers[j] < numbers[i]:
                numbers[j], numbers[i] = numbers[i], numbers[j]
    return numbers
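# Exchange sort makes O(n^2) comparisons, like bubble sort; e.g. exchange_sort([3, 1, 2]) -> [1, 2, 3].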
if __name__ == "__main__":
lowerCAmelCase__ = input('Enter numbers separated by a comma:\n').strip()
lowerCAmelCase__ = [int(item) for item in user_input.split(',')]
print(exchange_sort(unsorted))
| 119
| 0
|
import bz2
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
logger = get_logger(__name__)
class ExtractManager:
    def __init__(self, cache_dir: Optional[str] = None):
        self.extract_dir = (
            os.path.join(cache_dir, config.EXTRACTED_DATASETS_DIR) if cache_dir else config.EXTRACTED_DATASETS_PATH
        )
        self.extractor = Extractor

    def _get_output_path(self, path: str) -> str:
        from .file_utils import hash_url_to_filename

        # Path where we extract compressed archives
        # We extract in the cache dir, and get the extracted path name by hashing the original path
        abs_path = os.path.abspath(path)
        return os.path.join(self.extract_dir, hash_url_to_filename(abs_path))

    def _do_extract(self, output_path: str, force_extract: bool) -> bool:
        return force_extract or (
            not os.path.isfile(output_path) and not (os.path.isdir(output_path) and os.listdir(output_path))
        )

    def extract(self, input_path: str, force_extract: bool = False) -> str:
        extractor_format = self.extractor.infer_extractor_format(input_path)
        if not extractor_format:
            return input_path
        output_path = self._get_output_path(input_path)
        if self._do_extract(output_path, force_extract):
            self.extractor.extract(input_path, output_path, extractor_format)
        return output_path
class BaseExtractor(ABC):
    @classmethod
    @abstractmethod
    def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool:
        ...

    @staticmethod
    @abstractmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        ...
class MagicNumberBaseExtractor(BaseExtractor, ABC):
    magic_numbers: List[bytes] = []

    @staticmethod
    def read_magic_number(path: Union[Path, str], magic_number_length: int):
        with open(path, "rb") as f:
            return f.read(magic_number_length)

    @classmethod
    def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool:
        if not magic_number:
            magic_number_length = max(len(cls_magic_number) for cls_magic_number in cls.magic_numbers)
            try:
                magic_number = cls.read_magic_number(path, magic_number_length)
            except OSError:
                return False
        return any(magic_number.startswith(cls_magic_number) for cls_magic_number in cls.magic_numbers)
class TarExtractor(BaseExtractor):
    @classmethod
    def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool:
        return tarfile.is_tarfile(path)

    @staticmethod
    def safemembers(members, output_path):
        # Filter tar members so that nothing is extracted outside of `output_path`
        # (guards against path-traversal member names and malicious links).
        def resolved(path: str) -> str:
            return os.path.realpath(os.path.abspath(path))

        def badpath(path: str, base: str) -> bool:
            # joinpath will ignore base if path is absolute
            return not resolved(os.path.join(base, path)).startswith(base)

        def badlink(info, base: str) -> bool:
            # Links are interpreted relative to the directory containing the link
            tip = resolved(os.path.join(base, os.path.dirname(info.name)))
            return badpath(info.linkname, base=tip)

        base = resolved(output_path)

        for finfo in members:
            if badpath(finfo.name, base):
                logger.error(f"Extraction of {finfo.name} is blocked (illegal path)")
            elif finfo.issym() and badlink(finfo, base):
                logger.error(f"Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}")
            elif finfo.islnk() and badlink(finfo, base):
                logger.error(f"Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}")
            else:
                yield finfo

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        os.makedirs(output_path, exist_ok=True)
        tar_file = tarfile.open(input_path)
        tar_file.extractall(output_path, members=TarExtractor.safemembers(tar_file, output_path))
        tar_file.close()
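# Illustrative behaviour of the traversal guards above (assumed paths, exposition only):
#   badpath("../../etc/passwd", base="/cache/out") -> True, so the member is skipped
#   badpath("data/train.txt", base="/cache/out")   -> False, so the member is extracted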
class GzipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x1F\x8B"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with gzip.open(input_path, "rb") as gzip_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(gzip_file, extracted_file)
class ZipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [
        b"PK\x03\x04",
        b"PK\x05\x06",  # empty archive
        b"PK\x07\x08",  # spanned archive
    ]

    @classmethod
    def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool:
        if super().is_extractable(path, magic_number=magic_number):
            return True
        try:
            # Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
            # From: https://github.com/python/cpython/pull/5053
            from zipfile import (
                _CD_SIGNATURE,
                _ECD_DISK_NUMBER,
                _ECD_DISK_START,
                _ECD_ENTRIES_TOTAL,
                _ECD_OFFSET,
                _ECD_SIZE,
                _EndRecData,
                sizeCentralDir,
                stringCentralDir,
                structCentralDir,
            )

            with open(path, "rb") as fp:
                endrec = _EndRecData(fp)
                if endrec:
                    if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
                        return True  # Empty zipfiles are still zipfiles
                    elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
                        fp.seek(endrec[_ECD_OFFSET])  # Central directory is on the same disk
                        if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
                            data = fp.read(sizeCentralDir)  # CD is where we expect it to be
                            if len(data) == sizeCentralDir:
                                centdir = struct.unpack(structCentralDir, data)  # CD is the right size
                                if centdir[_CD_SIGNATURE] == stringCentralDir:
                                    return True  # First central directory entry has correct magic number
            return False
        except Exception:  # catch all errors in case future python versions change the zipfile internals
            return False

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        os.makedirs(output_path, exist_ok=True)
        with zipfile.ZipFile(input_path, "r") as zip_file:
            zip_file.extractall(output_path)
            zip_file.close()
class XzExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\xFD\x37\x7A\x58\x5A\x00"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with lzma.open(input_path) as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)
class RarExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"Rar!\x1a\x07\x00", b"Rar!\x1a\x07\x01\x00"]  # RAR_ID  # RAR5_ID

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.RARFILE_AVAILABLE:
            raise ImportError("Please pip install rarfile")
        import rarfile

        os.makedirs(output_path, exist_ok=True)
        rf = rarfile.RarFile(input_path)
        rf.extractall(output_path)
        rf.close()
class ZstdExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x28\xb5\x2F\xFD"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.ZSTANDARD_AVAILABLE:
            raise ImportError("Please pip install zstandard")
        import zstandard as zstd

        dctx = zstd.ZstdDecompressor()
        with open(input_path, "rb") as ifh, open(output_path, "wb") as ofh:
            dctx.copy_stream(ifh, ofh)
class Bzip2Extractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x42\x5A\x68"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with bz2.open(input_path, "rb") as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)
class SevenZipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x37\x7A\xBC\xAF\x27\x1C"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.PY7ZR_AVAILABLE:
            raise ImportError("Please pip install py7zr")
        import py7zr

        os.makedirs(output_path, exist_ok=True)
        with py7zr.SevenZipFile(input_path, "r") as archive:
            archive.extractall(output_path)
class Lz4Extractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x04\x22\x4D\x18"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.LZ4_AVAILABLE:
            raise ImportError("Please pip install lz4")
        import lz4.frame

        with lz4.frame.open(input_path, "rb") as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)
class Extractor:
    extractors: Dict[str, Type[BaseExtractor]] = {
        "tar": TarExtractor,
        "gzip": GzipExtractor,
        "zip": ZipExtractor,
        "xz": XzExtractor,
        "rar": RarExtractor,
        "zstd": ZstdExtractor,
        "bz2": Bzip2Extractor,
        "7z": SevenZipExtractor,  # <Added version="2.4.0"/>
        "lz4": Lz4Extractor,  # <Added version="2.4.0"/>
    }

    @classmethod
    def _get_magic_number_max_length(cls):
        return max(
            len(extractor_magic_number)
            for extractor in cls.extractors.values()
            if issubclass(extractor, MagicNumberBaseExtractor)
            for extractor_magic_number in extractor.magic_numbers
        )

    @staticmethod
    def _read_magic_number(path: Union[Path, str], magic_number_length: int):
        try:
            return MagicNumberBaseExtractor.read_magic_number(path, magic_number_length=magic_number_length)
        except OSError:
            return b""

    @classmethod
    def is_extractable(cls, path: Union[Path, str], return_extractor: bool = False) -> bool:
        warnings.warn(
            "Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
            "Use 'infer_extractor_format' instead.",
            category=FutureWarning,
        )
        extractor_format = cls.infer_extractor_format(path)
        if extractor_format:
            return True if not return_extractor else (True, cls.extractors[extractor_format])
        return False if not return_extractor else (False, None)

    @classmethod
    def infer_extractor_format(cls, path: Union[Path, str]) -> str:  # <Added version="2.4.0"/>
        magic_number_max_length = cls._get_magic_number_max_length()
        magic_number = cls._read_magic_number(path, magic_number_max_length)
        for extractor_format, extractor in cls.extractors.items():
            if extractor.is_extractable(path, magic_number=magic_number):
                return extractor_format

    @classmethod
    def extract(cls, input_path: Union[Path, str], output_path: Union[Path, str], extractor_format: Optional[str] = None, extractor: Optional[BaseExtractor] = "deprecated"):
        os.makedirs(os.path.dirname(output_path), exist_ok=True)
        # Prevent parallel extractions
        lock_path = str(Path(output_path).with_suffix(".lock"))
        with FileLock(lock_path):
            shutil.rmtree(output_path, ignore_errors=True)
            if extractor_format or extractor != "deprecated":
                if extractor != "deprecated" or not isinstance(extractor_format, str):  # passed as positional arg
                    warnings.warn(
                        "Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
                        "Use 'extractor_format' instead.",
                        category=FutureWarning,
                    )
                    extractor = extractor if extractor != "deprecated" else extractor_format
                else:
                    extractor = cls.extractors[extractor_format]
                return extractor.extract(input_path, output_path)
            else:
                warnings.warn(
                    "Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an "
                    "exception in 3.0.0.",
                    category=FutureWarning,
                )
                for extractor in cls.extractors.values():
                    if extractor.is_extractable(input_path):
                        return extractor.extract(input_path, output_path)
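# Illustrative usage (not part of the original module): format detection is driven by the
# magic numbers declared on each extractor class, not by file extensions, e.g.
#   Extractor.infer_extractor_format("data.bin")  ->  "gzip" if the file starts with b"\x1F\x8B"
#   Extractor.extract("data.bin", "out_dir", extractor_format="gzip")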
| 28
|
import argparse
import os
from pathlib import Path
import fairseq
import torch
from packaging import version
from torch import nn
from transformers import (
BartConfig,
BartForConditionalGeneration,
BartForSequenceClassification,
BartModel,
BartTokenizer,
)
from transformers.utils import logging
FAIRSEQ_MODELS = ["bart.large", "bart.large.mnli", "bart.large.cnn", "bart_xsum/model.pt"]
extra_arch = {"bart.large": BartModel, "bart.large.mnli": BartForSequenceClassification}
if version.parse(fairseq.__version__) < version.parse("0.9.0"):
    raise Exception("requires fairseq >= 0.9.0")


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = " Hello world! cécé herlolip"

mnli_rename_keys = [
    ("model.classification_heads.mnli.dense.weight", "classification_head.dense.weight"),
    ("model.classification_heads.mnli.dense.bias", "classification_head.dense.bias"),
    ("model.classification_heads.mnli.out_proj.weight", "classification_head.out_proj.weight"),
    ("model.classification_heads.mnli.out_proj.bias", "classification_head.out_proj.bias"),
]
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def load_xsum_checkpoint(checkpoint_path):
    sd = torch.load(checkpoint_path, map_location="cpu")
    hub_interface = torch.hub.load("pytorch/fairseq", "bart.large.cnn").eval()
    hub_interface.model.load_state_dict(sd["model"])
    return hub_interface


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
@torch.no_grad()
def convert_bart_checkpoint(checkpoint_path, pytorch_dump_folder_path, hf_checkpoint_name=None):
    if not os.path.exists(checkpoint_path):
        bart = torch.hub.load("pytorch/fairseq", checkpoint_path).eval()
    else:
        bart = load_xsum_checkpoint(checkpoint_path)

    bart.model.upgrade_state_dict(bart.model.state_dict())
    if hf_checkpoint_name is None:
        hf_checkpoint_name = checkpoint_path.replace(".", "-")
    config = BartConfig.from_pretrained(hf_checkpoint_name)
    tokens = bart.encode(SAMPLE_TEXT).unsqueeze(0)
    tokens2 = BartTokenizer.from_pretrained(hf_checkpoint_name).encode(SAMPLE_TEXT, return_tensors="pt").unsqueeze(0)
    if not torch.eq(tokens, tokens2).all():
        raise ValueError(
            f"converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokens2}"
        )

    if checkpoint_path == "bart.large.mnli":
        state_dict = bart.state_dict()
        remove_ignore_keys_(state_dict)
        state_dict["model.shared.weight"] = state_dict["model.decoder.embed_tokens.weight"]
        for src, dest in mnli_rename_keys:
            rename_key(state_dict, src, dest)
        model = BartForSequenceClassification(config).eval()
        model.load_state_dict(state_dict)
        fairseq_output = bart.predict("mnli", tokens, return_logits=True)
        new_model_outputs = model(tokens)[0]  # logits
    else:  # no classification heads to worry about
        state_dict = bart.model.state_dict()
        remove_ignore_keys_(state_dict)
        state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
        fairseq_output = bart.extract_features(tokens)
        if hf_checkpoint_name == "facebook/bart-large":
            model = BartModel(config).eval()
            model.load_state_dict(state_dict)
            new_model_outputs = model(tokens).model[0]
        else:
            model = BartForConditionalGeneration(config).eval()  # an existing summarization ckpt
            model.model.load_state_dict(state_dict)
            if hasattr(model, "lm_head"):
                model.lm_head = make_linear_from_emb(model.model.shared)
            new_model_outputs = model.model(tokens)[0]

    # Check results
    if fairseq_output.shape != new_model_outputs.shape:
        raise ValueError(
            f"`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}"
        )
    if (fairseq_output != new_model_outputs).any().item():
        raise ValueError("Some values in `fairseq_output` are different from `new_model_outputs`")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'''
)
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--hf_config''', default=None, type=str, help='''Which huggingface architecture to use: bart-large-xsum'''
)
__UpperCAmelCase = parser.parse_args()
convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
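# Example invocation (illustrative; the script name depends on where this file is saved):
#   python convert_bart_checkpoint.py bart.large.cnn ./bart-large-cnn --hf_config facebook/bart-large-cnn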
| 119
| 0
|
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/unispeech-large-1500h-cv": (
        "https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json"
    ),
    # See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class UniSpeechConfig(PretrainedConfig):
    model_type = "unispeech"

    def __init__(self, vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout=0.1, activation_dropout=0.1, attention_dropout=0.1, feat_proj_dropout=0.0, feat_quantizer_dropout=0.0, final_dropout=0.1, layerdrop=0.1, initializer_range=0.02, layer_norm_eps=1e-5, feat_extract_norm="group", feat_extract_activation="gelu", conv_dim=(512, 512, 512, 512, 512, 512, 512), conv_stride=(5, 2, 2, 2, 2, 2, 2), conv_kernel=(10, 3, 3, 3, 3, 2, 2), conv_bias=False, num_conv_pos_embeddings=128, num_conv_pos_embedding_groups=16, do_stable_layer_norm=False, apply_spec_augment=True, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0, num_codevectors_per_group=320, num_codevector_groups=2, contrastive_logits_temperature=0.1, num_negatives=100, codevector_dim=256, proj_codevector_dim=256, diversity_loss_weight=0.1, ctc_loss_reduction="mean", ctc_zero_infinity=False, use_weighted_layer_sum=False, classifier_proj_size=256, num_ctc_classes=80, pad_token_id=0, bos_token_id=1, eos_token_id=2, replace_prob=0.5, **kwargs):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # pretraining loss
        self.replace_prob = replace_prob

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
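    # With the default conv_stride of (5, 2, 2, 2, 2, 2, 2) this ratio is 5 * 2**6 = 320:
    # one output frame per 320 input samples (20 ms at an assumed 16 kHz sampling rate).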
| 364
|
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
logger = logging.get_logger(__name__)


def shape_list(tensor: Union[tf.Tensor, np.ndarray]) -> List[int]:
    if isinstance(tensor, np.ndarray):
        return list(tensor.shape)

    dynamic = tf.shape(tensor)

    if tensor.shape == tf.TensorShape(None):
        return dynamic

    static = tensor.shape.as_list()

    return [dynamic[i] if s is None else s for i, s in enumerate(static)]
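# Illustrative behaviour: for a tensor with static shape (None, 128), shape_list returns
# [<dynamic batch-size tensor>, 128], so the result is usable in reshape/tile ops even
# when the batch dimension is only known at runtime.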
def stable_softmax(logits: tf.Tensor, axis: Optional[int] = None, name: Optional[str] = None) -> tf.Tensor:
    return tf.nn.softmax(logits=logits + 1e-9, axis=axis, name=name)
def functional_layernorm(inputs, weight, bias, epsilon=1e-5, axis=-1):
    # This is a very simplified functional layernorm, designed to duplicate
    # the functionality of PyTorch nn.functional.layer_norm when this is needed to port
    # models in Transformers.
    if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(axis, int):
        raise NotImplementedError("Only 1D weight and bias tensors are supported for now, with only a single axis.")

    # Get mean and variance on the axis to be normalized
    mean, variance = tf.nn.moments(inputs, axes=[axis], keepdims=True)

    if axis != -1:
        # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
        # on every dimension except axis
        shape = [1] * inputs.shape.rank
        shape[axis] = shape_list(inputs)[axis]
        weight = tf.reshape(weight, shape)
        bias = tf.reshape(bias, shape)

    # Compute layer normalization using the batch_normalization
    # function.
    outputs = tf.nn.batch_normalization(
        inputs, mean, variance, offset=bias, scale=weight, variance_epsilon=epsilon,
    )
    return outputs
def flatten(input, start_dim=0, end_dim=-1):
    # Replicates the behavior of torch.flatten in TF

    # If end_dim or start_dim is negative, count them from the end
    if end_dim < 0:
        end_dim += input.shape.rank
    if start_dim < 0:
        start_dim += input.shape.rank

    if start_dim == end_dim:
        return input

    in_shape = tf.shape(input)
    flattened_dim = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1])
    out_shape = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]], axis=0)
    return tf.reshape(input, out_shape)
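# Illustrative behaviour: flattening a (2, 3, 4) tensor with start_dim=1 yields shape
# (2, 12), matching torch.flatten(x, start_dim=1).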
def invert_attention_mask(encoder_attention_mask: tf.Tensor) -> tf.Tensor:
    if not isinstance(encoder_attention_mask, tf.Tensor):
        encoder_attention_mask = tf.convert_to_tensor(encoder_attention_mask)  # Catches stray NumPy inputs
    if encoder_attention_mask.shape.rank == 3:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
    if encoder_attention_mask.shape.rank == 2:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
    # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
    # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
    # /transformer/transformer_layers.py#L270
    # encoder_extended_attention_mask = (encoder_extended_attention_mask ==
    # encoder_extended_attention_mask.transpose(-1, -2))
    encoder_extended_attention_mask = (
        tf.cast(1, encoder_attention_mask.dtype) - encoder_extended_attention_mask
    ) * encoder_extended_attention_mask.dtype.min

    return encoder_extended_attention_mask
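# Illustrative behaviour: a rank-2 padding mask [[1, 1, 0]] becomes an additive mask
# [[0, 0, dtype.min]] broadcast over heads, which suppresses attention to padded
# positions after the softmax.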
def check_embeddings_within_bounds(tensor: tf.Tensor, embed_dim: int, tensor_name: str = "input_ids") -> None:
    tf.debugging.assert_less(
        tensor,
        tf.cast(embed_dim, dtype=tensor.dtype),
        message=(
            f"The maximum value of {tensor_name} ({tf.math.reduce_max(tensor)}) must be smaller than the embedding"
            f" layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time."
        ),
    )
def save_attributes_to_hdf5_group(group, name, data):
    HDF5_OBJECT_HEADER_LIMIT = 64512

    # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
    # because in that case even chunking the array would not make the saving
    # possible.
    bad_attributes = [x for x in data if len(x) > HDF5_OBJECT_HEADER_LIMIT]

    # Expecting this to never be true.
    if bad_attributes:
        raise RuntimeError(
            "The following attributes cannot be saved to HDF5 file because "
            f"they are larger than {HDF5_OBJECT_HEADER_LIMIT} "
            f"bytes: {bad_attributes}"
        )

    data_npy = np.asarray(data)

    num_chunks = 1
    chunked_data = np.array_split(data_npy, num_chunks)

    # This will never loop forever thanks to the test above.
    while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data):
        num_chunks += 1
        chunked_data = np.array_split(data_npy, num_chunks)

    if num_chunks > 1:
        for chunk_id, chunk_data in enumerate(chunked_data):
            group.attrs["%s%d" % (name, chunk_id)] = chunk_data
    else:
        group.attrs[name] = data
def load_attributes_from_hdf5_group(group, name):
    if name in group.attrs:
        data = [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs[name]]
    else:
        data = []
        chunk_id = 0
        while "%s%d" % (name, chunk_id) in group.attrs:
            data.extend(
                [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs["%s%d" % (name, chunk_id)]]
            )
            chunk_id += 1
    return data
def expand_1d(data):
    def _expand_single_1d_tensor(t):
        if isinstance(t, tf.Tensor) and t.shape.rank == 1:
            return tf.expand_dims(t, axis=-1)
        return t

    return tf.nest.map_structure(_expand_single_1d_tensor, data)
| 272
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_wavlm": ["WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "WavLMConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_wavlm"] = [
        "WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "WavLMForAudioFrameClassification",
        "WavLMForCTC",
        "WavLMForSequenceClassification",
        "WavLMForXVector",
        "WavLMModel",
        "WavLMPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 250
|
class CircularQueue:
    """Circular FIFO queue with a fixed capacity, backed by a plain list."""

    def __init__(self, n):
        self.n = n
        self.array = [None] * self.n
        self.front = 0  # index of the first element
        self.rear = 0
        self.size = 0

    def __len__(self):
        return self.size

    def is_empty(self):
        return self.size == 0

    def first(self):
        return False if self.is_empty() else self.array[self.front]

    def enqueue(self, data):
        if self.size >= self.n:
            raise Exception("QUEUE IS FULL")

        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.n
        self.size += 1
        return self

    def dequeue(self):
        if self.size == 0:
            raise Exception("UNDERFLOW")

        temp = self.array[self.front]
        self.array[self.front] = None
        self.front = (self.front + 1) % self.n
        self.size -= 1
        return temp
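# Illustrative usage (not part of the original module): exercises FIFO order and the
# wrap-around of the fixed-size backing array.
if __name__ == "__main__":
    queue = CircularQueue(3)
    queue.enqueue(1).enqueue(2).enqueue(3)
    assert len(queue) == 3 and queue.first() == 1
    assert queue.dequeue() == 1  # the oldest element leaves first
    queue.enqueue(4)  # rear wraps around into the freed slot
    assert [queue.dequeue() for _ in range(3)] == [2, 3, 4]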
| 250
| 1
|
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
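# Hedged usage sketch (assumes diffusers and torch are installed; "openai/shap-e"
# is the commonly published checkpoint id for this pipeline):
#
#   import torch
#   from diffusers import ShapEPipeline
#
#   pipe = ShapEPipeline.from_pretrained("openai/shap-e", torch_dtype=torch.float16)
#   images = pipe("a shark", guidance_scale=15.0, num_inference_steps=64).images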
| 363
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase = logging.get_logger(__name__)
UpperCAmelCase = {
"""microsoft/wavlm-base""": """https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json""",
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class lowerCAmelCase_ ( lowerCamelCase__ ):
'''simple docstring'''
__snake_case = "wavlm"
def __init__( self , _UpperCAmelCase=32 , _UpperCAmelCase=7_68 , _UpperCAmelCase=12 , _UpperCAmelCase=12 , _UpperCAmelCase=30_72 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.02 , _UpperCAmelCase=1E-5 , _UpperCAmelCase="group" , _UpperCAmelCase="gelu" , _UpperCAmelCase=(5_12, 5_12, 5_12, 5_12, 5_12, 5_12, 5_12) , _UpperCAmelCase=(5, 2, 2, 2, 2, 2, 2) , _UpperCAmelCase=(10, 3, 3, 3, 3, 2, 2) , _UpperCAmelCase=False , _UpperCAmelCase=1_28 , _UpperCAmelCase=16 , _UpperCAmelCase=3_20 , _UpperCAmelCase=8_00 , _UpperCAmelCase=False , _UpperCAmelCase=True , _UpperCAmelCase=0.05 , _UpperCAmelCase=10 , _UpperCAmelCase=2 , _UpperCAmelCase=0.0 , _UpperCAmelCase=10 , _UpperCAmelCase=3_20 , _UpperCAmelCase=2 , _UpperCAmelCase=0.1 , _UpperCAmelCase=1_00 , _UpperCAmelCase=2_56 , _UpperCAmelCase=2_56 , _UpperCAmelCase=0.1 , _UpperCAmelCase="mean" , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=2_56 , _UpperCAmelCase=(5_12, 5_12, 5_12, 5_12, 15_00) , _UpperCAmelCase=(5, 3, 3, 1, 1) , _UpperCAmelCase=(1, 2, 3, 1, 1) , _UpperCAmelCase=5_12 , _UpperCAmelCase=80 , _UpperCAmelCase=0 , _UpperCAmelCase=1 , _UpperCAmelCase=2 , _UpperCAmelCase=False , _UpperCAmelCase=3 , _UpperCAmelCase=2 , _UpperCAmelCase=3 , _UpperCAmelCase=None , **_UpperCAmelCase , ):
super().__init__(**_UpperCAmelCase , pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase )
snake_case_ = hidden_size
snake_case_ = feat_extract_norm
snake_case_ = feat_extract_activation
snake_case_ = list(_UpperCAmelCase )
snake_case_ = list(_UpperCAmelCase )
snake_case_ = list(_UpperCAmelCase )
snake_case_ = conv_bias
snake_case_ = num_buckets
snake_case_ = max_bucket_distance
snake_case_ = num_conv_pos_embeddings
snake_case_ = num_conv_pos_embedding_groups
snake_case_ = len(self.conv_dim )
snake_case_ = num_hidden_layers
snake_case_ = intermediate_size
snake_case_ = hidden_act
snake_case_ = num_attention_heads
snake_case_ = hidden_dropout
snake_case_ = attention_dropout
snake_case_ = activation_dropout
snake_case_ = feat_proj_dropout
snake_case_ = final_dropout
snake_case_ = layerdrop
snake_case_ = layer_norm_eps
snake_case_ = initializer_range
snake_case_ = num_ctc_classes
snake_case_ = vocab_size
snake_case_ = do_stable_layer_norm
snake_case_ = use_weighted_layer_sum
snake_case_ = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
snake_case_ = apply_spec_augment
snake_case_ = mask_time_prob
snake_case_ = mask_time_length
snake_case_ = mask_time_min_masks
snake_case_ = mask_feature_prob
snake_case_ = mask_feature_length
# parameters for pretraining with codevector quantized representations
snake_case_ = num_codevectors_per_group
snake_case_ = num_codevector_groups
snake_case_ = contrastive_logits_temperature
snake_case_ = num_negatives
snake_case_ = codevector_dim
snake_case_ = proj_codevector_dim
snake_case_ = diversity_loss_weight
# ctc loss
snake_case_ = ctc_loss_reduction
snake_case_ = ctc_zero_infinity
# adapter
snake_case_ = add_adapter
snake_case_ = adapter_kernel_size
snake_case_ = adapter_stride
snake_case_ = num_adapter_layers
snake_case_ = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
snake_case_ = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
snake_case_ = list(_UpperCAmelCase )
snake_case_ = list(_UpperCAmelCase )
snake_case_ = list(_UpperCAmelCase )
snake_case_ = xvector_output_dim
@property
def UpperCamelCase__ ( self ):
return functools.reduce(operator.mul , self.conv_stride , 1 )
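# Hedged usage sketch (assumes transformers is installed; the defaults above
# mirror the microsoft/wavlm-base checkpoint):
#
#   from transformers import WavLMConfig, WavLMModel
#
#   config = WavLMConfig()
#   model = WavLMModel(config)   # randomly initialised weights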
| 267
| 0
|
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
def env_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("env")
    else:
        parser = argparse.ArgumentParser("Accelerate env command")

    parser.add_argument(
        "--config_file", default=None, help="The config file to use for the default values in the launching script."
    )

    if subparsers is not None:
        parser.set_defaults(func=env_command)
    return parser


def env_command(args):
    pt_version = torch.__version__
    pt_cuda_available = torch.cuda.is_available()
    pt_xpu_available = is_xpu_available()
    pt_npu_available = is_npu_available()

    accelerate_config = "Not found"
    # Get the default from the config file.
    if args.config_file is not None or os.path.isfile(default_config_file):
        accelerate_config = load_config_from_file(args.config_file).to_dict()

    info = {
        "`Accelerate` version": version,
        "Platform": platform.platform(),
        "Python version": platform.python_version(),
        "Numpy version": np.__version__,
        "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
        "PyTorch XPU available": str(pt_xpu_available),
        "PyTorch NPU available": str(pt_npu_available),
        "System RAM": f"{psutil.virtual_memory().total / 1024 ** 3:.2f} GB",
    }
    if pt_cuda_available:
        info["GPU type"] = torch.cuda.get_device_name()

    print("\nCopy-and-paste the text below in your GitHub issue\n")
    print("\n".join([f"- {prop}: {val}" for prop, val in info.items()]))

    print("- `Accelerate` default config:" if args.config_file is None else "- `Accelerate` config passed:")
    accelerate_config_str = (
        "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
        if isinstance(accelerate_config, dict)
        else f"\t{accelerate_config}"
    )
    print(accelerate_config_str)

    info["`Accelerate` configs"] = accelerate_config

    return info


def main():
    parser = env_command_parser()
    args = parser.parse_args()
    env_command(args)
return 0
if __name__ == "__main__":
raise SystemExit(main())
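# Typical invocation once installed as part of the `accelerate` CLI:
#
#   $ accelerate env
#
# prints the version/platform table above for pasting into GitHub bug reports.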
| 157
|
from __future__ import annotations
graph = {
'''A''': ['''B''', '''C''', '''E'''],
'''B''': ['''A''', '''D''', '''E'''],
'''C''': ['''A''', '''F''', '''G'''],
'''D''': ['''B'''],
'''E''': ['''A''', '''B''', '''D'''],
'''F''': ['''C'''],
'''G''': ['''C'''],
}
class Graph:
    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breath_first_search(self) -> None:
        """Build the breadth-first tree rooted at the source vertex."""
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue
        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        """Return the BFS path from the source to `target_vertex` as 'a->b->c'."""
        if target_vertex == self.source_vertex:
            return self.source_vertex
        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            raise ValueError(
                f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            )
        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"
if __name__ == "__main__":
    g = Graph(graph, "G")
g.breath_first_search()
print(g.shortest_path('''D'''))
print(g.shortest_path('''G'''))
print(g.shortest_path('''Foo'''))
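# Expected output of the demo above (the last call raises ValueError, since
# "Foo" is not a vertex of the graph):
#   G->C->A->B->D
#   G
#   ValueError: No path from vertex: G to vertex: Foo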
| 131
| 0
|
'''simple docstring'''
import math
from numpy import inf
from scipy.integrate import quad
def gamma(num: float) -> float:
    """Numerically evaluate Gamma(num) = integral of x^(num-1) * e^(-x) from 0 to infinity."""
    if num <= 0:
        raise ValueError('math domain error')
    return quad(integrand, 0, inf, args=(num,))[0]


def integrand(x: float, z: float) -> float:
    return math.pow(x, z - 1) * math.exp(-x)
if __name__ == "__main__":
from doctest import testmod
testmod()
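# Quick sanity check of the numerical Gamma function above:
# Gamma(5) equals 4! = 24 up to quadrature error.
#
#   >>> round(gamma(5), 6)
#   24.0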
| 360
|
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_2, require_torch_gpu
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class a__ ( __A , unittest.TestCase ):
"""simple docstring"""
    pipeline_class = ConsistencyModelPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS

    # Override required_optional_params to remove num_images_per_prompt
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "output_type",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )
@property
def _snake_case (self ):
__lowerCAmelCase = UNetaDModel.from_pretrained(
'''diffusers/consistency-models-test''' , subfolder='''test_unet''' , )
return unet
@property
def _snake_case (self ):
__lowerCAmelCase = UNetaDModel.from_pretrained(
'''diffusers/consistency-models-test''' , subfolder='''test_unet_class_cond''' , )
return unet
def _snake_case (self , __lowercase=False ):
if class_cond:
__lowerCAmelCase = self.dummy_cond_unet
else:
__lowerCAmelCase = self.dummy_uncond_unet
# Default to CM multistep sampler
__lowerCAmelCase = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
__lowerCAmelCase = {
'''unet''': unet,
'''scheduler''': scheduler,
}
return components
def _snake_case (self , __lowercase , __lowercase=0 ):
if str(__lowercase ).startswith('''mps''' ):
__lowerCAmelCase = torch.manual_seed(__lowercase )
else:
__lowerCAmelCase = torch.Generator(device=__lowercase ).manual_seed(__lowercase )
__lowerCAmelCase = {
'''batch_size''': 1,
'''num_inference_steps''': None,
'''timesteps''': [22, 0],
'''generator''': generator,
'''output_type''': '''np''',
}
return inputs
def _snake_case (self ):
__lowerCAmelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__lowerCAmelCase = self.get_dummy_components()
__lowerCAmelCase = ConsistencyModelPipeline(**__lowercase )
__lowerCAmelCase = pipe.to(__lowercase )
pipe.set_progress_bar_config(disable=__lowercase )
__lowerCAmelCase = self.get_dummy_inputs(__lowercase )
__lowerCAmelCase = pipe(**__lowercase ).images
assert image.shape == (1, 32, 32, 3)
__lowerCAmelCase = image[0, -3:, -3:, -1]
__lowerCAmelCase = np.array([0.3_5_7_2, 0.6_2_7_3, 0.4_0_3_1, 0.3_9_6_1, 0.4_3_2_1, 0.5_7_3_0, 0.5_2_6_6, 0.4_7_8_0, 0.5_0_0_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _snake_case (self ):
__lowerCAmelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__lowerCAmelCase = self.get_dummy_components(class_cond=__lowercase )
__lowerCAmelCase = ConsistencyModelPipeline(**__lowercase )
__lowerCAmelCase = pipe.to(__lowercase )
pipe.set_progress_bar_config(disable=__lowercase )
__lowerCAmelCase = self.get_dummy_inputs(__lowercase )
__lowerCAmelCase = 0
__lowerCAmelCase = pipe(**__lowercase ).images
assert image.shape == (1, 32, 32, 3)
__lowerCAmelCase = image[0, -3:, -3:, -1]
__lowerCAmelCase = np.array([0.3_5_7_2, 0.6_2_7_3, 0.4_0_3_1, 0.3_9_6_1, 0.4_3_2_1, 0.5_7_3_0, 0.5_2_6_6, 0.4_7_8_0, 0.5_0_0_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _snake_case (self ):
__lowerCAmelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__lowerCAmelCase = self.get_dummy_components()
__lowerCAmelCase = ConsistencyModelPipeline(**__lowercase )
__lowerCAmelCase = pipe.to(__lowercase )
pipe.set_progress_bar_config(disable=__lowercase )
__lowerCAmelCase = self.get_dummy_inputs(__lowercase )
__lowerCAmelCase = 1
__lowerCAmelCase = None
__lowerCAmelCase = pipe(**__lowercase ).images
assert image.shape == (1, 32, 32, 3)
__lowerCAmelCase = image[0, -3:, -3:, -1]
__lowerCAmelCase = np.array([0.5_0_0_4, 0.5_0_0_4, 0.4_9_9_4, 0.5_0_0_8, 0.4_9_7_6, 0.5_0_1_8, 0.4_9_9_0, 0.4_9_8_2, 0.4_9_8_7] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _snake_case (self ):
__lowerCAmelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__lowerCAmelCase = self.get_dummy_components(class_cond=__lowercase )
__lowerCAmelCase = ConsistencyModelPipeline(**__lowercase )
__lowerCAmelCase = pipe.to(__lowercase )
pipe.set_progress_bar_config(disable=__lowercase )
__lowerCAmelCase = self.get_dummy_inputs(__lowercase )
__lowerCAmelCase = 1
__lowerCAmelCase = None
__lowerCAmelCase = 0
__lowerCAmelCase = pipe(**__lowercase ).images
assert image.shape == (1, 32, 32, 3)
__lowerCAmelCase = image[0, -3:, -3:, -1]
__lowerCAmelCase = np.array([0.5_0_0_4, 0.5_0_0_4, 0.4_9_9_4, 0.5_0_0_8, 0.4_9_7_6, 0.5_0_1_8, 0.4_9_9_0, 0.4_9_8_2, 0.4_9_8_7] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
@slow
@require_torch_gpu
class a__ ( unittest.TestCase ):
"""simple docstring"""
def _snake_case (self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _snake_case (self , __lowercase=0 , __lowercase=False , __lowercase="cpu" , __lowercase=torch.floataa , __lowercase=(1, 3, 64, 64) ):
__lowerCAmelCase = torch.manual_seed(__lowercase )
__lowerCAmelCase = {
'''num_inference_steps''': None,
'''timesteps''': [22, 0],
'''class_labels''': 0,
'''generator''': generator,
'''output_type''': '''np''',
}
if get_fixed_latents:
__lowerCAmelCase = self.get_fixed_latents(seed=__lowercase , device=__lowercase , dtype=__lowercase , shape=__lowercase )
__lowerCAmelCase = latents
return inputs
def _snake_case (self , __lowercase=0 , __lowercase="cpu" , __lowercase=torch.floataa , __lowercase=(1, 3, 64, 64) ):
if type(__lowercase ) == str:
__lowerCAmelCase = torch.device(__lowercase )
__lowerCAmelCase = torch.Generator(device=__lowercase ).manual_seed(__lowercase )
__lowerCAmelCase = randn_tensor(__lowercase , generator=__lowercase , device=__lowercase , dtype=__lowercase )
return latents
def _snake_case (self ):
__lowerCAmelCase = UNetaDModel.from_pretrained('''diffusers/consistency_models''' , subfolder='''diffusers_cd_imagenet64_l2''' )
__lowerCAmelCase = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
__lowerCAmelCase = ConsistencyModelPipeline(unet=__lowercase , scheduler=__lowercase )
pipe.to(torch_device=__lowercase )
pipe.set_progress_bar_config(disable=__lowercase )
__lowerCAmelCase = self.get_inputs()
__lowerCAmelCase = pipe(**__lowercase ).images
assert image.shape == (1, 64, 64, 3)
__lowerCAmelCase = image[0, -3:, -3:, -1]
__lowerCAmelCase = np.array([0.0_8_8_8, 0.0_8_8_1, 0.0_6_6_6, 0.0_4_7_9, 0.0_2_9_2, 0.0_1_9_5, 0.0_2_0_1, 0.0_1_6_3, 0.0_2_5_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
def _snake_case (self ):
__lowerCAmelCase = UNetaDModel.from_pretrained('''diffusers/consistency_models''' , subfolder='''diffusers_cd_imagenet64_l2''' )
__lowerCAmelCase = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
__lowerCAmelCase = ConsistencyModelPipeline(unet=__lowercase , scheduler=__lowercase )
pipe.to(torch_device=__lowercase )
pipe.set_progress_bar_config(disable=__lowercase )
__lowerCAmelCase = self.get_inputs()
__lowerCAmelCase = 1
__lowerCAmelCase = None
__lowerCAmelCase = pipe(**__lowercase ).images
assert image.shape == (1, 64, 64, 3)
__lowerCAmelCase = image[0, -3:, -3:, -1]
__lowerCAmelCase = np.array([0.0_3_4_0, 0.0_1_5_2, 0.0_0_6_3, 0.0_2_6_7, 0.0_2_2_1, 0.0_1_0_7, 0.0_4_1_6, 0.0_1_8_6, 0.0_2_1_7] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
    @require_torch_2
def _snake_case (self ):
__lowerCAmelCase = UNetaDModel.from_pretrained('''diffusers/consistency_models''' , subfolder='''diffusers_cd_imagenet64_l2''' )
__lowerCAmelCase = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
__lowerCAmelCase = ConsistencyModelPipeline(unet=__lowercase , scheduler=__lowercase )
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
pipe.set_progress_bar_config(disable=__lowercase )
__lowerCAmelCase = self.get_inputs(get_fixed_latents=__lowercase , device=__lowercase )
# Ensure usage of flash attention in torch 2.0
with sdp_kernel(enable_flash=__lowercase , enable_math=__lowercase , enable_mem_efficient=__lowercase ):
__lowerCAmelCase = pipe(**__lowercase ).images
assert image.shape == (1, 64, 64, 3)
__lowerCAmelCase = image[0, -3:, -3:, -1]
__lowerCAmelCase = np.array([0.1_8_7_5, 0.1_4_2_8, 0.1_2_8_9, 0.2_1_5_1, 0.2_0_9_2, 0.1_4_7_7, 0.1_8_7_7, 0.1_6_4_1, 0.1_3_5_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
    @require_torch_2
def _snake_case (self ):
__lowerCAmelCase = UNetaDModel.from_pretrained('''diffusers/consistency_models''' , subfolder='''diffusers_cd_imagenet64_l2''' )
__lowerCAmelCase = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
__lowerCAmelCase = ConsistencyModelPipeline(unet=__lowercase , scheduler=__lowercase )
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
pipe.set_progress_bar_config(disable=__lowercase )
__lowerCAmelCase = self.get_inputs(get_fixed_latents=__lowercase , device=__lowercase )
__lowerCAmelCase = 1
__lowerCAmelCase = None
# Ensure usage of flash attention in torch 2.0
with sdp_kernel(enable_flash=__lowercase , enable_math=__lowercase , enable_mem_efficient=__lowercase ):
__lowerCAmelCase = pipe(**__lowercase ).images
assert image.shape == (1, 64, 64, 3)
__lowerCAmelCase = image[0, -3:, -3:, -1]
__lowerCAmelCase = np.array([0.1_6_6_3, 0.1_9_4_8, 0.2_2_7_5, 0.1_6_8_0, 0.1_2_0_4, 0.1_2_4_5, 0.1_8_5_8, 0.1_3_3_8, 0.2_0_9_5] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
| 9
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowerCamelCase : Optional[Any] = {
"configuration_lilt": ["LILT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LiltConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Any = [
"LILT_PRETRAINED_MODEL_ARCHIVE_LIST",
"LiltForQuestionAnswering",
"LiltForSequenceClassification",
"LiltForTokenClassification",
"LiltModel",
"LiltPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lilt import (
LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
LiltPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 28
|
'''simple docstring'''
from PIL import Image
def change_brightness(img: Image, level: float) -> Image:
    """
    Change the brightness of a PIL Image to a given level (in [-255, 255]).
    """

    def brightness(c: int) -> float:
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError('level must be between -255.0 (black) and 255.0 (white)')

    return img.point(brightness)
if __name__ == "__main__":
# Load image
with Image.open("image_data/lena.jpg") as img:
# Change brightness to 100
        bright_img = change_brightness(img, 100)
        bright_img.save("image_data/lena_brightness.png", format="png")
| 28
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ : Optional[int] = logging.get_logger(__name__)
UpperCAmelCase_ : Union[str, Any] = {
"""SCUT-DLVCLab/lilt-roberta-en-base""": (
"""https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json"""
),
}
class lowerCAmelCase__ ( UpperCAmelCase__ ):
'''simple docstring'''
__UpperCamelCase = "lilt"
def __init__( self : Union[str, Any] , lowercase_ : List[str]=30522 , lowercase_ : Optional[Any]=768 , lowercase_ : List[Any]=12 , lowercase_ : str=12 , lowercase_ : List[Any]=3072 , lowercase_ : int="gelu" , lowercase_ : Optional[int]=0.1 , lowercase_ : str=0.1 , lowercase_ : Dict=512 , lowercase_ : int=2 , lowercase_ : List[Any]=0.02 , lowercase_ : Tuple=1e-12 , lowercase_ : List[str]=0 , lowercase_ : int="absolute" , lowercase_ : List[str]=None , lowercase_ : Tuple=4 , lowercase_ : int=1024 , **lowercase_ : Optional[Any] , ):
'''simple docstring'''
super().__init__(pad_token_id=lowercase_ , **lowercase_)
SCREAMING_SNAKE_CASE_ : Any = vocab_size
SCREAMING_SNAKE_CASE_ : List[Any] = hidden_size
SCREAMING_SNAKE_CASE_ : Optional[int] = num_hidden_layers
SCREAMING_SNAKE_CASE_ : Optional[int] = num_attention_heads
SCREAMING_SNAKE_CASE_ : Optional[Any] = hidden_act
SCREAMING_SNAKE_CASE_ : List[Any] = intermediate_size
SCREAMING_SNAKE_CASE_ : str = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ : str = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ : Optional[int] = max_position_embeddings
SCREAMING_SNAKE_CASE_ : List[Any] = type_vocab_size
SCREAMING_SNAKE_CASE_ : Dict = initializer_range
SCREAMING_SNAKE_CASE_ : int = layer_norm_eps
SCREAMING_SNAKE_CASE_ : Any = position_embedding_type
SCREAMING_SNAKE_CASE_ : str = classifier_dropout
SCREAMING_SNAKE_CASE_ : Optional[Any] = channel_shrink_ratio
SCREAMING_SNAKE_CASE_ : Any = max_ad_position_embeddings
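# Hedged usage sketch (assumes transformers is installed; the defaults above
# mirror the SCUT-DLVCLab/lilt-roberta-en-base checkpoint):
#
#   from transformers import LiltConfig, LiltModel
#
#   config = LiltConfig()
#   model = LiltModel(config)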
| 318
|
"""simple docstring"""
from pathlib import Path
import numpy as np
from PIL import Image
def rgb_to_gray(rgb: np.ndarray) -> np.ndarray:
    """Return a gray image computed from an RGB image."""
    r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
    return 0.2989 * r + 0.5870 * g + 0.1140 * b


def gray_to_binary(gray: np.ndarray) -> np.ndarray:
    """Return a binary (boolean) image thresholded from a gray image."""
    return (gray > 127) & (gray <= 255)


def dilation(image: np.ndarray, kernel: np.ndarray) -> np.ndarray:
    """Return the morphological dilation of a binary image by `kernel`."""
    output = np.zeros_like(image)
    image_padded = np.zeros(
        (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1)
    )
    # Copy image to padded image
    image_padded[kernel.shape[0] - 2 : -1, kernel.shape[1] - 2 : -1] = image
    # Iterate over image & apply kernel
    for x in range(image.shape[1]):
        for y in range(image.shape[0]):
            summation = (
                kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
            ).sum()
            output[y, x] = int(summation > 0)
    return output
if __name__ == "__main__":
    # read original image
    lena_path = Path(__file__).resolve().parent / "image_data" / "lena.jpg"
    lena = np.array(Image.open(lena_path))
    # kernel to be applied
    structuring_element = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
    output = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)
    # Save the output image
    pil_img = Image.fromarray(output).convert("RGB")
    pil_img.save("result_dilation.png")
| 318
| 1
|
"""simple docstring"""
# Algorithm for the pigeonhole sorting
def pigeonhole_sort(a: list[int]) -> None:
    """Sort the list `a` in place using pigeonhole sort."""
    min_val = min(a)  # min() finds the minimum value
    max_val = max(a)  # max() finds the maximum value
    size = max_val - min_val + 1  # size is difference of max and min values plus one

    # list of pigeonholes of size equal to the variable size
    holes = [0] * size

    # Populate the pigeonholes.
    for x in a:
        assert isinstance(x, int), "integers only please"
        holes[x - min_val] += 1

    # Putting the elements back into the array in an order.
    i = 0
    for count in range(size):
        while holes[count] > 0:
            holes[count] -= 1
            a[i] = count + min_val
            i += 1


def main() -> None:
    a = [8, 3, 2, 7, 4, 6, 8]
    pigeonhole_sort(a)
    print("Sorted order is:", " ".join(str(x) for x in a))
if __name__ == "__main__":
main()
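# Pigeonhole sort runs in O(n + range) time and O(range) extra space, where
# range = max(a) - min(a) + 1, so it only pays off when the value range is
# comparable to the number of elements being sorted.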
| 191
|
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase : Any = logging.get_logger(__name__)
lowerCAmelCase : Tuple = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""}
lowerCAmelCase : Optional[int] = {
"""vocab_file""": {
"""allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json""",
"""allenai/longformer-large-4096""": (
"""https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json"""
),
"""allenai/longformer-large-4096-finetuned-triviaqa""": (
"""https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json"""
),
"""allenai/longformer-base-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json"""
),
"""allenai/longformer-large-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json"""
),
},
"""merges_file""": {
"""allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt""",
"""allenai/longformer-large-4096""": (
"""https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt"""
),
"""allenai/longformer-large-4096-finetuned-triviaqa""": (
"""https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt"""
),
"""allenai/longformer-base-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt"""
),
"""allenai/longformer-large-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt"""
),
},
}
lowerCAmelCase : Optional[Any] = {
"""allenai/longformer-base-4096""": 4096,
"""allenai/longformer-large-4096""": 4096,
"""allenai/longformer-large-4096-finetuned-triviaqa""": 4096,
"""allenai/longformer-base-4096-extra.pos.embd.only""": 4096,
"""allenai/longformer-large-4096-extra.pos.embd.only""": 4096,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
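# Quick check of the byte-to-unicode table above: all 256 byte values map to
# distinct printable unicode characters, so BPE never sees raw control bytes.
#
#   mapping = bytes_to_unicode()
#   assert len(mapping) == 256 and len(set(mapping.values())) == 256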
class __lowercase ( UpperCAmelCase_ ):
"""simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
def __init__( self : str , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : List[Any]="replace" , lowerCAmelCase__ : Optional[Any]="<s>" , lowerCAmelCase__ : int="</s>" , lowerCAmelCase__ : Optional[Any]="</s>" , lowerCAmelCase__ : int="<s>" , lowerCAmelCase__ : Optional[Any]="<unk>" , lowerCAmelCase__ : List[Any]="<pad>" , lowerCAmelCase__ : Any="<mask>" , lowerCAmelCase__ : Union[str, Any]=False , **lowerCAmelCase__ : Tuple , ):
SCREAMING_SNAKE_CASE_: int = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__ , lowerCAmelCase__) else bos_token
SCREAMING_SNAKE_CASE_: str = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__ , lowerCAmelCase__) else eos_token
SCREAMING_SNAKE_CASE_: Optional[int] = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__ , lowerCAmelCase__) else sep_token
SCREAMING_SNAKE_CASE_: Union[str, Any] = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__ , lowerCAmelCase__) else cls_token
SCREAMING_SNAKE_CASE_: int = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__ , lowerCAmelCase__) else unk_token
SCREAMING_SNAKE_CASE_: Any = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__ , lowerCAmelCase__) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
SCREAMING_SNAKE_CASE_: Optional[int] = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__ , lowerCAmelCase__) else mask_token
super().__init__(
errors=lowerCAmelCase__ , bos_token=lowerCAmelCase__ , eos_token=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , **lowerCAmelCase__ , )
with open(lowerCAmelCase__ , encoding="utf-8") as vocab_handle:
SCREAMING_SNAKE_CASE_: Tuple = json.load(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[str] = {v: k for k, v in self.encoder.items()}
SCREAMING_SNAKE_CASE_: Optional[Any] = errors # how to handle errors in decoding
SCREAMING_SNAKE_CASE_: List[Any] = bytes_to_unicode()
SCREAMING_SNAKE_CASE_: Optional[Any] = {v: k for k, v in self.byte_encoder.items()}
with open(lowerCAmelCase__ , encoding="utf-8") as merges_handle:
SCREAMING_SNAKE_CASE_: List[Any] = merges_handle.read().split("\n")[1:-1]
SCREAMING_SNAKE_CASE_: str = [tuple(merge.split()) for merge in bpe_merges]
SCREAMING_SNAKE_CASE_: List[Any] = dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__))))
SCREAMING_SNAKE_CASE_: str = {}
SCREAMING_SNAKE_CASE_: Optional[Any] = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
SCREAMING_SNAKE_CASE_: List[Any] = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")
@property
def _SCREAMING_SNAKE_CASE ( self : int):
return len(self.encoder)
def _SCREAMING_SNAKE_CASE ( self : int):
return dict(self.encoder , **self.added_tokens_encoder)
def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase__ : List[str]):
if token in self.cache:
return self.cache[token]
SCREAMING_SNAKE_CASE_: Optional[int] = tuple(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[int] = get_pairs(lowerCAmelCase__)
if not pairs:
return token
while True:
SCREAMING_SNAKE_CASE_: int = min(lowerCAmelCase__ , key=lambda lowerCAmelCase__: self.bpe_ranks.get(lowerCAmelCase__ , float("inf")))
if bigram not in self.bpe_ranks:
break
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[Any] = bigram
SCREAMING_SNAKE_CASE_: Optional[int] = []
SCREAMING_SNAKE_CASE_: List[Any] = 0
while i < len(lowerCAmelCase__):
try:
SCREAMING_SNAKE_CASE_: List[Any] = word.index(lowerCAmelCase__ , lowerCAmelCase__)
except ValueError:
new_word.extend(word[i:])
break
else:
new_word.extend(word[i:j])
SCREAMING_SNAKE_CASE_: Tuple = j
if word[i] == first and i < len(lowerCAmelCase__) - 1 and word[i + 1] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
SCREAMING_SNAKE_CASE_: str = tuple(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: int = new_word
if len(lowerCAmelCase__) == 1:
break
else:
SCREAMING_SNAKE_CASE_: Dict = get_pairs(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Union[str, Any] = " ".join(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Tuple = word
return word
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase__ : Tuple):
SCREAMING_SNAKE_CASE_: Optional[Any] = []
for token in re.findall(self.pat , lowerCAmelCase__):
SCREAMING_SNAKE_CASE_: str = "".join(
self.byte_encoder[b] for b in token.encode("utf-8")) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCAmelCase__).split(" "))
return bpe_tokens
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase__ : Tuple):
return self.encoder.get(lowerCAmelCase__ , self.encoder.get(self.unk_token))
def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase__ : Union[str, Any]):
return self.decoder.get(lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase__ : Optional[int]):
SCREAMING_SNAKE_CASE_: Any = "".join(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Union[str, Any] = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8" , errors=self.errors)
return text
def _SCREAMING_SNAKE_CASE ( self : Dict , lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[str] = None):
if not os.path.isdir(lowerCAmelCase__):
logger.error(F"Vocabulary path ({save_directory}) should be a directory")
return
SCREAMING_SNAKE_CASE_: Any = os.path.join(
lowerCAmelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
SCREAMING_SNAKE_CASE_: Any = os.path.join(
lowerCAmelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"])
with open(lowerCAmelCase__ , "w" , encoding="utf-8") as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCAmelCase__ , ensure_ascii=lowerCAmelCase__) + "\n")
SCREAMING_SNAKE_CASE_: List[Any] = 0
with open(lowerCAmelCase__ , "w" , encoding="utf-8") as writer:
writer.write("#version: 0.2\n")
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda lowerCAmelCase__: kv[1]):
if index != token_index:
logger.warning(
F"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
" Please check that the tokenizer is not corrupted!")
SCREAMING_SNAKE_CASE_: List[Any] = token_index
writer.write(" ".join(lowerCAmelCase__) + "\n")
index += 1
return vocab_file, merge_file
def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : Optional[List[int]] = None):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
SCREAMING_SNAKE_CASE_: Optional[int] = [self.cls_token_id]
SCREAMING_SNAKE_CASE_: Dict = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : Optional[List[int]] = None , lowerCAmelCase__ : bool = False):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase__ , token_ids_a=lowerCAmelCase__ , already_has_special_tokens=lowerCAmelCase__)
if token_ids_a is None:
return [1] + ([0] * len(lowerCAmelCase__)) + [1]
return [1] + ([0] * len(lowerCAmelCase__)) + [1, 1] + ([0] * len(lowerCAmelCase__)) + [1]
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : Optional[List[int]] = None):
SCREAMING_SNAKE_CASE_: Optional[int] = [self.sep_token_id]
SCREAMING_SNAKE_CASE_: int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
def _SCREAMING_SNAKE_CASE ( self : int , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : List[str]=False , **lowerCAmelCase__ : str):
SCREAMING_SNAKE_CASE_: List[Any] = kwargs.pop("add_prefix_space" , self.add_prefix_space)
if (is_split_into_words or add_prefix_space) and (len(lowerCAmelCase__) > 0 and not text[0].isspace()):
SCREAMING_SNAKE_CASE_: Optional[Any] = " " + text
return (text, kwargs)
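# Hedged usage sketch (checkpoint name taken from the vocabulary maps above):
#
#   from transformers import LongformerTokenizer
#
#   tok = LongformerTokenizer.from_pretrained("allenai/longformer-base-4096")
#   enc = tok("Hello world")   # wraps the BPE ids in <s> ... </s> special tokens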
| 13
| 0
|
"""simple docstring"""
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class ImageSegmentationTool(PipelineTool):
    description = (
        'This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image.'
        'It takes two arguments named `image` which should be the original image, and `label` which should be a text '
        'describing the elements what should be identified in the segmentation mask. The tool returns the mask.'
    )
    default_checkpoint = 'CIDAS/clipseg-rd64-refined'
    name = 'image_segmenter'
    model_class = CLIPSegForImageSegmentation

    inputs = ['image', 'text']
    outputs = ['image']
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image, label):
        return self.pre_processor(text=[label], images=[image], padding=True, return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            logits = self.model(**inputs).logits
        return logits

    def decode(self, outputs):
        array = outputs.cpu().detach().numpy()
        array[array <= 0] = 0
        array[array > 0] = 1
        return Image.fromarray((array * 255).astype(np.uint8))
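# Usage sketch (assumes torch, transformers and PIL are installed; the image
# path is illustrative):
#
#   from PIL import Image
#
#   tool = ImageSegmentationTool()
#   mask = tool(image=Image.open("cat.png"), label="cat")   # returns a PIL mask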
| 361
|
"""simple docstring"""
import math
def sieve(n: int) -> list[int]:
    """Segmented sieve of Eratosthenes: return all primes <= n."""
    prime = []
    start = 2
    end = int(math.sqrt(n))  # Size of every segment
    temp = [True] * (end + 1)
    in_prime = []

    while start <= end:
        if temp[start] is True:
            in_prime.append(start)
            for i in range(start * start, end + 1, start):
                temp[i] = False
        start += 1
    prime += in_prime

    low = end + 1
    high = min(2 * end, n)

    while low <= n:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            t = math.floor(low / each) * each
            if t < low:
                t += each
            # mark every multiple of `each` inside the current segment
            for j in range(t, high + 1, each):
                temp[j - low] = False
        for j in range(len(temp)):
            if temp[j] is True:
                prime.append(j + low)
        low = high + 1
        high = min(high + end, n)
    return prime
print(sieve(1_0**6))
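# Memory note: each segment only materialises about sqrt(n) flags at a time,
# so sieve(10**6) never holds more than ~1000 booleans per pass (plus the
# growing list of primes it returns).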
| 317
| 0
|
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
__lowerCAmelCase : Dict = logging.get_logger(__name__)
__lowerCAmelCase : Union[str, Any] = OrderedDict(
[
# Base model mapping
("albert", "FlaxAlbertModel"),
("bart", "FlaxBartModel"),
("beit", "FlaxBeitModel"),
("bert", "FlaxBertModel"),
("big_bird", "FlaxBigBirdModel"),
("blenderbot", "FlaxBlenderbotModel"),
("blenderbot-small", "FlaxBlenderbotSmallModel"),
("clip", "FlaxCLIPModel"),
("distilbert", "FlaxDistilBertModel"),
("electra", "FlaxElectraModel"),
("gpt-sw3", "FlaxGPT2Model"),
("gpt2", "FlaxGPT2Model"),
("gpt_neo", "FlaxGPTNeoModel"),
("gptj", "FlaxGPTJModel"),
("longt5", "FlaxLongT5Model"),
("marian", "FlaxMarianModel"),
("mbart", "FlaxMBartModel"),
("mt5", "FlaxMT5Model"),
("opt", "FlaxOPTModel"),
("pegasus", "FlaxPegasusModel"),
("regnet", "FlaxRegNetModel"),
("resnet", "FlaxResNetModel"),
("roberta", "FlaxRobertaModel"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormModel"),
("roformer", "FlaxRoFormerModel"),
("t5", "FlaxT5Model"),
("vision-text-dual-encoder", "FlaxVisionTextDualEncoderModel"),
("vit", "FlaxViTModel"),
("wav2vec2", "FlaxWav2Vec2Model"),
("whisper", "FlaxWhisperModel"),
("xglm", "FlaxXGLMModel"),
("xlm-roberta", "FlaxXLMRobertaModel"),
]
)
__lowerCAmelCase : Any = OrderedDict(
[
# Model for pre-training mapping
("albert", "FlaxAlbertForPreTraining"),
("bart", "FlaxBartForConditionalGeneration"),
("bert", "FlaxBertForPreTraining"),
("big_bird", "FlaxBigBirdForPreTraining"),
("electra", "FlaxElectraForPreTraining"),
("longt5", "FlaxLongT5ForConditionalGeneration"),
("mbart", "FlaxMBartForConditionalGeneration"),
("mt5", "FlaxMT5ForConditionalGeneration"),
("roberta", "FlaxRobertaForMaskedLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
("roformer", "FlaxRoFormerForMaskedLM"),
("t5", "FlaxT5ForConditionalGeneration"),
("wav2vec2", "FlaxWav2Vec2ForPreTraining"),
("whisper", "FlaxWhisperForConditionalGeneration"),
("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
]
)
__lowerCAmelCase : List[Any] = OrderedDict(
[
# Model for Masked LM mapping
("albert", "FlaxAlbertForMaskedLM"),
("bart", "FlaxBartForConditionalGeneration"),
("bert", "FlaxBertForMaskedLM"),
("big_bird", "FlaxBigBirdForMaskedLM"),
("distilbert", "FlaxDistilBertForMaskedLM"),
("electra", "FlaxElectraForMaskedLM"),
("mbart", "FlaxMBartForConditionalGeneration"),
("roberta", "FlaxRobertaForMaskedLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
("roformer", "FlaxRoFormerForMaskedLM"),
("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
]
)
__lowerCAmelCase : Tuple = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
("bart", "FlaxBartForConditionalGeneration"),
("blenderbot", "FlaxBlenderbotForConditionalGeneration"),
("blenderbot-small", "FlaxBlenderbotSmallForConditionalGeneration"),
("encoder-decoder", "FlaxEncoderDecoderModel"),
("longt5", "FlaxLongT5ForConditionalGeneration"),
("marian", "FlaxMarianMTModel"),
("mbart", "FlaxMBartForConditionalGeneration"),
("mt5", "FlaxMT5ForConditionalGeneration"),
("pegasus", "FlaxPegasusForConditionalGeneration"),
("t5", "FlaxT5ForConditionalGeneration"),
]
)
__lowerCAmelCase : Dict = OrderedDict(
[
# Model for Image-classsification
("beit", "FlaxBeitForImageClassification"),
("regnet", "FlaxRegNetForImageClassification"),
("resnet", "FlaxResNetForImageClassification"),
("vit", "FlaxViTForImageClassification"),
]
)
__lowerCAmelCase : int = OrderedDict(
[
("vision-encoder-decoder", "FlaxVisionEncoderDecoderModel"),
]
)
__lowerCAmelCase : Dict = OrderedDict(
[
# Model for Causal LM mapping
("bart", "FlaxBartForCausalLM"),
("bert", "FlaxBertForCausalLM"),
("big_bird", "FlaxBigBirdForCausalLM"),
("electra", "FlaxElectraForCausalLM"),
("gpt-sw3", "FlaxGPT2LMHeadModel"),
("gpt2", "FlaxGPT2LMHeadModel"),
("gpt_neo", "FlaxGPTNeoForCausalLM"),
("gptj", "FlaxGPTJForCausalLM"),
("opt", "FlaxOPTForCausalLM"),
("roberta", "FlaxRobertaForCausalLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForCausalLM"),
("xglm", "FlaxXGLMForCausalLM"),
("xlm-roberta", "FlaxXLMRobertaForCausalLM"),
]
)
__lowerCAmelCase : Dict = OrderedDict(
[
# Model for Sequence Classification mapping
("albert", "FlaxAlbertForSequenceClassification"),
("bart", "FlaxBartForSequenceClassification"),
("bert", "FlaxBertForSequenceClassification"),
("big_bird", "FlaxBigBirdForSequenceClassification"),
("distilbert", "FlaxDistilBertForSequenceClassification"),
("electra", "FlaxElectraForSequenceClassification"),
("mbart", "FlaxMBartForSequenceClassification"),
("roberta", "FlaxRobertaForSequenceClassification"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForSequenceClassification"),
("roformer", "FlaxRoFormerForSequenceClassification"),
("xlm-roberta", "FlaxXLMRobertaForSequenceClassification"),
]
)
__lowerCAmelCase : str = OrderedDict(
[
# Model for Question Answering mapping
("albert", "FlaxAlbertForQuestionAnswering"),
("bart", "FlaxBartForQuestionAnswering"),
("bert", "FlaxBertForQuestionAnswering"),
("big_bird", "FlaxBigBirdForQuestionAnswering"),
("distilbert", "FlaxDistilBertForQuestionAnswering"),
("electra", "FlaxElectraForQuestionAnswering"),
("mbart", "FlaxMBartForQuestionAnswering"),
("roberta", "FlaxRobertaForQuestionAnswering"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForQuestionAnswering"),
("roformer", "FlaxRoFormerForQuestionAnswering"),
("xlm-roberta", "FlaxXLMRobertaForQuestionAnswering"),
]
)
__lowerCAmelCase : Dict = OrderedDict(
[
# Model for Token Classification mapping
("albert", "FlaxAlbertForTokenClassification"),
("bert", "FlaxBertForTokenClassification"),
("big_bird", "FlaxBigBirdForTokenClassification"),
("distilbert", "FlaxDistilBertForTokenClassification"),
("electra", "FlaxElectraForTokenClassification"),
("roberta", "FlaxRobertaForTokenClassification"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForTokenClassification"),
("roformer", "FlaxRoFormerForTokenClassification"),
("xlm-roberta", "FlaxXLMRobertaForTokenClassification"),
]
)
__lowerCAmelCase : Optional[int] = OrderedDict(
[
# Model for Multiple Choice mapping
("albert", "FlaxAlbertForMultipleChoice"),
("bert", "FlaxBertForMultipleChoice"),
("big_bird", "FlaxBigBirdForMultipleChoice"),
("distilbert", "FlaxDistilBertForMultipleChoice"),
("electra", "FlaxElectraForMultipleChoice"),
("roberta", "FlaxRobertaForMultipleChoice"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMultipleChoice"),
("roformer", "FlaxRoFormerForMultipleChoice"),
("xlm-roberta", "FlaxXLMRobertaForMultipleChoice"),
]
)
__lowerCAmelCase : List[Any] = OrderedDict(
[
("bert", "FlaxBertForNextSentencePrediction"),
]
)
__lowerCAmelCase : List[Any] = OrderedDict(
[
("speech-encoder-decoder", "FlaxSpeechEncoderDecoderModel"),
("whisper", "FlaxWhisperForConditionalGeneration"),
]
)
__lowerCAmelCase : Any = OrderedDict(
[
("whisper", "FlaxWhisperForAudioClassification"),
]
)
__lowerCAmelCase : str = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
__lowerCAmelCase : Tuple = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
__lowerCAmelCase : List[str] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
__lowerCAmelCase : List[str] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
__lowerCAmelCase : List[Any] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
__lowerCAmelCase : Dict = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
__lowerCAmelCase : Tuple = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
__lowerCAmelCase : Tuple = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
__lowerCAmelCase : Optional[Any] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
__lowerCAmelCase : List[Any] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
__lowerCAmelCase : Any = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
__lowerCAmelCase : str = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
__lowerCAmelCase : Union[str, Any] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
__lowerCAmelCase : int = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class __lowerCAmelCase ( _BaseAutoModelClass ):
"""simple docstring"""
A__ : Optional[Any] = FLAX_MODEL_MAPPING
__lowerCAmelCase : str = auto_class_update(FlaxAutoModel)
class __lowerCAmelCase ( _BaseAutoModelClass ):
"""simple docstring"""
A__ : str = FLAX_MODEL_FOR_PRETRAINING_MAPPING
__lowerCAmelCase : str = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")
class __lowerCAmelCase ( _BaseAutoModelClass ):
"""simple docstring"""
A__ : List[str] = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING
__lowerCAmelCase : List[Any] = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")
class __lowerCAmelCase ( _BaseAutoModelClass ):
"""simple docstring"""
A__ : Optional[Any] = FLAX_MODEL_FOR_MASKED_LM_MAPPING
__lowerCAmelCase : Dict = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")
class __lowerCAmelCase ( _BaseAutoModelClass ):
"""simple docstring"""
A__ : List[str] = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
__lowerCAmelCase : str = auto_class_update(
FlaxAutoModelForSeqaSeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)
class __lowerCAmelCase ( _BaseAutoModelClass ):
"""simple docstring"""
A__ : Optional[Any] = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
__lowerCAmelCase : Optional[int] = auto_class_update(
FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)
class __lowerCAmelCase ( _BaseAutoModelClass ):
"""simple docstring"""
A__ : Optional[int] = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING
__lowerCAmelCase : Dict = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")
class __lowerCAmelCase ( _BaseAutoModelClass ):
"""simple docstring"""
A__ : Any = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
__lowerCAmelCase : List[Any] = auto_class_update(
FlaxAutoModelForTokenClassification, head_doc="token classification"
)
class __lowerCAmelCase ( _BaseAutoModelClass ):
"""simple docstring"""
A__ : Union[str, Any] = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING
__lowerCAmelCase : Tuple = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")
class __lowerCAmelCase ( _BaseAutoModelClass ):
"""simple docstring"""
A__ : Optional[Any] = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING
__lowerCAmelCase : List[str] = auto_class_update(
FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)
class __lowerCAmelCase ( _BaseAutoModelClass ):
"""simple docstring"""
A__ : str = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
__lowerCAmelCase : int = auto_class_update(
FlaxAutoModelForImageClassification, head_doc="image classification"
)
class __lowerCAmelCase ( _BaseAutoModelClass ):
"""simple docstring"""
A__ : List[Any] = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING
__lowerCAmelCase : List[str] = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc="vision-to-text modeling")
class __lowerCAmelCase ( _BaseAutoModelClass ):
"""simple docstring"""
A__ : Tuple = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING
__lowerCAmelCase : int = auto_class_update(
FlaxAutoModelForSpeechSeqaSeq, head_doc="sequence-to-sequence speech-to-text modeling"
)
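# Hedged usage sketch (assumes flax and transformers are installed; the
# checkpoint id is illustrative and resolves through the mapping above):
#
#   from transformers import FlaxAutoModel
#
#   model = FlaxAutoModel.from_pretrained("bert-base-cased")   # -> FlaxBertModel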
| 156
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
a_ = logging.get_logger(__name__)
a_ = {'vocab_file': 'vocab.txt'}
a_ = {
'vocab_file': {
'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt',
'YituTech/conv-bert-medium-small': (
'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'
),
'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt',
}
}
a_ = {
'YituTech/conv-bert-base': 512,
'YituTech/conv-bert-medium-small': 512,
'YituTech/conv-bert-small': 512,
}
a_ = {
'YituTech/conv-bert-base': {'do_lower_case': True},
'YituTech/conv-bert-medium-small': {'do_lower_case': True},
'YituTech/conv-bert-small': {'do_lower_case': True},
}
class _UpperCamelCase ( __A ):
'''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1 is not None:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
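# Hedged usage sketch (checkpoint name taken from the vocabulary maps above):
#
#   from transformers import ConvBertTokenizerFast
#
#   tok = ConvBertTokenizerFast.from_pretrained("YituTech/conv-bert-base")
#   ids = tok("hello world")["input_ids"]   # [CLS] ... [SEP] WordPiece ids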
| 76
| 0
|
"""simple docstring"""
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class snake_case :
def __init__( self : str , A : Union[str, Any] , A : Union[str, Any]=1_3 , A : Union[str, Any]=3_2 , A : Optional[Any]=3 , A : Optional[Any]=4 , A : List[Any]=[1_0, 2_0, 3_0, 4_0] , A : Dict=[2, 2, 3, 2] , A : Union[str, Any]=True , A : str=True , A : List[str]=3_7 , A : Optional[int]="gelu" , A : int=1_0 , A : List[Any]=0.02 , A : int=["stage2", "stage3", "stage4"] , A : Optional[Any]=[2, 3, 4] , A : int=None , ):
'''simple docstring'''
a : int = parent
a : str = batch_size
a : int = image_size
a : Union[str, Any] = num_channels
a : Union[str, Any] = num_stages
a : Dict = hidden_sizes
a : Any = depths
a : str = is_training
a : Optional[int] = use_labels
a : List[str] = intermediate_size
a : int = hidden_act
a : str = num_labels
a : Optional[int] = initializer_range
a : List[str] = out_features
a : Dict = out_indices
a : Tuple = scope
def lowerCamelCase__ ( self : Dict ):
'''simple docstring'''
a : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
a : Any = None
if self.use_labels:
a : Optional[Any] = ids_tensor([self.batch_size] , self.num_labels )
a : Optional[int] = self.get_config()
return config, pixel_values, labels
def lowerCamelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
return ConvNextConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=A , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def lowerCamelCase__ ( self : Optional[int] , A : Dict , A : List[Any] , A : Tuple ):
'''simple docstring'''
a : str = ConvNextModel(config=A )
model.to(A )
model.eval()
a : int = model(A )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , )
def lowerCamelCase__ ( self : str , A : Union[str, Any] , A : Dict , A : Tuple ):
'''simple docstring'''
a : Union[str, Any] = ConvNextForImageClassification(A )
model.to(A )
model.eval()
a : int = model(A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase__ ( self : Optional[int] , A : Optional[int] , A : int , A : Dict ):
'''simple docstring'''
a : Optional[int] = ConvNextBackbone(config=A )
model.to(A )
model.eval()
a : str = model(A )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
a : Union[str, Any] = None
a : Optional[int] = ConvNextBackbone(config=A )
model.to(A )
model.eval()
a : int = model(A )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def lowerCamelCase__ ( self : List[str] ):
'''simple docstring'''
a : List[Any] = self.prepare_config_and_inputs()
        a , a , a : List[str] = config_and_inputs
a : Tuple = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class snake_case ( UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
__magic_name__ = (
(
ConvNextModel,
ConvNextForImageClassification,
ConvNextBackbone,
)
if is_torch_available()
else ()
)
__magic_name__ = (
{'''feature-extraction''': ConvNextModel, '''image-classification''': ConvNextForImageClassification}
if is_torch_available()
else {}
)
__magic_name__ = True
__magic_name__ = False
__magic_name__ = False
__magic_name__ = False
__magic_name__ = False
def lowerCamelCase__ ( self : Any ):
'''simple docstring'''
a : Optional[Any] = ConvNextModelTester(self )
a : List[str] = ConfigTester(self , config_class=A , has_text_modality=A , hidden_size=3_7 )
def lowerCamelCase__ ( self : Dict ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCamelCase__ ( self : List[str] ):
'''simple docstring'''
return
@unittest.skip(reason='ConvNext does not use inputs_embeds' )
def lowerCamelCase__ ( self : str ):
'''simple docstring'''
pass
@unittest.skip(reason='ConvNext does not support input and output embeddings' )
def lowerCamelCase__ ( self : Dict ):
'''simple docstring'''
pass
@unittest.skip(reason='ConvNext does not use feedforward chunking' )
def lowerCamelCase__ ( self : Dict ):
'''simple docstring'''
pass
def lowerCamelCase__ ( self : Any ):
'''simple docstring'''
a : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a : List[Any] = model_class(A )
a : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a : Dict = [*signature.parameters.keys()]
a : int = ['pixel_values']
self.assertListEqual(arg_names[:1] , A )
def lowerCamelCase__ ( self : List[str] ):
'''simple docstring'''
a : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
def lowerCamelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
a : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*A )
def lowerCamelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
def check_hidden_states_output(A : Union[str, Any] , A : List[Any] , A : Union[str, Any] ):
a : int = model_class(A )
model.to(A )
model.eval()
with torch.no_grad():
a : List[Any] = model(**self._prepare_for_class(A , A ) )
a : int = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
a : Tuple = self.model_tester.num_stages
self.assertEqual(len(A ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
a : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a : List[str] = True
check_hidden_states_output(A , A , A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
a : str = True
check_hidden_states_output(A , A , A )
def lowerCamelCase__ ( self : Optional[int] ):
'''simple docstring'''
a : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A )
@slow
def lowerCamelCase__ ( self : str ):
'''simple docstring'''
for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a : Tuple = ConvNextModel.from_pretrained(A )
self.assertIsNotNone(A )
def snake_case ():
'''simple docstring'''
a : List[str] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class snake_case ( unittest.TestCase ):
@cached_property
def lowerCamelCase__ ( self : str ):
'''simple docstring'''
return AutoImageProcessor.from_pretrained('facebook/convnext-tiny-224' ) if is_vision_available() else None
@slow
def lowerCamelCase__ ( self : str ):
'''simple docstring'''
a : Union[str, Any] = ConvNextForImageClassification.from_pretrained('facebook/convnext-tiny-224' ).to(A )
a : Optional[int] = self.default_image_processor
a : Optional[Any] = prepare_img()
a : str = image_processor(images=A , return_tensors='pt' ).to(A )
# forward pass
with torch.no_grad():
a : Any = model(**A )
# verify the logits
a : Optional[int] = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , A )
a : Optional[Any] = torch.tensor([-0.02_60, -0.47_39, 0.19_11] ).to(A )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , A , atol=1E-4 ) )
@require_torch
class snake_case ( unittest.TestCase , UpperCAmelCase ):
__magic_name__ = (ConvNextBackbone,) if is_torch_available() else ()
__magic_name__ = ConvNextConfig
__magic_name__ = False
def lowerCamelCase__ ( self : Optional[Any] ):
'''simple docstring'''
a : Union[str, Any] = ConvNextModelTester(self )
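# --- Added illustration, not part of the test file: where the spatial sizes
# asserted above come from. ConvNext's patchify stem downsamples by 4 and each
# subsequent stage halves the resolution, so stage i (0-indexed) produces
# feature maps of side image_size // (4 * 2**i).
def _convnext_stage_resolution(image_size, stage):
    return image_size // (4 * 2**stage)

assert _convnext_stage_resolution(32, 0) == 8  # first hidden state: image_size // 4
assert _convnext_stage_resolution(32, 1) == 4  # the "stage2" backbone map checked above
assert _convnext_stage_resolution(32, 3) == 1  # final map: the H // 32 comment above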
| 371
|
"""simple docstring"""
import argparse
import os
import re
import packaging.version
_UpperCamelCase : Optional[Any] = 'examples/'
_UpperCamelCase : Any = {
'examples': (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
'init': (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
'setup': (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
'doc': (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
_UpperCamelCase : List[str] = {
'init': 'src/diffusers/__init__.py',
'setup': 'setup.py',
}
_UpperCamelCase : List[str] = 'README.md'
def snake_case (A_ :str , A_ :Optional[Any] , A_ :Any ):
'''simple docstring'''
with open(A_ , 'r' , encoding='utf-8' , newline='\n' ) as f:
a : Tuple = f.read()
a, a : Any = REPLACE_PATTERNS[pattern]
a : Dict = replace.replace('VERSION' , A_ )
a : Union[str, Any] = re_pattern.sub(A_ , A_ )
with open(A_ , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.write(A_ )
def snake_case (A_ :List[Any] ):
'''simple docstring'''
for folder, directories, fnames in os.walk(A_ ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove('research_projects' )
if "legacy" in directories:
directories.remove('legacy' )
for fname in fnames:
if fname.endswith('.py' ):
update_version_in_file(os.path.join(A_ , A_ ) , A_ , pattern='examples' )
def snake_case (A_ :Tuple , A_ :Optional[Any]=False ):
'''simple docstring'''
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(A_ , A_ , A_ )
if not patch:
update_version_in_examples(A_ )
def snake_case ():
'''simple docstring'''
a : str = '🤗 Transformers currently provides the following architectures'
a : Dict = '1. Want to contribute a new model?'
with open(A_ , 'r' , encoding='utf-8' , newline='\n' ) as f:
a : Optional[Any] = f.readlines()
# Find the start of the list.
a : List[str] = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
a : Optional[Any] = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith('1.' ):
a : int = lines[index].replace(
'https://huggingface.co/docs/diffusers/main/model_doc' , 'https://huggingface.co/docs/diffusers/model_doc' , )
index += 1
with open(A_ , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.writelines(A_ )
def snake_case ():
'''simple docstring'''
with open(REPLACE_FILES['init'] , 'r' ) as f:
a : List[str] = f.read()
a : str = REPLACE_PATTERNS['init'][0].search(A_ ).groups()[0]
return packaging.version.parse(A_ )
def snake_case (A_ :Optional[Any]=False ):
'''simple docstring'''
a : Optional[int] = get_version()
if patch and default_version.is_devrelease:
raise ValueError('Can\'t create a patch version from the dev branch, checkout a released version!' )
if default_version.is_devrelease:
a : Tuple = default_version.base_version
elif patch:
a : Union[str, Any] = f'''{default_version.major}.{default_version.minor}.{default_version.micro + 1}'''
else:
a : Optional[Any] = f'''{default_version.major}.{default_version.minor + 1}.0'''
# Now let's ask nicely if that's the right one.
a : Union[str, Any] = input(f'''Which version are you releasing? [{default_version}]''' )
if len(A_ ) == 0:
a : int = default_version
print(f'''Updating version to {version}.''' )
global_version_update(A_ , patch=A_ )
def snake_case ():
'''simple docstring'''
a : str = get_version()
a : Optional[int] = f'''{current_version.major}.{current_version.minor + 1}.0.dev0'''
a : Optional[int] = current_version.base_version
# Check with the user we got that right.
a : str = input(f'''Which version are we developing now? [{dev_version}]''' )
if len(A_ ) == 0:
a : Union[str, Any] = dev_version
print(f'''Updating version to {version}.''' )
global_version_update(A_ )
# print("Cleaning main README, don't forget to run `make fix-copies`.")
# clean_main_ref_in_model_list()
if __name__ == "__main__":
_UpperCamelCase : Dict = argparse.ArgumentParser()
parser.add_argument('--post_release', action='store_true', help='Whether this is pre or post release.')
parser.add_argument('--patch', action='store_true', help='Whether or not this is a patch release.')
_UpperCamelCase : Optional[Any] = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('Nothing to do after a patch :-)')
else:
post_release_work()
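# --- Added example, not part of the release script: how the default versions
# offered above are derived. packaging.version is already imported at the top.
current = packaging.version.parse("0.17.0.dev0")
assert current.is_devrelease and current.base_version == "0.17.0"  # dev-branch default
released = packaging.version.parse("0.17.0")
assert f"{released.major}.{released.minor}.{released.micro + 1}" == "0.17.1"  # patch default
assert f"{released.major}.{released.minor + 1}.0" == "0.18.0"                 # minor default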
| 186
| 0
|
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def lowerCAmelCase_ ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=1_0_2_4 ):
__snake_case , __snake_case : Optional[int] = [], []
__snake_case : Dict = list(zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
__snake_case , __snake_case : Optional[Any] = sorted_examples[0]
def is_too_big(__lowerCamelCase ):
return tok(SCREAMING_SNAKE_CASE_ , return_tensors="pt" ).input_ids.shape[1] > max_tokens
for src, tgt in tqdm(sorted_examples[1:] ):
__snake_case : Any = new_src + " " + src
__snake_case : Dict = new_tgt + " " + tgt
if is_too_big(SCREAMING_SNAKE_CASE_ ) or is_too_big(SCREAMING_SNAKE_CASE_ ): # cant fit, finalize example
finished_src.append(SCREAMING_SNAKE_CASE_ )
finished_tgt.append(SCREAMING_SNAKE_CASE_ )
__snake_case , __snake_case : List[str] = src, tgt
else: # can fit, keep adding
__snake_case , __snake_case : Dict = cand_src, cand_tgt
# cleanup
if new_src:
assert new_tgt
finished_src.append(SCREAMING_SNAKE_CASE_ )
finished_tgt.append(SCREAMING_SNAKE_CASE_ )
return finished_src, finished_tgt
def lowerCAmelCase_ ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ):
__snake_case : List[Any] = Path(SCREAMING_SNAKE_CASE_ )
save_path.mkdir(exist_ok=SCREAMING_SNAKE_CASE_ )
for split in ["train"]:
__snake_case , __snake_case : int = data_dir / F'{split}.source', data_dir / F'{split}.target'
__snake_case : List[Any] = [x.rstrip() for x in Path(SCREAMING_SNAKE_CASE_ ).open().readlines()]
__snake_case : Dict = [x.rstrip() for x in Path(SCREAMING_SNAKE_CASE_ ).open().readlines()]
__snake_case , __snake_case : Optional[Any] = pack_examples(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
print(F'packed {split} split from {len(SCREAMING_SNAKE_CASE_ )} examples -> {len(SCREAMING_SNAKE_CASE_ )}.' )
Path(save_path / F'{split}.source' ).open("w" ).write("\n".join(SCREAMING_SNAKE_CASE_ ) )
Path(save_path / F'{split}.target' ).open("w" ).write("\n".join(SCREAMING_SNAKE_CASE_ ) )
for split in ["val", "test"]:
__snake_case , __snake_case : Tuple = data_dir / F'{split}.source', data_dir / F'{split}.target'
shutil.copyfile(SCREAMING_SNAKE_CASE_ , save_path / F'{split}.source' )
shutil.copyfile(SCREAMING_SNAKE_CASE_ , save_path / F'{split}.target' )
def lowerCAmelCase_ ( ):
__snake_case : Dict = argparse.ArgumentParser()
parser.add_argument("--tok_name" , type=SCREAMING_SNAKE_CASE_ , help="like facebook/bart-large-cnn,t5-base, etc." )
parser.add_argument("--max_seq_len" , type=SCREAMING_SNAKE_CASE_ , default=1_2_8 )
parser.add_argument("--data_dir" , type=SCREAMING_SNAKE_CASE_ )
parser.add_argument("--save_path" , type=SCREAMING_SNAKE_CASE_ )
__snake_case : Tuple = parser.parse_args()
__snake_case : Union[str, Any] = AutoTokenizer.from_pretrained(args.tok_name )
return pack_data_dir(SCREAMING_SNAKE_CASE_ , Path(args.data_dir ) , args.max_seq_len , args.save_path )
if __name__ == "__main__":
packer_cli()
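# --- Added illustration, not part of the original script: the greedy packing
# rule from the first function above, with a whitespace token count standing in
# for the real tokenizer so the snippet stays self-contained.
def _pack(pairs, max_tokens):
    finished_src, finished_tgt = [], []
    new_src, new_tgt = pairs[0]
    too_big = lambda s: len(s.split()) > max_tokens  # stand-in for tok(...).input_ids
    for src, tgt in pairs[1:]:
        cand_src, cand_tgt = new_src + " " + src, new_tgt + " " + tgt
        if too_big(cand_src) or too_big(cand_tgt):  # cannot fit: finalize the example
            finished_src.append(new_src)
            finished_tgt.append(new_tgt)
            new_src, new_tgt = src, tgt
        else:  # still fits: keep concatenating
            new_src, new_tgt = cand_src, cand_tgt
    finished_src.append(new_src)
    finished_tgt.append(new_tgt)
    return finished_src, finished_tgt

assert _pack([("a b", "x"), ("c", "y"), ("d e f", "z")], 3) == (["a b c", "d e f"], ["x y", "z"])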
| 123
|
'''simple docstring'''
from __future__ import annotations
from collections import deque
class lowerCAmelCase_ :
def __init__( self , _lowerCAmelCase ) -> Optional[int]:
_lowerCAmelCase = []
self.adlist.append(
{"value": "", "next_states": [], "fail_state": 0, "output": []} )
for keyword in keywords:
self.add_keyword(_lowerCAmelCase )
self.set_fail_transitions()
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase ) -> int | None:
for state in self.adlist[current_state]["next_states"]:
if char == self.adlist[state]["value"]:
return state
return None
def _snake_case ( self , _lowerCAmelCase ) -> None:
_lowerCAmelCase = 0
for character in keyword:
_lowerCAmelCase = self.find_next_state(_lowerCAmelCase , _lowerCAmelCase )
if next_state is None:
self.adlist.append(
{
"value": character,
"next_states": [],
"fail_state": 0,
"output": [],
} )
self.adlist[current_state]["next_states"].append(len(self.adlist ) - 1 )
_lowerCAmelCase = len(self.adlist ) - 1
else:
_lowerCAmelCase = next_state
self.adlist[current_state]["output"].append(_lowerCAmelCase )
def _snake_case ( self ) -> None:
_lowerCAmelCase = deque()
for node in self.adlist[0]["next_states"]:
q.append(_lowerCAmelCase )
_lowerCAmelCase = 0
while q:
_lowerCAmelCase = q.popleft()
for child in self.adlist[r]["next_states"]:
q.append(_lowerCAmelCase )
_lowerCAmelCase = self.adlist[r]["fail_state"]
while (
self.find_next_state(_lowerCAmelCase , self.adlist[child]["value"] ) is None
and state != 0
):
_lowerCAmelCase = self.adlist[state]["fail_state"]
_lowerCAmelCase = self.find_next_state(
_lowerCAmelCase , self.adlist[child]["value"] )
if self.adlist[child]["fail_state"] is None:
_lowerCAmelCase = 0
_lowerCAmelCase = (
self.adlist[child]["output"]
+ self.adlist[self.adlist[child]["fail_state"]]["output"]
)
def _snake_case ( self , _lowerCAmelCase ) -> dict[str, list[int]]:
_lowerCAmelCase = {} # returns a dict with keywords and list of its occurrences
_lowerCAmelCase = 0
for i in range(len(_lowerCAmelCase ) ):
while (
self.find_next_state(_lowerCAmelCase , string[i] ) is None
and current_state != 0
):
_lowerCAmelCase = self.adlist[current_state]["fail_state"]
_lowerCAmelCase = self.find_next_state(_lowerCAmelCase , string[i] )
if next_state is None:
_lowerCAmelCase = 0
else:
_lowerCAmelCase = next_state
for key in self.adlist[current_state]["output"]:
if key not in result:
_lowerCAmelCase = []
                result[key].append(i - len(key ) + 1 )
return result
if __name__ == "__main__":
import doctest
doctest.testmod()
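# --- Added check, not part of the original module: what the automaton's
# string search (the last method above) is expected to return, verified here
# with a naive scan. Matches are reported by the start index of each occurrence.
def _naive_multisearch(keywords, text):
    result = {}
    for kw in keywords:
        hits = [i for i in range(len(text) - len(kw) + 1) if text[i : i + len(kw)] == kw]
        if hits:
            result[kw] = hits
    return result

assert _naive_multisearch(["he", "she", "hers"], "ahishers") == {"he": [4], "she": [3], "hers": [4]}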
| 158
| 0
|
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class UpperCAmelCase_ ( unittest.TestCase ):
__SCREAMING_SNAKE_CASE : int = ViTImageProcessor if is_vision_available() else None
@property
def snake_case_ ( self : Union[str, Any] ):
return self.image_processor_tester.prepare_image_processor_dict()
def snake_case_ ( self : Optional[int] ):
_UpperCAmelCase : Tuple = (3, 3_2, 1_2_8)
_UpperCAmelCase : int = tempfile.mkdtemp()
# fmt: off
_UpperCAmelCase : List[str] = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
# fmt: on
_UpperCAmelCase : str = dict(zip(A , range(len(A ) ) ) )
_UpperCAmelCase : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(A ) + "\n" )
_UpperCAmelCase : Dict = {
"do_normalize": False,
"do_resize": True,
"image_processor_type": "ViTImageProcessor",
"resample": 3,
"size": {"height": 3_2, "width": 1_2_8},
}
_UpperCAmelCase : Optional[int] = os.path.join(self.tmpdirname , A )
with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
json.dump(A , A )
def snake_case_ ( self : List[Any] , **A : Any ):
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **A )
def snake_case_ ( self : int , **A : str ):
return ViTImageProcessor.from_pretrained(self.tmpdirname , **A )
def snake_case_ ( self : List[str] ):
shutil.rmtree(self.tmpdirname )
def snake_case_ ( self : Any ):
_UpperCAmelCase : str = np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )
_UpperCAmelCase : Dict = Image.fromarray(np.moveaxis(A , 0 , -1 ) )
return image_input
def snake_case_ ( self : Optional[Any] ):
_UpperCAmelCase : int = self.get_tokenizer()
_UpperCAmelCase : Dict = self.get_image_processor()
_UpperCAmelCase : int = MgpstrProcessor(tokenizer=A , image_processor=A )
processor.save_pretrained(self.tmpdirname )
_UpperCAmelCase : List[Any] = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=A )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , A )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , A )
def snake_case_ ( self : Tuple ):
_UpperCAmelCase : Dict = self.get_tokenizer()
_UpperCAmelCase : List[str] = self.get_image_processor()
_UpperCAmelCase : List[Any] = MgpstrProcessor(tokenizer=A , image_processor=A )
processor.save_pretrained(self.tmpdirname )
_UpperCAmelCase : Optional[int] = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
_UpperCAmelCase : Optional[int] = self.get_image_processor(do_normalize=A , padding_value=1.0 )
_UpperCAmelCase : Tuple = MgpstrProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=A , padding_value=1.0 )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , A )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , A )
def snake_case_ ( self : str ):
_UpperCAmelCase : Tuple = self.get_image_processor()
_UpperCAmelCase : Optional[Any] = self.get_tokenizer()
_UpperCAmelCase : List[str] = MgpstrProcessor(tokenizer=A , image_processor=A )
_UpperCAmelCase : List[Any] = self.prepare_image_inputs()
_UpperCAmelCase : Optional[int] = image_processor(A , return_tensors="np" )
_UpperCAmelCase : List[str] = processor(images=A , return_tensors="np" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def snake_case_ ( self : List[str] ):
_UpperCAmelCase : Optional[Any] = self.get_image_processor()
_UpperCAmelCase : Optional[Any] = self.get_tokenizer()
_UpperCAmelCase : int = MgpstrProcessor(tokenizer=A , image_processor=A )
_UpperCAmelCase : Dict = "test"
_UpperCAmelCase : List[str] = processor(text=A )
_UpperCAmelCase : List[Any] = tokenizer(A )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def snake_case_ ( self : Dict ):
_UpperCAmelCase : Optional[Any] = self.get_image_processor()
_UpperCAmelCase : Dict = self.get_tokenizer()
_UpperCAmelCase : Dict = MgpstrProcessor(tokenizer=A , image_processor=A )
_UpperCAmelCase : List[str] = "test"
_UpperCAmelCase : str = self.prepare_image_inputs()
_UpperCAmelCase : str = processor(text=A , images=A )
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "labels"] )
# test if it raises when no input is passed
with pytest.raises(A ):
processor()
def snake_case_ ( self : Optional[Any] ):
_UpperCAmelCase : str = self.get_image_processor()
_UpperCAmelCase : int = self.get_tokenizer()
_UpperCAmelCase : List[Any] = MgpstrProcessor(tokenizer=A , image_processor=A )
_UpperCAmelCase : Union[str, Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
_UpperCAmelCase : List[Any] = processor.char_decode(A )
_UpperCAmelCase : List[Any] = tokenizer.batch_decode(A )
_UpperCAmelCase : Optional[Any] = [seq.replace(" " , "" ) for seq in decoded_tok]
self.assertListEqual(A , A )
def snake_case_ ( self : Union[str, Any] ):
_UpperCAmelCase : List[str] = self.get_image_processor()
_UpperCAmelCase : Any = self.get_tokenizer()
_UpperCAmelCase : Tuple = MgpstrProcessor(tokenizer=A , image_processor=A )
_UpperCAmelCase : Tuple = None
_UpperCAmelCase : str = self.prepare_image_inputs()
_UpperCAmelCase : str = processor(text=A , images=A )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
def snake_case_ ( self : int ):
_UpperCAmelCase : Union[str, Any] = self.get_image_processor()
_UpperCAmelCase : List[Any] = self.get_tokenizer()
_UpperCAmelCase : List[str] = MgpstrProcessor(tokenizer=A , image_processor=A )
_UpperCAmelCase : Tuple = torch.randn(1 , 2_7 , 3_8 )
_UpperCAmelCase : Dict = torch.randn(1 , 2_7 , 5_0_2_5_7 )
_UpperCAmelCase : Dict = torch.randn(1 , 2_7 , 3_0_5_2_2 )
_UpperCAmelCase : List[Any] = processor.batch_decode([char_input, bpe_input, wp_input] )
self.assertListEqual(list(results.keys() ) , ["generated_text", "scores", "char_preds", "bpe_preds", "wp_preds"] )
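# --- Added sketch, not part of the tests: the greedy step behind the
# three-head batch_decode exercised above. Each head emits logits of shape
# (batch, seq_len, vocab); per-position predictions come from an argmax over
# the vocab axis. The vocab size mirrors the random char tensor in the last test.
if is_torch_available():
    char_logits = torch.randn(1, 27, 38)
    char_ids = char_logits.argmax(dim=-1)  # (1, 27) predicted ids per position
    assert char_ids.shape == (1, 27) and int(char_ids.max()) < 38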
| 202
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
_lowerCAmelCase : Any = "\nHuman: <<task>>\n\nAssistant: "
_lowerCAmelCase : str = "huggingface-tools/default-prompts"
_lowerCAmelCase : Union[str, Any] = {"chat": "chat_prompt_template.txt", "run": "run_prompt_template.txt"}
def __snake_case ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : int="run" ) -> int:
'''simple docstring'''
if prompt_or_repo_id is None:
_UpperCAmelCase : Optional[int] = DEFAULT_PROMPTS_REPO
# prompt is considered a repo ID when it does not contain any kind of space
if re.search("\\s" , SCREAMING_SNAKE_CASE__ ) is not None:
return prompt_or_repo_id
_UpperCAmelCase : Dict = cached_file(
SCREAMING_SNAKE_CASE__ , PROMPT_FILES[mode] , repo_type="dataset" , user_agent={"agent": agent_name} )
with open(SCREAMING_SNAKE_CASE__ , "r" , encoding="utf-8" ) as f:
return f.read()
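# --- Added illustration, not part of the original file: the whitespace test
# above is what separates a literal prompt from a repo id (re is imported at
# the top of this file).
assert re.search("\\s", "Translate <<task>> into German.") is not None  # kept as a prompt
assert re.search("\\s", "huggingface-tools/default-prompts") is None    # treated as a repo id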
| 202
| 1
|
"""simple docstring"""
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = '''▁'''
lowerCAmelCase__ = {'''vocab_file''': '''vocab.txt''', '''sentencepiece_model_ckpt''': '''sentencepiece.bpe.model'''}
lowerCAmelCase__ = {
'''sentencepiece_model_file''': '''sentencepiece.bpe.model''',
'''vocab_file''': '''vocab.txt''',
}
lowerCAmelCase__ = {
'''vocab_file''': {
'''ernie-m-base''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt''',
'''ernie-m-large''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt''',
},
'''sentencepiece_model_file''': {
'''ernie-m-base''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model''',
'''ernie-m-large''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model''',
},
}
lowerCAmelCase__ = {
'''ernie-m-base''': 514,
'''ernie-m-large''': 514,
}
lowerCAmelCase__ = {
'''ernie-m-base''': {'''do_lower_case''': False},
'''ernie-m-large''': {'''do_lower_case''': False},
}
class SCREAMING_SNAKE_CASE__ ( lowercase ):
"""simple docstring"""
a : List[str] =["input_ids"]
a : Dict =VOCAB_FILES_NAMES
a : Optional[Any] =PRETRAINED_INIT_CONFIGURATION
a : Any =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a : Optional[int] =PRETRAINED_VOCAB_FILES_MAP
a : Tuple =RESOURCE_FILES_NAMES
def __init__( self , snake_case__ , snake_case__=None , snake_case__=False , snake_case__="utf8" , snake_case__="[UNK]" , snake_case__="[SEP]" , snake_case__="[PAD]" , snake_case__="[CLS]" , snake_case__="[MASK]" , snake_case__ = None , **snake_case__ , ):
"""simple docstring"""
lowerCAmelCase : int = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=snake_case__ , unk_token=snake_case__ , sep_token=snake_case__ , pad_token=snake_case__ , cls_token=snake_case__ , mask_token=snake_case__ , vocab_file=snake_case__ , encoding=snake_case__ , sp_model_kwargs=self.sp_model_kwargs , **snake_case__ , )
lowerCAmelCase : Union[str, Any] = do_lower_case
lowerCAmelCase : Tuple = sentencepiece_model_ckpt
lowerCAmelCase : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(snake_case__ )
# to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
if vocab_file is not None:
lowerCAmelCase : Dict = self.load_vocab(filepath=snake_case__ )
else:
lowerCAmelCase : List[str] = {self.sp_model.id_to_piece(snake_case__ ): id for id in range(self.sp_model.get_piece_size() )}
lowerCAmelCase : Dict = {v: k for k, v in self.vocab.items()}
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
if text is None:
return None
lowerCAmelCase : Optional[int] = self.tokenize(snake_case__ )
lowerCAmelCase , lowerCAmelCase : Optional[int] = "", []
for i, ch in enumerate(snake_case__ ):
if ch in self.SP_CHAR_MAPPING:
lowerCAmelCase : Optional[Any] = self.SP_CHAR_MAPPING.get(snake_case__ )
else:
lowerCAmelCase : Tuple = unicodedata.normalize("NFKC" , snake_case__ )
if self.is_whitespace(snake_case__ ):
continue
normalized_text += ch
char_mapping.extend([i] * len(snake_case__ ) )
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Union[str, Any] = normalized_text, [], 0
if self.do_lower_case:
lowerCAmelCase : Any = text.lower()
for token in split_tokens:
if token[:1] == "▁":
lowerCAmelCase : Optional[int] = token[1:]
lowerCAmelCase : Tuple = text[offset:].index(snake_case__ ) + offset
lowerCAmelCase : Optional[Any] = start + len(snake_case__ )
token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1) )
lowerCAmelCase : Tuple = end
return token_mapping
@property
def lowercase__ ( self ):
"""simple docstring"""
return len(self.vocab )
def lowercase__ ( self ):
"""simple docstring"""
return dict(self.vocab , **self.added_tokens_encoder )
def __getstate__( self ):
"""simple docstring"""
lowerCAmelCase : List[str] = self.__dict__.copy()
lowerCAmelCase : int = None
return state
def __setstate__( self , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : List[str] = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
lowerCAmelCase : Optional[int] = {}
lowerCAmelCase : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.sentencepiece_model_ckpt )
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
return "".join((self.SP_CHAR_MAPPING.get(snake_case__ , snake_case__ ) for c in text) )
def lowercase__ ( self , snake_case__ , snake_case__=False , snake_case__=64 , snake_case__=0.1 ):
"""simple docstring"""
if self.sp_model_kwargs.get("enable_sampling" ) is True:
lowerCAmelCase : List[Any] = True
if self.sp_model_kwargs.get("alpha" ) is not None:
lowerCAmelCase : Any = self.sp_model_kwargs.get("alpha" )
if self.sp_model_kwargs.get("nbest_size" ) is not None:
lowerCAmelCase : Optional[Any] = self.sp_model_kwargs.get("nbest_size" )
if not enable_sampling:
lowerCAmelCase : int = self.sp_model.EncodeAsPieces(snake_case__ )
else:
lowerCAmelCase : Dict = self.sp_model.SampleEncodeAsPieces(snake_case__ , snake_case__ , snake_case__ )
lowerCAmelCase : Optional[Any] = []
for pi, piece in enumerate(snake_case__ ):
if piece == SPIECE_UNDERLINE:
if not pieces[pi + 1].startswith(snake_case__ ) and pi != 0:
new_pieces.append(snake_case__ )
continue
else:
continue
lowerCAmelCase : List[Any] = 0
for i, chunk in enumerate(snake_case__ ):
if chunk == SPIECE_UNDERLINE:
continue
if self.is_ch_char(snake_case__ ) or self.is_punct(snake_case__ ):
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
new_pieces.append(snake_case__ )
lowerCAmelCase : List[str] = i + 1
elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
lowerCAmelCase : Dict = i
elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
lowerCAmelCase : Union[str, Any] = i
if len(snake_case__ ) > lst_i:
new_pieces.append(piece[lst_i:] )
return new_pieces
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : str = "".join(snake_case__ ).replace(snake_case__ , " " ).strip()
return out_string
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Any = self.convert_ids_to_tokens(snake_case__ )
lowerCAmelCase : str = "".join(snake_case__ ).replace(snake_case__ , " " ).strip()
return out_string
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
return self.vocab.get(snake_case__ , self.vocab.get(self.unk_token ) )
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
return self.reverse_vocab.get(snake_case__ , self.unk_token )
def lowercase__ ( self , snake_case__ , snake_case__=None ):
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowerCAmelCase : Optional[Any] = [self.cls_token_id]
lowerCAmelCase : Any = [self.sep_token_id]
return _cls + token_ids_a + _sep + _sep + token_ids_a + _sep
def lowercase__ ( self , snake_case__ , snake_case__=None ):
"""simple docstring"""
if offset_mapping_a is None:
return [(0, 0)] + offset_mapping_a + [(0, 0)]
return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_a + [(0, 0)]
def lowercase__ ( self , snake_case__ , snake_case__=None , snake_case__=False ):
"""simple docstring"""
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"You should not supply a second sequence if the provided sequence of "
"ids is already formatted with special tokens for the model." )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(snake_case__ )) + [1, 1] + ([0] * len(snake_case__ )) + [1]
return [1] + ([0] * len(snake_case__ )) + [1]
def lowercase__ ( self , snake_case__ , snake_case__ = None ):
"""simple docstring"""
if token_ids_a is None:
# [CLS] X [SEP]
return (len(snake_case__ ) + 2) * [0]
# [CLS] A [SEP] [SEP] B [SEP]
return [0] * (len(snake_case__ ) + 1) + [1] * (len(snake_case__ ) + 3)
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
if "\u4e00" <= char <= "\u9fff":
return True
return False
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
if ("a" <= char <= "z") or ("A" <= char <= "Z"):
return True
return False
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
if char in ",;:.?!~,;:。?!《》【】":
return True
return False
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
if len(snake_case__ ) == 1:
lowerCAmelCase : Dict = unicodedata.category(snake_case__ )
if cat == "Zs":
return True
return False
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Union[str, Any] = {}
with io.open(snake_case__ , "r" , encoding="utf-8" ) as f:
for index, line in enumerate(snake_case__ ):
lowerCAmelCase : Tuple = line.rstrip("\n" )
lowerCAmelCase : Optional[Any] = int(snake_case__ )
return token_to_idx
def lowercase__ ( self , snake_case__ , snake_case__ = None ):
"""simple docstring"""
lowerCAmelCase : List[Any] = 0
if os.path.isdir(snake_case__ ):
lowerCAmelCase : Optional[Any] = os.path.join(
snake_case__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
else:
lowerCAmelCase : int = (filename_prefix + "-" if filename_prefix else "") + save_directory
with open(snake_case__ , "w" , encoding="utf-8" ) as writer:
            for token, token_index in sorted(self.vocab.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
" Please check that the vocabulary is not corrupted!" )
lowerCAmelCase : int = token_index
writer.write(token + "\n" )
index += 1
lowerCAmelCase : List[str] = os.path.join(snake_case__ , "sentencepiece.bpe.model" )
with open(snake_case__ , "wb" ) as fi:
lowerCAmelCase : Tuple = self.sp_model.serialized_model_proto()
fi.write(snake_case__ )
return (vocab_file,)
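# --- Added check, not part of the tokenizer: the pair layout built above is
# [CLS] A [SEP] [SEP] B [SEP], so the token-type ids are len(A) + 1 zeros
# (covering [CLS] and A) followed by len(B) + 3 ones (covering both [SEP]s,
# B, and the final [SEP]).
def _erniem_token_type_ids(len_a, len_b=None):
    if len_b is None:
        return (len_a + 2) * [0]  # [CLS] X [SEP]
    return [0] * (len_a + 1) + [1] * (len_b + 3)

assert _erniem_token_type_ids(2) == [0, 0, 0, 0]
assert _erniem_token_type_ids(2, 2) == [0, 0, 0, 1, 1, 1, 1, 1]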
| 108
|
"""simple docstring"""
def a__ ( SCREAMING_SNAKE_CASE : float , SCREAMING_SNAKE_CASE : float , SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
if principal <= 0:
raise Exception("Principal borrowed must be > 0" )
if rate_per_annum < 0:
raise Exception("Rate of interest must be >= 0" )
    if years_to_repay <= 0 or not isinstance(SCREAMING_SNAKE_CASE , int ):
raise Exception("Years to repay must be an integer > 0" )
# Yearly rate is divided by 12 to get monthly rate
lowerCAmelCase : Tuple = rate_per_annum / 1_2
# Years to repay is multiplied by 12 to get number of payments as payment is monthly
lowerCAmelCase : List[Any] = years_to_repay * 1_2
return (
principal
* rate_per_month
* (1 + rate_per_month) ** number_of_payments
/ ((1 + rate_per_month) ** number_of_payments - 1)
)
if __name__ == "__main__":
import doctest
doctest.testmod()
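# --- Added worked example, not part of the original module: a loan of 25_000
# at 8% per annum repaid over 3 years, using the same amortization formula as
# above. The expected value is rounded to the cent.
monthly_rate = 0.08 / 12  # yearly rate divided by 12
payments = 3 * 12         # 36 monthly installments
emi = 25_000 * monthly_rate * (1 + monthly_rate) ** payments / (
    (1 + monthly_rate) ** payments - 1
)
assert abs(emi - 783.41) < 0.01  # roughly 783.41 per month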
| 108
| 1
|
'''simple docstring'''
import pprint
import requests
lowerCAmelCase_ = "https://zenquotes.io/api"
def __magic_name__ ( ) -> list:
return requests.get(API_ENDPOINT_URL + '/today' ).json()
def __magic_name__ ( ) -> list:
return requests.get(API_ENDPOINT_URL + '/random' ).json()
if __name__ == "__main__":
lowerCAmelCase_ = random_quotes()
pprint.pprint(response)
| 332
|
'''simple docstring'''
from __future__ import annotations
def __magic_name__ ( A , A , A ) -> int | float:
if len(A ) == 0:
raise ValueError('find_max() arg is an empty sequence' )
if (
left >= len(A )
or left < -len(A )
or right >= len(A )
or right < -len(A )
):
raise IndexError('list index out of range' )
if left == right:
return nums[left]
snake_case = (left + right) >> 1 # the middle
snake_case = find_max(A , A , A ) # find max in range[left, mid]
snake_case = find_max(A , mid + 1 , A ) # find max in range[mid + 1, right]
return left_max if left_max >= right_max else right_max
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
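# --- Added trace, not part of the original module: one divide-and-conquer step
# on nums = [1, 3, 5, 2] over the full range [0, 3]. The midpoint is
# (0 + 3) >> 1 == 1, the halves [1, 3] and [5, 2] yield maxima 3 and 5, and
# the larger of the two, 5, is returned.
assert (0 + 3) >> 1 == 1
assert max(max(1, 3), max(5, 2)) == 5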
| 332
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase__ :Optional[int] = {
'''configuration_instructblip''': [
'''INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''InstructBlipConfig''',
'''InstructBlipQFormerConfig''',
'''InstructBlipVisionConfig''',
],
'''processing_instructblip''': ['''InstructBlipProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ :Optional[int] = [
'''INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''InstructBlipQFormerModel''',
'''InstructBlipPreTrainedModel''',
'''InstructBlipForConditionalGeneration''',
'''InstructBlipVisionModel''',
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
lowerCAmelCase__ :List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
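# --- Added sketch, not part of the original file: the laziness that
# _LazyModule provides, reduced to PEP 562's module-level __getattr__. The
# heavy submodule is only imported on first attribute access; the mapping
# below is a minimal stand-in for _import_structure, not the real mechanism.
import importlib

_LAZY_ATTRS = {"InstructBlipProcessor": "processing_instructblip"}

def __getattr__(name):
    if name in _LAZY_ATTRS:
        module = importlib.import_module("." + _LAZY_ATTRS[name], __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")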
| 329
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
__lowercase = logging.get_logger(__name__)
class a__( lowerCAmelCase__ ):
'''simple docstring'''
def __init__( self , *__lowerCAmelCase , **__lowerCAmelCase):
"""simple docstring"""
warnings.warn(
"""The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
""" Please use DeformableDetrImageProcessor instead.""" , __lowerCAmelCase , )
super().__init__(*__lowerCAmelCase , **__lowerCAmelCase)
| 272
| 0
|
from __future__ import annotations
class lowerCamelCase__ :
'''simple docstring'''
def __init__( self :int , a :int ) -> None:
__UpperCamelCase : Dict = data
__UpperCamelCase : Node | None = None
__UpperCamelCase : Node | None = None
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : Node | None) -> None: # In Order traversal of the tree
'''simple docstring'''
if tree:
display(tree.left)
print(tree.data)
display(tree.right)
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : Node | None) -> int:
'''simple docstring'''
return 1 + max(depth_of_tree(tree.left) , depth_of_tree(tree.right)) if tree else 0
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : Node) -> bool:
'''simple docstring'''
if not tree:
return True
if tree.left and tree.right:
return is_full_binary_tree(tree.left) and is_full_binary_tree(tree.right)
else:
return not tree.left and not tree.right
def _SCREAMING_SNAKE_CASE ( ) -> None: # Main function for testing.
'''simple docstring'''
__UpperCamelCase : Optional[Any] = Node(1)
__UpperCamelCase : Optional[Any] = Node(2)
__UpperCamelCase : Any = Node(3)
__UpperCamelCase : int = Node(4)
__UpperCamelCase : List[Any] = Node(5)
__UpperCamelCase : Any = Node(6)
__UpperCamelCase : Optional[int] = Node(7)
__UpperCamelCase : str = Node(8)
__UpperCamelCase : Optional[int] = Node(9)
print(is_full_binary_tree(_lowerCamelCase))
print(depth_of_tree(_lowerCamelCase))
print("Tree is: ")
display(_lowerCamelCase)
if __name__ == "__main__":
main()
| 364
|
lowercase : Optional[int] = 9.8_0_6_6_5
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : float , _lowerCamelCase : float , _lowerCamelCase : float = g) -> float:
'''simple docstring'''
if fluid_density <= 0:
raise ValueError("Impossible fluid density")
if volume < 0:
raise ValueError("Impossible Object volume")
if gravity <= 0:
raise ValueError("Impossible Gravity")
return fluid_density * gravity * volume
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
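# --- Added worked example, not part of the original module: a fully submerged
# 0.002 m^3 object in water (density taken as 1000 kg/m^3) under standard
# gravity. F = rho * g * V = 1000 * 9.80665 * 0.002 = 19.6133 N.
assert abs(1000 * 9.80665 * 0.002 - 19.6133) < 1e-9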
| 151
| 0
|
'''simple docstring'''
from __future__ import annotations
def _lowercase ( __A ,__A ,__A ):
'''simple docstring'''
if len(a__ ) == 0:
raise ValueError("""find_max() arg is an empty sequence""" )
if (
left >= len(a__ )
or left < -len(a__ )
or right >= len(a__ )
or right < -len(a__ )
):
raise IndexError("""list index out of range""" )
if left == right:
return nums[left]
__UpperCamelCase = (left + right) >> 1 # the middle
__UpperCamelCase = find_max(a__ ,a__ ,a__ ) # find max in range[left, mid]
__UpperCamelCase = find_max(a__ ,mid + 1 ,a__ ) # find max in range[mid + 1, right]
return left_max if left_max >= right_max else right_max
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 349
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class lowerCAmelCase__ ( a , a , a , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase__ = StableDiffusionInpaintPipeline
lowerCAmelCase__ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
lowerCAmelCase__ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
lowerCAmelCase__ = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
lowerCAmelCase__ = frozenset([] )
def UpperCAmelCase__ ( self : str ) -> List[Any]:
"""simple docstring"""
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=__SCREAMING_SNAKE_CASE , )
__SCREAMING_SNAKE_CASE = PNDMScheduler(skip_prk_steps=__SCREAMING_SNAKE_CASE )
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act="""gelu""" , projection_dim=512 , )
__SCREAMING_SNAKE_CASE = CLIPTextModel(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
__SCREAMING_SNAKE_CASE = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def UpperCAmelCase__ ( self : Any , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : List[Any]=0 ) -> int:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = floats_tensor((1, 3, 32, 32) , rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = image.cpu().permute(0 , 2 , 3 , 1 )[0]
__SCREAMING_SNAKE_CASE = Image.fromarray(np.uinta(__SCREAMING_SNAKE_CASE ) ).convert("""RGB""" ).resize((64, 64) )
__SCREAMING_SNAKE_CASE = Image.fromarray(np.uinta(image + 4 ) ).convert("""RGB""" ).resize((64, 64) )
if str(__SCREAMING_SNAKE_CASE ).startswith("""mps""" ):
__SCREAMING_SNAKE_CASE = torch.manual_seed(__SCREAMING_SNAKE_CASE )
else:
__SCREAMING_SNAKE_CASE = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": init_image,
"""mask_image""": mask_image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def UpperCAmelCase__ ( self : Union[str, Any] ) -> int:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = """cpu""" # ensure determinism for the device-dependent torch.Generator
__SCREAMING_SNAKE_CASE = self.get_dummy_components()
__SCREAMING_SNAKE_CASE = StableDiffusionInpaintPipeline(**__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = sd_pipe.to(__SCREAMING_SNAKE_CASE )
sd_pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = self.get_dummy_inputs(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = sd_pipe(**__SCREAMING_SNAKE_CASE ).images
__SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__SCREAMING_SNAKE_CASE = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCAmelCase__ ( self : Tuple ) -> str:
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self : List[Any] ) -> str:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ ( self : List[str] ) -> str:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-inpaint/init_image.png""" )
__SCREAMING_SNAKE_CASE = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
__SCREAMING_SNAKE_CASE = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"""
"""/yellow_cat_sitting_on_a_park_bench.npy""" )
__SCREAMING_SNAKE_CASE = """stabilityai/stable-diffusion-2-inpainting"""
__SCREAMING_SNAKE_CASE = StableDiffusionInpaintPipeline.from_pretrained(__SCREAMING_SNAKE_CASE , safety_checker=__SCREAMING_SNAKE_CASE )
pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
pipe.enable_attention_slicing()
__SCREAMING_SNAKE_CASE = """Face of a yellow cat, high resolution, sitting on a park bench"""
__SCREAMING_SNAKE_CASE = torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE = pipe(
prompt=__SCREAMING_SNAKE_CASE , image=__SCREAMING_SNAKE_CASE , mask_image=__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , output_type="""np""" , )
__SCREAMING_SNAKE_CASE = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 9E-3
def UpperCAmelCase__ ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-inpaint/init_image.png""" )
__SCREAMING_SNAKE_CASE = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
__SCREAMING_SNAKE_CASE = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"""
"""/yellow_cat_sitting_on_a_park_bench_fp16.npy""" )
__SCREAMING_SNAKE_CASE = """stabilityai/stable-diffusion-2-inpainting"""
__SCREAMING_SNAKE_CASE = StableDiffusionInpaintPipeline.from_pretrained(
__SCREAMING_SNAKE_CASE , torch_dtype=torch.floataa , safety_checker=__SCREAMING_SNAKE_CASE , )
pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
pipe.enable_attention_slicing()
__SCREAMING_SNAKE_CASE = """Face of a yellow cat, high resolution, sitting on a park bench"""
__SCREAMING_SNAKE_CASE = torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE = pipe(
prompt=__SCREAMING_SNAKE_CASE , image=__SCREAMING_SNAKE_CASE , mask_image=__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , output_type="""np""" , )
__SCREAMING_SNAKE_CASE = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 5E-1
def UpperCAmelCase__ ( self : Tuple ) -> Any:
"""simple docstring"""
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__SCREAMING_SNAKE_CASE = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-inpaint/init_image.png""" )
__SCREAMING_SNAKE_CASE = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
__SCREAMING_SNAKE_CASE = """stabilityai/stable-diffusion-2-inpainting"""
__SCREAMING_SNAKE_CASE = PNDMScheduler.from_pretrained(__SCREAMING_SNAKE_CASE , subfolder="""scheduler""" )
__SCREAMING_SNAKE_CASE = StableDiffusionInpaintPipeline.from_pretrained(
__SCREAMING_SNAKE_CASE , safety_checker=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE , torch_dtype=torch.floataa , )
pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
__SCREAMING_SNAKE_CASE = """Face of a yellow cat, high resolution, sitting on a park bench"""
__SCREAMING_SNAKE_CASE = torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE = pipe(
prompt=__SCREAMING_SNAKE_CASE , image=__SCREAMING_SNAKE_CASE , mask_image=__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , num_inference_steps=2 , output_type="""np""" , )
__SCREAMING_SNAKE_CASE = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.65 * 10**9
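# --- Added note, not part of the tests: why the dummy inpainting UNet above is
# built with in_channels=9. At every denoising step the pipeline concatenates
# the 4 noisy latent channels, 1 downsampled mask channel, and the 4 latent
# channels of the masked image along the channel axis.
latents = torch.randn(1, 4, 8, 8)
mask = torch.randn(1, 1, 8, 8)
masked_image_latents = torch.randn(1, 4, 8, 8)
assert torch.cat([latents, mask, masked_image_latents], dim=1).shape[1] == 9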
| 267
| 0
|
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class __magic_name__ ( unittest.TestCase):
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
lowercase_ : List[str] = """laion/clap-htsat-unfused"""
lowercase_ : Dict = tempfile.mkdtemp()
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , **lowercase_ : Any ):
return RobertaTokenizer.from_pretrained(self.checkpoint , **lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , **lowercase_ : str ):
return ClapFeatureExtractor.from_pretrained(self.checkpoint , **lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
shutil.rmtree(self.tmpdirname )
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
lowercase_ : Union[str, Any] = self.get_tokenizer()
lowercase_ : Optional[Any] = self.get_feature_extractor()
lowercase_ : str = ClapProcessor(tokenizer=lowercase_ , feature_extractor=lowercase_ )
processor.save_pretrained(self.tmpdirname )
lowercase_ : Tuple = ClapProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowercase_ )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
lowercase_ : str = ClapProcessor(tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() )
processor.save_pretrained(self.tmpdirname )
lowercase_ : Tuple = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
lowercase_ : Optional[int] = self.get_feature_extractor(do_normalize=lowercase_ , padding_value=1.0 )
lowercase_ : Optional[int] = ClapProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=lowercase_ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowercase_ )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.feature_extractor , lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : str ):
lowercase_ : Dict = self.get_feature_extractor()
lowercase_ : List[str] = self.get_tokenizer()
lowercase_ : List[Any] = ClapProcessor(tokenizer=lowercase_ , feature_extractor=lowercase_ )
lowercase_ : Optional[int] = floats_list((3, 1000) )
lowercase_ : int = feature_extractor(lowercase_ , return_tensors="""np""" )
lowercase_ : List[Any] = processor(audios=lowercase_ , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
lowercase_ : List[str] = self.get_feature_extractor()
lowercase_ : Optional[int] = self.get_tokenizer()
lowercase_ : List[Any] = ClapProcessor(tokenizer=lowercase_ , feature_extractor=lowercase_ )
lowercase_ : Optional[Any] = """This is a test string"""
lowercase_ : str = processor(text=lowercase_ )
lowercase_ : int = tokenizer(lowercase_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
 def test_tokenizer_decode( self ):
lowercase_ : List[Any] = self.get_feature_extractor()
lowercase_ : Dict = self.get_tokenizer()
lowercase_ : Optional[Any] = ClapProcessor(tokenizer=lowercase_ , feature_extractor=lowercase_ )
lowercase_ : Optional[int] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowercase_ : List[str] = processor.batch_decode(lowercase_ )
lowercase_ : Tuple = tokenizer.batch_decode(lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
 def test_model_input_names( self ):
lowercase_ : Optional[int] = self.get_feature_extractor()
lowercase_ : int = self.get_tokenizer()
lowercase_ : Optional[Any] = ClapProcessor(tokenizer=lowercase_ , feature_extractor=lowercase_ )
self.assertListEqual(
processor.model_input_names[2:] , feature_extractor.model_input_names , msg="""`processor` and `feature_extractor` model input names do not match""" , )
| 350
|
'''simple docstring'''
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch( tf_checkpoint_path , config_file , pytorch_dump_path , base_model ):
    # Initialise PyTorch model
    config = FunnelConfig.from_json_file(config_file )
    print(F'''Building PyTorch model from configuration: {config}''' )
    model = FunnelBaseModel(config ) if base_model else FunnelModel(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_funnel(model , config , tf_checkpoint_path )
    # Save pytorch-model
    print(F'''Save PyTorch model to {pytorch_dump_path}''' )
    torch.save(model.state_dict() , pytorch_dump_path )
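# Illustrative CLI invocation of the function above (a sketch; the script name is assumed
# from the transformers repo layout, and the paths are placeholders, not real files):
#   python convert_funnel_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./model.ckpt --config_file ./config.json \
#       --pytorch_dump_path ./pytorch_model.bin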
if __name__ == "__main__":
_lowercase : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--base_model", action="store_true", help="Whether you want just the base model (no decoder) or not."
)
_lowercase : Union[str, Any] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
)
| 21
| 0
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_UpperCamelCase : str = logging.get_logger(__name__)
_UpperCamelCase : Tuple = {
"facebook/data2vec-vision-base-ft": (
"https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"
),
}
class Data2VecVisionConfig(PretrainedConfig):
    model_type = "data2vec-vision"
    def __init__( self , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1e-12 , image_size=224 , patch_size=16 , num_channels=3 , use_mask_token=False , use_absolute_position_embeddings=False , use_relative_position_bias=False , use_shared_relative_position_bias=False , layer_scale_init_value=0.1 , drop_path_rate=0.1 , use_mean_pooling=True , out_indices=[3, 5, 7, 11] , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=True , auxiliary_loss_weight=0.4 , auxiliary_channels=256 , auxiliary_num_convs=1 , auxiliary_concat_input=False , semantic_loss_ignore_index=255 , **kwargs , ):
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class Data2VecVisionOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")
@property
def _UpperCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def _UpperCAmelCase ( self ) -> float:
return 1e-4
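# Illustrative use of the ONNX export config above (a sketch, not part of the original file):
# `inputs` declares a single 4-D image tensor with dynamic axes, and the property above gives
# the 1e-4 tolerance used when validating ONNX outputs against PyTorch.
#
#   onnx_config = Data2VecVisionOnnxConfig(config)
#   list(onnx_config.inputs)  # -> ['pixel_values']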
| 77
|
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    url = '''https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png'''
    image = Image.open(requests.get(url , stream=True ).raw ).convert('''RGB''' )
    return image
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# vision encoder
rename_keys.append(('''visual_encoder.cls_token''', '''vision_model.embeddings.class_embedding''') )
rename_keys.append(('''visual_encoder.pos_embed''', '''vision_model.embeddings.position_embedding''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.weight''', '''vision_model.embeddings.patch_embedding.weight''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.bias''', '''vision_model.embeddings.patch_embedding.bias''') )
rename_keys.append(('''ln_vision.weight''', '''vision_model.post_layernorm.weight''') )
rename_keys.append(('''ln_vision.bias''', '''vision_model.post_layernorm.bias''') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.weight''', F'''vision_model.encoder.layers.{i}.layer_norm1.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.bias''', F'''vision_model.encoder.layers.{i}.layer_norm1.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.weight''', F'''vision_model.encoder.layers.{i}.layer_norm2.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.bias''', F'''vision_model.encoder.layers.{i}.layer_norm2.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.qkv.weight''', F'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.weight''', F'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.bias''', F'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') )
# QFormer
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.weight''', '''qformer.layernorm.weight''') )
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.bias''', '''qformer.layernorm.bias''') )
# fmt: on
return rename_keys
def rename_key(dct , old , new):
    val = dct.pop(old )
    dct[new] = val
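# Illustrative: rename_key(state_dict, 'ln_vision.weight', 'vision_model.post_layernorm.weight')
# pops the tensor from the old key and re-inserts it under the new one, in place.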
def read_in_q_v_bias(state_dict , config):
    for i in range(config.vision_config.num_hidden_layers ):
        # read in original q and v biases
        q_bias = state_dict.pop(F'''visual_encoder.blocks.{i}.attn.q_bias''' )
        v_bias = state_dict.pop(F'''visual_encoder.blocks.{i}.attn.v_bias''' )
        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias , requires_grad=False ), v_bias) )
        state_dict[F'''visual_encoder.blocks.{i}.attn.qkv.bias'''] = qkv_bias
def get_blipa_config(model_name , eos_token_id=None):
    image_size = 364 if '''coco''' in model_name else 224
    vision_config = BlipaVisionConfig(image_size=image_size ).to_dict()
    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained('''facebook/opt-2.7b''' , eos_token_id=eos_token_id ).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained('''facebook/opt-6.7b''' , eos_token_id=eos_token_id ).to_dict()
    elif "t5-xl" in model_name:
        text_config = TaConfig.from_pretrained('''google/flan-t5-xl''' , dense_act_fn='''gelu''' , bos_token_id=1 ).to_dict()
    elif "t5-xxl" in model_name:
        text_config = TaConfig.from_pretrained('''google/flan-t5-xxl''' , dense_act_fn='''gelu''' , bos_token_id=1 ).to_dict()
    config = BlipaConfig(vision_config=vision_config , text_config=text_config )
    return config, image_size
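# Illustrative call (a sketch; the eos_token_id value is assumed here, in the script it is
# derived from the tokenizer): get_blipa_config('blip2-opt-2.7b', eos_token_id=50118) returns
# an OPT-based text config plus image_size=224; '-coco' checkpoints use image_size=364.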
@torch.no_grad()
def convert_blipa_checkpoint( model_name , pytorch_dump_folder_path=None , push_to_hub=False ):
__SCREAMING_SNAKE_CASE : Any = (
AutoTokenizer.from_pretrained('''facebook/opt-2.7b''' )
if '''opt''' in model_name
else AutoTokenizer.from_pretrained('''google/flan-t5-xl''' )
)
__SCREAMING_SNAKE_CASE : str = tokenizer('''\n''' , add_special_tokens=lowercase__ ).input_ids[0]
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Union[str, Any] = get_blipa_config(lowercase__ , eos_token_id=lowercase__ )
__SCREAMING_SNAKE_CASE : int = BlipaForConditionalGeneration(lowercase__ ).eval()
__SCREAMING_SNAKE_CASE : int = {
'''blip2-opt-2.7b''': ('''blip2_opt''', '''pretrain_opt2.7b'''),
'''blip2-opt-6.7b''': ('''blip2_opt''', '''pretrain_opt6.7b'''),
'''blip2-opt-2.7b-coco''': ('''blip2_opt''', '''caption_coco_opt2.7b'''),
'''blip2-opt-6.7b-coco''': ('''blip2_opt''', '''caption_coco_opt6.7b'''),
'''blip2-flan-t5-xl''': ('''blip2_t5''', '''pretrain_flant5xl'''),
'''blip2-flan-t5-xl-coco''': ('''blip2_t5''', '''caption_coco_flant5xl'''),
'''blip2-flan-t5-xxl''': ('''blip2_t5''', '''pretrain_flant5xxl'''),
}
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Any = model_name_to_original[model_name]
# load original model
print('''Loading original model...''' )
__SCREAMING_SNAKE_CASE : List[str] = '''cuda''' if torch.cuda.is_available() else '''cpu'''
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Dict = load_model_and_preprocess(
name=lowercase__ , model_type=lowercase__ , is_eval=lowercase__ , device=lowercase__ )
original_model.eval()
print('''Done!''' )
# update state dict keys
__SCREAMING_SNAKE_CASE : List[str] = original_model.state_dict()
__SCREAMING_SNAKE_CASE : Optional[int] = create_rename_keys(lowercase__ )
for src, dest in rename_keys:
rename_key(lowercase__ , lowercase__ , lowercase__ )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
__SCREAMING_SNAKE_CASE : Tuple = state_dict.pop(lowercase__ )
if key.startswith('''Qformer.bert''' ):
__SCREAMING_SNAKE_CASE : List[str] = key.replace('''Qformer.bert''' , '''qformer''' )
if "attention.self" in key:
__SCREAMING_SNAKE_CASE : Union[str, Any] = key.replace('''self''' , '''attention''' )
if "opt_proj" in key:
__SCREAMING_SNAKE_CASE : Dict = key.replace('''opt_proj''' , '''language_projection''' )
if "t5_proj" in key:
__SCREAMING_SNAKE_CASE : Tuple = key.replace('''t5_proj''' , '''language_projection''' )
if key.startswith('''opt''' ):
__SCREAMING_SNAKE_CASE : List[str] = key.replace('''opt''' , '''language''' )
if key.startswith('''t5''' ):
__SCREAMING_SNAKE_CASE : Tuple = key.replace('''t5''' , '''language''' )
__SCREAMING_SNAKE_CASE : Tuple = val
# read in qv biases
read_in_q_v_bias(lowercase__ , lowercase__ )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : List[str] = hf_model.load_state_dict(lowercase__ , strict=lowercase__ )
assert len(lowercase__ ) == 0
assert unexpected_keys == ["qformer.embeddings.position_ids"]
__SCREAMING_SNAKE_CASE : List[str] = load_demo_image()
__SCREAMING_SNAKE_CASE : Any = vis_processors['''eval'''](lowercase__ ).unsqueeze(0 ).to(lowercase__ )
__SCREAMING_SNAKE_CASE : str = tokenizer(['''\n'''] , return_tensors='''pt''' ).input_ids.to(lowercase__ )
# create processor
__SCREAMING_SNAKE_CASE : List[Any] = BlipImageProcessor(
size={'''height''': image_size, '''width''': image_size} , image_mean=lowercase__ , image_std=lowercase__ )
__SCREAMING_SNAKE_CASE : int = BlipaProcessor(image_processor=lowercase__ , tokenizer=lowercase__ )
__SCREAMING_SNAKE_CASE : Any = processor(images=lowercase__ , return_tensors='''pt''' ).pixel_values.to(lowercase__ )
# make sure processor creates exact same pixel values
assert torch.allclose(lowercase__ , lowercase__ )
original_model.to(lowercase__ )
hf_model.to(lowercase__ )
with torch.no_grad():
if "opt" in model_name:
__SCREAMING_SNAKE_CASE : Dict = original_model({'''image''': original_pixel_values, '''text_input''': ['''''']} ).logits
__SCREAMING_SNAKE_CASE : Dict = hf_model(lowercase__ , lowercase__ ).logits
else:
__SCREAMING_SNAKE_CASE : int = original_model(
{'''image''': original_pixel_values, '''text_input''': ['''\n'''], '''text_output''': ['''\n''']} ).logits
__SCREAMING_SNAKE_CASE : List[Any] = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -100 )
__SCREAMING_SNAKE_CASE : Optional[int] = hf_model(lowercase__ , lowercase__ , labels=lowercase__ ).logits
assert original_logits.shape == logits.shape
print('''First values of original logits:''' , original_logits[0, :3, :3] )
print('''First values of HF logits:''' , logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
__SCREAMING_SNAKE_CASE : Dict = torch.tensor(
[[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]] , device=lowercase__ )
assert torch.allclose(logits[0, :3, :3] , lowercase__ , atol=1e-4 )
elif model_name == "blip2-flan-t5-xl-coco":
__SCREAMING_SNAKE_CASE : Any = torch.tensor(
[[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]] , device=lowercase__ )
else:
# cast to same type
__SCREAMING_SNAKE_CASE : Optional[Any] = logits.dtype
assert torch.allclose(original_logits.to(lowercase__ ) , lowercase__ , atol=1e-2 )
print('''Looks ok!''' )
print('''Generating a caption...''' )
__SCREAMING_SNAKE_CASE : Any = ''''''
__SCREAMING_SNAKE_CASE : Optional[int] = tokenizer(lowercase__ , return_tensors='''pt''' ).input_ids.to(lowercase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = original_model.generate({'''image''': original_pixel_values} )
__SCREAMING_SNAKE_CASE : Union[str, Any] = hf_model.generate(
lowercase__ , lowercase__ , do_sample=lowercase__ , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
print('''Original generation:''' , lowercase__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = input_ids.shape[1]
__SCREAMING_SNAKE_CASE : Any = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=lowercase__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = [text.strip() for text in output_text]
print('''HF generation:''' , lowercase__ )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(lowercase__ )
hf_model.save_pretrained(lowercase__ )
if push_to_hub:
processor.push_to_hub(F'''nielsr/{model_name}''' )
hf_model.push_to_hub(F'''nielsr/{model_name}''' )
if __name__ == "__main__":
__lowerCAmelCase : List[str] =argparse.ArgumentParser()
__lowerCAmelCase : Tuple =[
'blip2-opt-2.7b',
'blip2-opt-6.7b',
'blip2-opt-2.7b-coco',
'blip2-opt-6.7b-coco',
'blip2-flan-t5-xl',
'blip2-flan-t5-xl-coco',
'blip2-flan-t5-xxl',
]
parser.add_argument(
'--model_name',
default='blip2-opt-2.7b',
choices=choices,
type=str,
help='Path to hf config.json of model to convert',
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub after converting',
)
__lowerCAmelCase : List[Any] =parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 9
| 0
|
'''simple docstring'''
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
_snake_case : Optional[int] = logging.get_logger(__name__)
# General docstring
_snake_case : Tuple = 'RegNetConfig'
# Base docstring
_snake_case : Union[str, Any] = 'facebook/regnet-y-040'
_snake_case : int = [1, 1088, 7, 7]
# Image classification docstring
_snake_case : int = 'facebook/regnet-y-040'
_snake_case : Tuple = 'tabby, tabby cat'
_snake_case : List[Any] = [
'facebook/regnet-y-040',
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class RegNetConvLayer(nn.Module):
def __init__( self : str , lowerCAmelCase_ : int , lowerCAmelCase_ : int , lowerCAmelCase_ : int = 3 , lowerCAmelCase_ : int = 1 , lowerCAmelCase_ : int = 1 , lowerCAmelCase_ : Optional[str] = "relu" , ) -> Union[str, Any]:
"""simple docstring"""
super().__init__()
_a = nn.Convad(
lowerCAmelCase_ , lowerCAmelCase_ , kernel_size=lowerCAmelCase_ , stride=lowerCAmelCase_ , padding=kernel_size // 2 , groups=lowerCAmelCase_ , bias=lowerCAmelCase_ , )
_a = nn.BatchNormad(lowerCAmelCase_ )
_a = ACTaFN[activation] if activation is not None else nn.Identity()
def __lowerCAmelCase ( self : Dict , lowerCAmelCase_ : Tuple ) -> Union[str, Any]:
"""simple docstring"""
_a = self.convolution(lowerCAmelCase_ )
_a = self.normalization(lowerCAmelCase_ )
_a = self.activation(lowerCAmelCase_ )
return hidden_state
class RegNetEmbeddings(nn.Module):
def __init__( self : Optional[Any] , lowerCAmelCase_ : RegNetConfig ) -> str:
"""simple docstring"""
super().__init__()
_a = RegNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act )
_a = config.num_channels
def __lowerCAmelCase ( self : Optional[Any] , lowerCAmelCase_ : Dict ) -> Union[str, Any]:
"""simple docstring"""
_a = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
'''Make sure that the channel dimension of the pixel values match with the one set in the configuration.''' )
_a = self.embedder(lowerCAmelCase_ )
return hidden_state
class RegNetShortCut(nn.Module):
def __init__( self : List[Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : int , lowerCAmelCase_ : int = 2 ) -> Tuple:
"""simple docstring"""
super().__init__()
_a = nn.Convad(lowerCAmelCase_ , lowerCAmelCase_ , kernel_size=1 , stride=lowerCAmelCase_ , bias=lowerCAmelCase_ )
_a = nn.BatchNormad(lowerCAmelCase_ )
def __lowerCAmelCase ( self : List[str] , lowerCAmelCase_ : Tensor ) -> Tensor:
"""simple docstring"""
_a = self.convolution(lowerCAmelCase_ )
_a = self.normalization(lowerCAmelCase_ )
return hidden_state
class RegNetSELayer(nn.Module):
def __init__( self : Optional[Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : int ) -> Any:
"""simple docstring"""
super().__init__()
_a = nn.AdaptiveAvgPoolad((1, 1) )
_a = nn.Sequential(
nn.Convad(lowerCAmelCase_ , lowerCAmelCase_ , kernel_size=1 ) , nn.ReLU() , nn.Convad(lowerCAmelCase_ , lowerCAmelCase_ , kernel_size=1 ) , nn.Sigmoid() , )
def __lowerCAmelCase ( self : str , lowerCAmelCase_ : List[str] ) -> str:
"""simple docstring"""
_a = self.pooler(lowerCAmelCase_ )
_a = self.attention(lowerCAmelCase_ )
_a = hidden_state * attention
return hidden_state
class RegNetXLayer(nn.Module):
def __init__( self : List[str] , lowerCAmelCase_ : RegNetConfig , lowerCAmelCase_ : int , lowerCAmelCase_ : int , lowerCAmelCase_ : int = 1 ) -> Any:
"""simple docstring"""
super().__init__()
_a = in_channels != out_channels or stride != 1
_a = max(1 , out_channels // config.groups_width )
_a = (
RegNetShortCut(lowerCAmelCase_ , lowerCAmelCase_ , stride=lowerCAmelCase_ ) if should_apply_shortcut else nn.Identity()
)
_a = nn.Sequential(
RegNetConvLayer(lowerCAmelCase_ , lowerCAmelCase_ , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(lowerCAmelCase_ , lowerCAmelCase_ , stride=lowerCAmelCase_ , groups=lowerCAmelCase_ , activation=config.hidden_act ) , RegNetConvLayer(lowerCAmelCase_ , lowerCAmelCase_ , kernel_size=1 , activation=lowerCAmelCase_ ) , )
_a = ACTaFN[config.hidden_act]
def __lowerCAmelCase ( self : List[Any] , lowerCAmelCase_ : Any ) -> Dict:
"""simple docstring"""
_a = hidden_state
_a = self.layer(lowerCAmelCase_ )
_a = self.shortcut(lowerCAmelCase_ )
hidden_state += residual
_a = self.activation(lowerCAmelCase_ )
return hidden_state
class RegNetYLayer(nn.Module):
def __init__( self : List[str] , lowerCAmelCase_ : RegNetConfig , lowerCAmelCase_ : int , lowerCAmelCase_ : int , lowerCAmelCase_ : int = 1 ) -> List[str]:
"""simple docstring"""
super().__init__()
_a = in_channels != out_channels or stride != 1
_a = max(1 , out_channels // config.groups_width )
_a = (
RegNetShortCut(lowerCAmelCase_ , lowerCAmelCase_ , stride=lowerCAmelCase_ ) if should_apply_shortcut else nn.Identity()
)
_a = nn.Sequential(
RegNetConvLayer(lowerCAmelCase_ , lowerCAmelCase_ , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(lowerCAmelCase_ , lowerCAmelCase_ , stride=lowerCAmelCase_ , groups=lowerCAmelCase_ , activation=config.hidden_act ) , RegNetSELayer(lowerCAmelCase_ , reduced_channels=int(round(in_channels / 4 ) ) ) , RegNetConvLayer(lowerCAmelCase_ , lowerCAmelCase_ , kernel_size=1 , activation=lowerCAmelCase_ ) , )
_a = ACTaFN[config.hidden_act]
def __lowerCAmelCase ( self : Tuple , lowerCAmelCase_ : Dict ) -> Union[str, Any]:
"""simple docstring"""
_a = hidden_state
_a = self.layer(lowerCAmelCase_ )
_a = self.shortcut(lowerCAmelCase_ )
hidden_state += residual
_a = self.activation(lowerCAmelCase_ )
return hidden_state
class RegNetStage(nn.Module):
def __init__( self : Any , lowerCAmelCase_ : RegNetConfig , lowerCAmelCase_ : int , lowerCAmelCase_ : int , lowerCAmelCase_ : int = 2 , lowerCAmelCase_ : int = 2 , ) -> List[Any]:
"""simple docstring"""
super().__init__()
_a = RegNetXLayer if config.layer_type == '''x''' else RegNetYLayer
_a = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , stride=lowerCAmelCase_ , ) , *[layer(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) for _ in range(depth - 1 )] , )
def __lowerCAmelCase ( self : Dict , lowerCAmelCase_ : Tuple ) -> str:
"""simple docstring"""
_a = self.layers(lowerCAmelCase_ )
return hidden_state
class RegNetEncoder(nn.Module):
def __init__( self : List[str] , lowerCAmelCase_ : RegNetConfig ) -> Tuple:
"""simple docstring"""
super().__init__()
_a = nn.ModuleList([] )
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
RegNetStage(
lowerCAmelCase_ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
        in_out_channels = zip(config.hidden_sizes , config.hidden_sizes[1:] )
        for (in_channels, out_channels), depth in zip(in_out_channels , config.depths[1:] ):
            self.stages.append(RegNetStage(config , in_channels , out_channels , depth=depth ) )
def __lowerCAmelCase ( self : Optional[Any] , lowerCAmelCase_ : Tensor , lowerCAmelCase_ : bool = False , lowerCAmelCase_ : bool = True ) -> BaseModelOutputWithNoAttention:
"""simple docstring"""
_a = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
_a = hidden_states + (hidden_state,)
_a = stage_module(lowerCAmelCase_ )
if output_hidden_states:
_a = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=lowerCAmelCase_ , hidden_states=lowerCAmelCase_ )
class RegNetPreTrainedModel(PreTrainedModel):
    config_class = RegNetConfig
    base_model_prefix = 'regnet'
    main_input_name = 'pixel_values'
    supports_gradient_checkpointing = True
def __lowerCAmelCase ( self : Any , lowerCAmelCase_ : int ) -> Optional[Any]:
"""simple docstring"""
if isinstance(lowerCAmelCase_ , nn.Convad ):
nn.init.kaiming_normal_(module.weight , mode='''fan_out''' , nonlinearity='''relu''' )
elif isinstance(lowerCAmelCase_ , (nn.BatchNormad, nn.GroupNorm) ):
nn.init.constant_(module.weight , 1 )
nn.init.constant_(module.bias , 0 )
def __lowerCAmelCase ( self : Optional[Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Tuple=False ) -> Optional[int]:
"""simple docstring"""
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
_a = value
REGNET_START_DOCSTRING = R'\n    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n    behavior.\n\n    Parameters:\n        config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n            Initializing with a config file does not load the weights associated with the model, only the\n            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
REGNET_INPUTS_DOCSTRING = R'\n    Args:\n        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n            [`ConvNextImageProcessor.__call__`] for details.\n\n        output_hidden_states (`bool`, *optional*):\n            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n            more detail.\n        return_dict (`bool`, *optional*):\n            Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
    'The bare RegNet model outputting raw features without any specific head on top.' , REGNET_START_DOCSTRING , )
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class RegNetModel(RegNetPreTrainedModel):
def __init__( self : List[Any] , lowerCAmelCase_ : Tuple ) -> List[Any]:
"""simple docstring"""
super().__init__(lowerCAmelCase_ )
_a = config
_a = RegNetEmbeddings(lowerCAmelCase_ )
_a = RegNetEncoder(lowerCAmelCase_ )
_a = nn.AdaptiveAvgPoolad((1, 1) )
# Initialize weights and apply final processing
self.post_init()
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowerCAmelCase_ , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def __lowerCAmelCase ( self : Optional[int] , lowerCAmelCase_ : Tensor , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Optional[bool] = None ) -> BaseModelOutputWithPoolingAndNoAttention:
"""simple docstring"""
_a = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_a = return_dict if return_dict is not None else self.config.use_return_dict
_a = self.embedder(lowerCAmelCase_ )
_a = self.encoder(
lowerCAmelCase_ , output_hidden_states=lowerCAmelCase_ , return_dict=lowerCAmelCase_ )
_a = encoder_outputs[0]
_a = self.pooler(lowerCAmelCase_ )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=lowerCAmelCase_ , pooler_output=lowerCAmelCase_ , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
    '\n    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n    ImageNet.\n    ' , REGNET_START_DOCSTRING , )
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class RegNetForImageClassification(RegNetPreTrainedModel):
def __init__( self : Optional[int] , lowerCAmelCase_ : List[Any] ) -> Optional[Any]:
"""simple docstring"""
super().__init__(lowerCAmelCase_ )
_a = config.num_labels
_a = RegNetModel(lowerCAmelCase_ )
# classification head
_a = nn.Sequential(
nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , )
# initialize weights and apply final processing
self.post_init()
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowerCAmelCase_ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def __lowerCAmelCase ( self : List[str] , lowerCAmelCase_ : Optional[torch.FloatTensor] = None , lowerCAmelCase_ : Optional[torch.LongTensor] = None , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Optional[bool] = None , ) -> ImageClassifierOutputWithNoAttention:
"""simple docstring"""
_a = return_dict if return_dict is not None else self.config.use_return_dict
_a = self.regnet(lowerCAmelCase_ , output_hidden_states=lowerCAmelCase_ , return_dict=lowerCAmelCase_ )
_a = outputs.pooler_output if return_dict else outputs[1]
_a = self.classifier(lowerCAmelCase_ )
_a = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
_a = '''regression'''
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
_a = '''single_label_classification'''
else:
_a = '''multi_label_classification'''
if self.config.problem_type == "regression":
_a = MSELoss()
if self.num_labels == 1:
_a = loss_fct(logits.squeeze() , labels.squeeze() )
else:
_a = loss_fct(lowerCAmelCase_ , lowerCAmelCase_ )
elif self.config.problem_type == "single_label_classification":
_a = CrossEntropyLoss()
_a = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
_a = BCEWithLogitsLoss()
_a = loss_fct(lowerCAmelCase_ , lowerCAmelCase_ )
if not return_dict:
_a = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=lowerCAmelCase_ , logits=lowerCAmelCase_ , hidden_states=outputs.hidden_states )
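# Illustrative forward pass for the classification head above (a sketch; the checkpoint name
# comes from the docstring constants earlier in the file, and weights download on first use):
#   model = RegNetForImageClassification.from_pretrained('facebook/regnet-y-040')
#   logits = model(pixel_values).logits  # pixel_values: (batch, 3, height, width)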
| 179
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'configuration_xlnet': ['XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XLNetConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_xlnet'] = ['XLNetTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_xlnet_fast'] = ['XLNetTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_xlnet'] = [
'XLNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLNetForMultipleChoice',
'XLNetForQuestionAnswering',
'XLNetForQuestionAnsweringSimple',
'XLNetForSequenceClassification',
'XLNetForTokenClassification',
'XLNetLMHeadModel',
'XLNetModel',
'XLNetPreTrainedModel',
'load_tf_weights_in_xlnet',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_xlnet'] = [
'TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXLNetForMultipleChoice',
'TFXLNetForQuestionAnsweringSimple',
'TFXLNetForSequenceClassification',
'TFXLNetForTokenClassification',
'TFXLNetLMHeadModel',
'TFXLNetMainLayer',
'TFXLNetModel',
'TFXLNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
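# Note on the registration above: _LazyModule replaces this module in sys.modules, so e.g.
# `from transformers.models.xlnet import XLNetModel` defers the heavy torch import until the
# attribute is first accessed.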
| 179
| 1
|
"""simple docstring"""
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize("""dataset_size""" , [None, 400 * 2**20, 600 * 2**20] )
@pytest.mark.parametrize("""input_in_memory_max_size""" , ["""default""", 0, 100 * 2**20, 900 * 2**20] )
def test_is_small_dataset( dataset_size , input_in_memory_max_size , monkeypatch ):
    if input_in_memory_max_size != "default":
        monkeypatch.setattr(datasets.config , """IN_MEMORY_MAX_SIZE""" , input_in_memory_max_size )
    in_memory_max_size = datasets.config.IN_MEMORY_MAX_SIZE
    if input_in_memory_max_size == "default":
        assert in_memory_max_size == 0
    else:
        assert in_memory_max_size == input_in_memory_max_size
    if dataset_size and in_memory_max_size:
        expected = dataset_size < in_memory_max_size
    else:
        expected = False
    result = is_small_dataset(dataset_size )
    assert result == expected
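# Illustrative behaviour being tested (a sketch, assuming the semantics exercised above):
# with IN_MEMORY_MAX_SIZE = 500 * 2**20, a 400 MiB dataset is "small" (True) and a 600 MiB
# one is not (False); a max size of 0, or a dataset_size of None, yields False.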
| 109
|
"""simple docstring"""
import datasets
from .evaluate import evaluate
_CITATION = "\\n@article{hendrycks2021cuad,\n title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},\n author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},\n journal={arXiv preprint arXiv:2103.06268},\n year={2021}\n}\n"
_DESCRIPTION = "\nThis metric wraps the official scoring script for version 1 of the Contract\nUnderstanding Atticus Dataset (CUAD).\nContract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510\ncommercial legal contracts that have been manually labeled to identify 41 categories of important\nclauses that lawyers look for when reviewing contracts in connection with corporate transactions.\n"
_KWARGS_DESCRIPTION = "\nComputes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).\nArgs:\n    predictions: List of question-answers dictionaries with the following key-values:\n        - 'id': id of the question-answer pair as given in the references (see below)\n        - 'prediction_text': list of possible texts for the answer, as a list of strings\n          depending on a threshold on the confidence probability of each prediction.\n    references: List of question-answers dictionaries with the following key-values:\n        - 'id': id of the question-answer pair (see above),\n        - 'answers': a Dict in the CUAD dataset format\n            {\n                'text': list of possible texts for the answer, as a list of strings\n                'answer_start': list of start positions for the answer, as a list of ints\n            }\n            Note that answer_start values are not taken into account to compute the metric.\nReturns:\n    'exact_match': Exact match (the normalized answer exactly match the gold answer)\n    'f1': The F-score of predicted tokens versus the gold answer\n    'aupr': Area Under the Precision-Recall curve\n    'prec_at_80_recall': Precision at 80% recall\n    'prec_at_90_recall': Precision at 90% recall\nExamples:\n    >>> predictions = [{'prediction_text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.'], 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]\n    >>> references = [{'answers': {'answer_start': [143, 49], 'text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.']}, 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]\n    >>> cuad_metric = datasets.load_metric(\"cuad\")\n    >>> results = cuad_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {'exact_match': 100.0, 'f1': 100.0, 'aupr': 0.0, 'prec_at_80_recall': 1.0, 'prec_at_90_recall': 1.0}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE__ ( datasets.Metric ):
    def _info( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": {
"""id""": datasets.Value("""string""" ),
"""prediction_text""": datasets.features.Sequence(datasets.Value("""string""" ) ),
},
"""references""": {
"""id""": datasets.Value("""string""" ),
"""answers""": datasets.features.Sequence(
{
"""text""": datasets.Value("""string""" ),
"""answer_start""": datasets.Value("""int32""" ),
} ),
},
} ) , codebase_urls=["""https://www.atticusprojectai.org/cuad"""] , reference_urls=["""https://www.atticusprojectai.org/cuad"""] , )
    def _compute( self , predictions , references ):
        '''simple docstring'''
        pred_dict = {prediction["""id"""]: prediction["""prediction_text"""] for prediction in predictions}
        dataset = [
            {
                """paragraphs""": [
                    {
                        """qas""": [
                            {
                                """answers""": [{"""text""": answer_text} for answer_text in ref["""answers"""]["""text"""]],
                                """id""": ref["""id"""],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset , predictions=pred_dict )
        return score
| 109
| 1
|
'''simple docstring'''
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
__snake_case : List[Any] = NewType('DataClass', Any)
__snake_case : str = NewType('DataClassType', Any)
def string_to_bool( v ) -> Any:
    if isinstance(v , bool ):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise ArgumentTypeError(
            F'''Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive).''' )
def make_choice_type_function( choices : list ) -> Callable[[str], Any]:
    str_to_choice = {str(choice ): choice for choice in choices}
    return lambda arg : str_to_choice.get(arg , arg )
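# Illustrative usage (hypothetical choices, not from the original file):
#   parse = make_choice_type_function(['adam', 'sgd'])
#   parse('adam')   # -> 'adam'
#   parse('other')  # unknown values fall through unchanged, for argparse to reject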
def HfArg( *,
    aliases : Union[str, List[str]] = None , help : str = None , default : Any = dataclasses.MISSING , default_factory : Callable[[], Any] = dataclasses.MISSING , metadata : dict = None , **kwargs , ) -> dataclasses.Field:
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata['aliases'] = aliases
    if help is not None:
        metadata['help'] = help
    return dataclasses.field(metadata=metadata , default=default , default_factory=default_factory , **kwargs )
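# Illustrative sketch with a hypothetical dataclass: the field helper above packs `aliases`
# and `help` into the field's `metadata`, which the parser below turns into add_argument()
# options.
#
#   @dataclasses.dataclass
#   class TrainArgs:
#       lr: float = HfArg(default=1e-4, help='learning rate', aliases=['--learning-rate'])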
class HfArgumentParser( ArgumentParser ):
    '''simple docstring'''
    dataclass_types: Iterable[DataClassType]
    def __init__( self , dataclass_types , **kwargs ):
        # To make the default appear when using --help
        if "formatter_class" not in kwargs:
            kwargs['''formatter_class'''] = ArgumentDefaultsHelpFormatter
        super().__init__(**kwargs )
        if dataclasses.is_dataclass(dataclass_types ):
            dataclass_types = [dataclass_types]
        self.dataclass_types = list(dataclass_types )
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(dtype )
@staticmethod
    def _parse_dataclass_field( parser , field ):
A_ = F'''--{field.name}'''
A_ = field.metadata.copy()
# field.metadata is not used at all by Data Classes,
# it is provided as a third-party extension mechanism.
if isinstance(field.type , _SCREAMING_SNAKE_CASE ):
raise RuntimeError(
'''Unresolved type detected, which should have been done with the help of '''
'''`typing.get_type_hints` method by default''' )
A_ = kwargs.pop('''aliases''' , [] )
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
A_ = [aliases]
A_ = getattr(field.type , '''__origin__''' , field.type )
if origin_type is Union or (hasattr(_SCREAMING_SNAKE_CASE , '''UnionType''' ) and isinstance(_SCREAMING_SNAKE_CASE , types.UnionType )):
if str not in field.type.__args__ and (
len(field.type.__args__ ) != 2 or type(_SCREAMING_SNAKE_CASE ) not in field.type.__args__
):
raise ValueError(
'''Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because'''
''' the argument parser only supports one type per argument.'''
F''' Problem encountered in field \'{field.name}\'.''' )
if type(_SCREAMING_SNAKE_CASE ) not in field.type.__args__:
# filter `str` in Union
A_ = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
A_ = getattr(field.type , '''__origin__''' , field.type )
elif bool not in field.type.__args__:
# filter `NoneType` in Union (except for `Union[bool, NoneType]`)
A_ = (
field.type.__args__[0] if isinstance(_SCREAMING_SNAKE_CASE , field.type.__args__[1] ) else field.type.__args__[1]
)
A_ = getattr(field.type , '''__origin__''' , field.type )
# A variable to store kwargs for a boolean field, if needed
# so that we can init a `no_*` complement argument (see below)
A_ = {}
if origin_type is Literal or (isinstance(field.type , _SCREAMING_SNAKE_CASE ) and issubclass(field.type , _SCREAMING_SNAKE_CASE )):
if origin_type is Literal:
A_ = field.type.__args__
else:
A_ = [x.value for x in field.type]
A_ = make_choice_type_function(kwargs['''choices'''] )
if field.default is not dataclasses.MISSING:
A_ = field.default
else:
A_ = True
elif field.type is bool or field.type == Optional[bool]:
# Copy the currect kwargs to use to instantiate a `no_*` complement argument below.
# We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
A_ = copy(_SCREAMING_SNAKE_CASE )
# Hack because type=bool in argparse does not behave as we want.
A_ = string_to_bool
if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
# Default value is False if we have no default when of type bool.
A_ = False if field.default is dataclasses.MISSING else field.default
# This is the value that will get picked if we don't include --field_name in any way
A_ = default
# This tells argparse we accept 0 or 1 value after --field_name
A_ = '''?'''
# This is the value that will get picked if we do --field_name (without value)
A_ = True
elif isclass(_SCREAMING_SNAKE_CASE ) and issubclass(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
A_ = field.type.__args__[0]
A_ = '''+'''
if field.default_factory is not dataclasses.MISSING:
A_ = field.default_factory()
elif field.default is dataclasses.MISSING:
A_ = True
else:
A_ = field.type
if field.default is not dataclasses.MISSING:
A_ = field.default
elif field.default_factory is not dataclasses.MISSING:
A_ = field.default_factory()
else:
A_ = True
parser.add_argument(_SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
# Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
# Order is important for arguments with the same destination!
# We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
# here and we do not need those changes/additional keys.
if field.default is True and (field.type is bool or field.type == Optional[bool]):
A_ = False
parser.add_argument(F'''--no_{field.name}''' , action='''store_false''' , dest=field.name , **_SCREAMING_SNAKE_CASE )
    def _add_dataclass_arguments( self , dtype ):
        if hasattr(dtype , '''_argument_group_name''' ):
            parser = self.add_argument_group(dtype._argument_group_name )
        else:
            parser = self
        try:
            type_hints = get_type_hints(dtype )
except NameError:
raise RuntimeError(
F'''Type resolution failed for {dtype}. Try declaring the class in global scope or '''
'''removing line of `from __future__ import annotations` which opts in Postponed '''
'''Evaluation of Annotations (PEP 563)''' )
        except TypeError as ex:
            # Remove this block when we drop Python 3.9 support
            if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(ex ):
                python_version = '''.'''.join(map(str , sys.version_info[:3] ) )
raise RuntimeError(
F'''Type resolution failed for {dtype} on Python {python_version}. Try removing '''
'''line of `from __future__ import annotations` which opts in union types as '''
'''`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To '''
'''support Python versions that lower than 3.10, you need to use '''
'''`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of '''
'''`X | None`.''' ) from ex
raise
        for field in dataclasses.fields(dtype ):
            if not field.init:
                continue
            field.type = type_hints[field.name]
            self._parse_dataclass_field(parser , field )
    def parse_args_into_dataclasses( self , args=None , return_remaining_strings=False , look_for_args_file=True , args_filename=None , args_file_flag=None , ) -> Tuple[DataClass, ...]:
if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )):
A_ = []
if args_filename:
args_files.append(Path(_SCREAMING_SNAKE_CASE ) )
elif look_for_args_file and len(sys.argv ):
args_files.append(Path(sys.argv[0] ).with_suffix('''.args''' ) )
# args files specified via command line flag should overwrite default args files so we add them last
if args_file_flag:
# Create special parser just to extract the args_file_flag values
A_ = ArgumentParser()
args_file_parser.add_argument(_SCREAMING_SNAKE_CASE , type=_SCREAMING_SNAKE_CASE , action='''append''' )
# Use only remaining args for further parsing (remove the args_file_flag)
A_ ,A_ = args_file_parser.parse_known_args(args=_SCREAMING_SNAKE_CASE )
A_ = vars(_SCREAMING_SNAKE_CASE ).get(args_file_flag.lstrip('''-''' ) , _SCREAMING_SNAKE_CASE )
if cmd_args_file_paths:
args_files.extend([Path(_SCREAMING_SNAKE_CASE ) for p in cmd_args_file_paths] )
A_ = []
for args_file in args_files:
if args_file.exists():
file_args += args_file.read_text().split()
# in case of duplicate arguments the last one has precedence
# args specified via the command line should overwrite args from files, so we add them last
A_ = file_args + args if args is not None else file_args + sys.argv[1:]
A_ ,A_ = self.parse_known_args(args=_SCREAMING_SNAKE_CASE )
A_ = []
for dtype in self.dataclass_types:
A_ = {f.name for f in dataclasses.fields(_SCREAMING_SNAKE_CASE ) if f.init}
A_ = {k: v for k, v in vars(_SCREAMING_SNAKE_CASE ).items() if k in keys}
for k in keys:
delattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
A_ = dtype(**_SCREAMING_SNAKE_CASE )
outputs.append(_SCREAMING_SNAKE_CASE )
if len(namespace.__dict__ ) > 0:
# additional namespace.
outputs.append(_SCREAMING_SNAKE_CASE )
if return_remaining_strings:
return (*outputs, remaining_args)
else:
if remaining_args:
raise ValueError(F'''Some specified arguments are not used by the HfArgumentParser: {remaining_args}''' )
return (*outputs,)
    def parse_dict( self , args , allow_extra_keys=False ) -> Tuple[DataClass, ...]:
        unused_keys = set(args.keys() )
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype ) if f.init}
            inputs = {k: v for k, v in args.items() if k in keys}
            unused_keys.difference_update(inputs.keys() )
            obj = dtype(**inputs )
            outputs.append(obj )
        if not allow_extra_keys and unused_keys:
            raise ValueError(F'''Some keys are not used by the HfArgumentParser: {sorted(unused_keys )}''' )
        return tuple(outputs )
    def parse_json_file( self , json_file , allow_extra_keys=False ) -> Tuple[DataClass, ...]:
        with open(Path(json_file ) , encoding='''utf-8''' ) as open_json_file:
            data = json.loads(open_json_file.read() )
        outputs = self.parse_dict(data , allow_extra_keys=allow_extra_keys )
        return tuple(outputs )
    def parse_yaml_file( self , yaml_file , allow_extra_keys=False ) -> Tuple[DataClass, ...]:
        outputs = self.parse_dict(yaml.safe_load(Path(yaml_file ).read_text() ) , allow_extra_keys=allow_extra_keys )
        return tuple(outputs )
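# Illustrative end-to-end sketch (hypothetical dataclass and values, not from the file):
#   @dataclasses.dataclass
#   class Args:
#       epochs: int = 3
#   parser = HfArgumentParser(Args)
#   (args,) = parser.parse_args_into_dataclasses(args=['--epochs', '5'])
#   args.epochs  # -> 5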
| 18
|
'''simple docstring'''
from diffusers.utils.testing_utils import require_onnxruntime
@require_onnxruntime
class __UpperCAmelCase :
'''simple docstring'''
pass
| 18
| 1
|
class OverFlowError( Exception ):
    '''simple docstring'''
    pass
class UnderFlowError( Exception ):
    '''simple docstring'''
    pass
class FixedPriorityQueue:
    '''simple docstring'''
    def __init__( self ):
        '''simple docstring'''
        self.queues = [
            [],
            [],
            [],
        ]
    def enqueue( self , priority : int , data : int ):
        '''simple docstring'''
        try:
            if len(self.queues[priority] ) >= 100:
                raise OverFlowError('Maximum queue size is 100' )
            self.queues[priority].append(data )
        except IndexError:
            raise ValueError('Valid priorities are 0, 1, and 2' )
    def dequeue( self ):
        '''simple docstring'''
        for queue in self.queues:
            if queue:
                return queue.pop(0 )
        raise UnderFlowError('All queues are empty' )
def __str__( self : Dict ):
'''simple docstring'''
return "\n".join(f'''Priority {i}: {q}''' for i, q in enumerate(self.queues ) )
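# Dequeue order sketch for FixedPriorityQueue: items drain strictly by priority bucket
# (0 before 1 before 2), FIFO within a bucket; e.g. after enqueue(0, 10), enqueue(1, 70),
# enqueue(0, 100), successive dequeue() calls yield 10, then 100, then 70.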
class ElementPriorityQueue:
    '''simple docstring'''
    def __init__( self ):
        '''simple docstring'''
        self.queue = []
    def enqueue( self , data : int ):
        '''simple docstring'''
        if len(self.queue ) == 100:
            raise OverFlowError('Maximum queue size is 100' )
        self.queue.append(data )
    def dequeue( self ):
        '''simple docstring'''
        if not self.queue:
            raise UnderFlowError('The queue is empty' )
        else:
            data = min(self.queue )
            self.queue.remove(data )
            return data
def __str__( self : Optional[int] ):
'''simple docstring'''
return str(self.queue )
def fixed_priority_queue() -> None:
    fpq = FixedPriorityQueue()
fpq.enqueue(0 , 10 )
fpq.enqueue(1 , 70 )
fpq.enqueue(0 , 100 )
fpq.enqueue(2 , 1 )
fpq.enqueue(2 , 5 )
fpq.enqueue(1 , 7 )
fpq.enqueue(2 , 4 )
fpq.enqueue(1 , 64 )
fpq.enqueue(0 , 128 )
    print(fpq )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
    print(fpq )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
def element_priority_queue() -> None:
    epq = ElementPriorityQueue()
epq.enqueue(10 )
epq.enqueue(70 )
epq.enqueue(100 )
epq.enqueue(1 )
epq.enqueue(5 )
epq.enqueue(7 )
epq.enqueue(4 )
epq.enqueue(64 )
epq.enqueue(128 )
    print(epq )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
    print(epq )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
if __name__ == "__main__":
fixed_priority_queue()
element_priority_queue()
| 282
|
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class UNetBlockTesterMixin:
'''simple docstring'''
@property
    def dummy_input( self ):
'''simple docstring'''
return self.get_dummy_input()
@property
    def output_shape( self ):
'''simple docstring'''
if self.block_type == "down":
return (4, 32, 16, 16)
elif self.block_type == "mid":
return (4, 32, 32, 32)
elif self.block_type == "up":
return (4, 32, 64, 64)
raise ValueError(f'''\'{self.block_type}\' is not a supported block_type. Set it to \'up\', \'mid\', or \'down\'.''' )
    def get_dummy_input( self , include_temb=True , include_res_hidden_states_tuple=False , include_encoder_hidden_states=False , include_skip_sample=False , ):
        '''simple docstring'''
        batch_size = 4
        num_channels = 32
        sizes = (32, 32)
        generator = torch.manual_seed(0 )
        device = torch.device(torch_device )
        shape = (batch_size, num_channels) + sizes
        hidden_states = randn_tensor(shape , generator=generator , device=device )
        dummy_input = {'hidden_states': hidden_states}
        if include_temb:
            temb_channels = 128
            dummy_input['temb'] = randn_tensor((batch_size, temb_channels) , generator=generator , device=device )
        if include_res_hidden_states_tuple:
            generator_1 = torch.manual_seed(1 )
            dummy_input['res_hidden_states_tuple'] = (randn_tensor(shape , generator=generator_1 , device=device ),)
        if include_encoder_hidden_states:
            dummy_input['encoder_hidden_states'] = floats_tensor((batch_size, 32, 32) ).to(torch_device )
        if include_skip_sample:
            dummy_input['skip_sample'] = randn_tensor(((batch_size, 3) + sizes) , generator=generator , device=device )
        return dummy_input
    def prepare_init_args_and_inputs_for_common( self ):
        '''simple docstring'''
        init_dict = {
            'in_channels': 32,
            'out_channels': 32,
            'temb_channels': 128,
        }
        if self.block_type == "up":
            init_dict['prev_output_channel'] = 32
        if self.block_type == "mid":
            init_dict.pop('out_channels' )
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    def test_output( self , expected_slice ):
        '''simple docstring'''
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        unet_block = self.block_class(**init_dict )
        unet_block.to(torch_device )
        unet_block.eval()
        with torch.no_grad():
            output = unet_block(**inputs_dict )
        if isinstance(output , tuple ):
            output = output[0]
        self.assertEqual(output.shape , self.output_shape )
        output_slice = output[0, -1, -3:, -3:]
        expected_slice = torch.tensor(expected_slice ).to(torch_device )
        assert torch_all_close(output_slice.flatten() , expected_slice , atol=5E-3 )
@unittest.skipIf(torch_device == 'mps' , 'Training is not supported in mps' )
    def test_training( self ):
        '''simple docstring'''
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.block_class(**init_dict )
        model.to(torch_device )
        model.train()
        output = model(**inputs_dict )
        if isinstance(output , tuple ):
            output = output[0]
        device = torch.device(torch_device )
        noise = randn_tensor(output.shape , device=device )
        loss = torch.nn.functional.mse_loss(output , noise )
        loss.backward()
| 282
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_clap''': [
'''CLAP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ClapAudioConfig''',
'''ClapConfig''',
'''ClapTextConfig''',
],
'''processing_clap''': ['''ClapProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''CLAP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ClapModel''',
'''ClapPreTrainedModel''',
'''ClapTextModel''',
'''ClapTextModelWithProjection''',
'''ClapAudioModel''',
'''ClapAudioModelWithProjection''',
]
lowerCAmelCase__ = ['''ClapFeatureExtractor''']
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
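# Note: `_LazyModule` defers the torch-dependent imports above until an attribute of
# this package is first accessed, so a plain import of the CLAP package stays cheap.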
| 121
|
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class snake_case__:
"""simple docstring"""
def __init__( self : Any , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : List[str]=13 , SCREAMING_SNAKE_CASE : Union[str, Any]=10 , SCREAMING_SNAKE_CASE : Union[str, Any]=3 , SCREAMING_SNAKE_CASE : int=2 , SCREAMING_SNAKE_CASE : int=2 , SCREAMING_SNAKE_CASE : Optional[int]=True , SCREAMING_SNAKE_CASE : str=True , SCREAMING_SNAKE_CASE : int=32 , SCREAMING_SNAKE_CASE : Dict=5 , SCREAMING_SNAKE_CASE : str=4 , SCREAMING_SNAKE_CASE : Optional[int]=37 , SCREAMING_SNAKE_CASE : Any="gelu" , SCREAMING_SNAKE_CASE : List[Any]=0.1 , SCREAMING_SNAKE_CASE : Optional[Any]=0.1 , SCREAMING_SNAKE_CASE : Dict=10 , SCREAMING_SNAKE_CASE : Optional[int]=0.02 , SCREAMING_SNAKE_CASE : str="divided_space_time" , SCREAMING_SNAKE_CASE : Tuple=None , ):
lowercase__ : List[str] = parent
lowercase__ : Optional[int] = batch_size
lowercase__ : List[Any] = image_size
lowercase__ : Optional[Any] = num_channels
lowercase__ : List[str] = patch_size
lowercase__ : str = num_frames
lowercase__ : List[str] = is_training
lowercase__ : List[str] = use_labels
lowercase__ : int = hidden_size
lowercase__ : List[Any] = num_hidden_layers
lowercase__ : Optional[Any] = num_attention_heads
lowercase__ : Dict = intermediate_size
lowercase__ : Union[str, Any] = hidden_act
lowercase__ : str = hidden_dropout_prob
lowercase__ : Tuple = attention_probs_dropout_prob
lowercase__ : Tuple = attention_type
lowercase__ : Union[str, Any] = initializer_range
lowercase__ : Any = scope
lowercase__ : Optional[int] = num_labels
        # in TimeSformer, the total number of tokens equals num_frames * num_patches per frame + 1 CLS token
lowercase__ : Union[str, Any] = (image_size // patch_size) ** 2
lowercase__ : Union[str, Any] = (num_frames) * self.num_patches_per_frame + 1
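        # Worked example with this tester's defaults (image_size=10, patch_size=2,
        # num_frames=2): num_patches_per_frame = (10 // 2) ** 2 = 25 per frame, so
        # seq_length = 2 * 25 + 1 = 51 (the +1 is the CLS token).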
def snake_case ( self : Optional[int] ):
lowercase__ : Any = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
lowercase__ : Optional[Any] = None
if self.use_labels:
lowercase__ : str = ids_tensor([self.batch_size] , self.num_labels )
lowercase__ : List[Any] = self.get_config()
return config, pixel_values, labels
def snake_case ( self : Any ):
lowercase__ : Optional[int] = TimesformerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , )
lowercase__ : List[Any] = self.num_labels
return config
def snake_case ( self : Optional[int] , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Tuple ):
lowercase__ : Optional[Any] = TimesformerModel(config=SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
lowercase__ : Union[str, Any] = model(SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case ( self : Dict , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Any ):
lowercase__ : List[Any] = TimesformerForVideoClassification(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
lowercase__ : List[str] = model(SCREAMING_SNAKE_CASE )
# verify the logits shape
lowercase__ : List[str] = torch.Size((self.batch_size, self.num_labels) )
self.parent.assertEqual(result.logits.shape , SCREAMING_SNAKE_CASE )
def snake_case ( self : Optional[Any] ):
lowercase__ : List[str] = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ : int = config_and_inputs
lowercase__ : Optional[int] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class snake_case__(_UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
lowercase_ = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
lowercase_ = (
{"""feature-extraction""": TimesformerModel, """video-classification""": TimesformerForVideoClassification}
if is_torch_available()
else {}
)
lowercase_ = False
lowercase_ = False
lowercase_ = False
lowercase_ = False
def snake_case ( self : Dict ):
lowercase__ : Tuple = TimesformerModelTester(self )
lowercase__ : Any = ConfigTester(
self , config_class=SCREAMING_SNAKE_CASE , has_text_modality=SCREAMING_SNAKE_CASE , hidden_size=37 )
def snake_case ( self : Dict , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Tuple=False ):
lowercase__ : Union[str, Any] = copy.deepcopy(SCREAMING_SNAKE_CASE )
if return_labels:
if model_class in get_values(SCREAMING_SNAKE_CASE ):
lowercase__ : int = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=SCREAMING_SNAKE_CASE )
return inputs_dict
def snake_case ( self : Optional[Any] ):
self.config_tester.run_common_tests()
@unittest.skip(reason="TimeSformer does not use inputs_embeds" )
def snake_case ( self : Any ):
pass
def snake_case ( self : Tuple ):
lowercase__ , lowercase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : Union[str, Any] = model_class(SCREAMING_SNAKE_CASE )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowercase__ : Dict = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE , nn.Linear ) )
def snake_case ( self : Union[str, Any] ):
lowercase__ , lowercase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : str = model_class(SCREAMING_SNAKE_CASE )
lowercase__ : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ : Any = [*signature.parameters.keys()]
lowercase__ : Dict = ["pixel_values"]
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE )
def snake_case ( self : Tuple ):
lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE )
def snake_case ( self : str ):
lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_video_classification(*SCREAMING_SNAKE_CASE )
@slow
def snake_case ( self : Optional[int] ):
for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : Optional[int] = TimesformerModel.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsNotNone(SCREAMING_SNAKE_CASE )
def snake_case ( self : Any ):
if not self.has_attentions:
pass
else:
lowercase__ , lowercase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : Union[str, Any] = True
for model_class in self.all_model_classes:
lowercase__ : List[str] = self.model_tester.seq_length
lowercase__ : Any = self.model_tester.num_frames
lowercase__ : Optional[int] = True
lowercase__ : List[str] = False
lowercase__ : List[Any] = True
lowercase__ : Tuple = model_class(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
lowercase__ : Any = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
lowercase__ : List[Any] = outputs.attentions
self.assertEqual(len(SCREAMING_SNAKE_CASE ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowercase__ : Dict = True
lowercase__ : int = model_class(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
lowercase__ : Optional[Any] = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
lowercase__ : Any = outputs.attentions
self.assertEqual(len(SCREAMING_SNAKE_CASE ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
lowercase__ : Any = len(SCREAMING_SNAKE_CASE )
# Check attention is always last and order is fine
lowercase__ : Tuple = True
lowercase__ : Tuple = True
lowercase__ : Union[str, Any] = model_class(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
lowercase__ : Union[str, Any] = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
self.assertEqual(out_len + 1 , len(SCREAMING_SNAKE_CASE ) )
lowercase__ : Optional[Any] = outputs.attentions
self.assertEqual(len(SCREAMING_SNAKE_CASE ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
def snake_case ( self : List[Any] ):
def check_hidden_states_output(SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Union[str, Any] ):
lowercase__ : Optional[int] = model_class(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
lowercase__ : Tuple = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
lowercase__ : Optional[Any] = outputs.hidden_states
lowercase__ : List[str] = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
lowercase__ : int = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
lowercase__ , lowercase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : Dict = True
check_hidden_states_output(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ : Tuple = True
check_hidden_states_output(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( ):
"""simple docstring"""
lowercase__ : Optional[Any] = hf_hub_download(
repo_id="hf-internal-testing/spaghetti-video" , filename="eating_spaghetti.npy" , repo_type="dataset" )
lowercase__ : Optional[Any] = np.load(lowerCamelCase__ )
return list(lowerCamelCase__ )
@require_torch
@require_vision
class snake_case__(unittest.TestCase ):
"""simple docstring"""
@cached_property
def snake_case ( self : Dict ):
# logits were tested with a different mean and std, so we use the same here
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def snake_case ( self : List[Any] ):
lowercase__ : List[Any] = TimesformerForVideoClassification.from_pretrained("facebook/timesformer-base-finetuned-k400" ).to(
SCREAMING_SNAKE_CASE )
lowercase__ : int = self.default_image_processor
lowercase__ : List[str] = prepare_video()
lowercase__ : str = image_processor(video[:8] , return_tensors="pt" ).to(SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
lowercase__ : str = model(**SCREAMING_SNAKE_CASE )
# verify the logits
lowercase__ : Union[str, Any] = torch.Size((1, 400) )
self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE )
lowercase__ : Optional[Any] = torch.tensor([-0.3_016, -0.7_713, -0.4_205] ).to(SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE , atol=1E-4 ) )
| 121
| 1
|
'''simple docstring'''
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class A ( unittest.TestCase ):
@property
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def A__ ( self ) -> Tuple:
'''simple docstring'''
lowercase__ = ort.SessionOptions()
lowercase__ = False
return options
def A__ ( self ) -> List[Any]:
'''simple docstring'''
lowercase__ = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo.png""" )
lowercase__ = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo_mask.png""" )
lowercase__ = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy""" )
# using the PNDM scheduler by default
lowercase__ = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , revision="""onnx""" , safety_checker=lowerCamelCase_ , feature_extractor=lowerCamelCase_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
        lowercase__ = """A red cat sitting on a park bench"""
lowercase__ = np.random.RandomState(0 )
lowercase__ = pipe(
prompt=lowerCamelCase_ , image=lowerCamelCase_ , mask_image=lowerCamelCase_ , strength=0.75 , guidance_scale=7.5 , num_inference_steps=15 , generator=lowerCamelCase_ , output_type="""np""" , )
lowercase__ = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 1e-2
| 164
|
"""simple docstring"""
# Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
lowerCAmelCase = TypeVar("""T""")
class A_ ( Generic[T] ):
"""simple docstring"""
def __init__( self :Dict , lowerCamelCase_ :bool = True ):
"""simple docstring"""
lowerCamelCase__ : dict[T, list[T]] ={} # dictionary of lists
lowerCamelCase__ : int =directed
def UpperCAmelCase__ ( self :str , lowerCamelCase_ :T , lowerCamelCase_ :T ):
"""simple docstring"""
if not self.directed: # For undirected graphs
# if both source vertex and destination vertex are both present in the
# adjacency list, add destination vertex to source vertex list of adjacent
# vertices and add source vertex to destination vertex list of adjacent
# vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
self.adj_list[source_vertex].append(lowerCamelCase_ )
self.adj_list[destination_vertex].append(lowerCamelCase_ )
# if only source vertex is present in adjacency list, add destination vertex
# to source vertex list of adjacent vertices, then create a new vertex with
# destination vertex as key and assign a list containing the source vertex
            # as its first adjacent vertex.
elif source_vertex in self.adj_list:
self.adj_list[source_vertex].append(lowerCamelCase_ )
lowerCamelCase__ : Dict =[source_vertex]
# if only destination vertex is present in adjacency list, add source vertex
# to destination vertex list of adjacent vertices, then create a new vertex
            # with source vertex as key and assign a list containing the destination vertex
            # as its first adjacent vertex.
elif destination_vertex in self.adj_list:
self.adj_list[destination_vertex].append(lowerCamelCase_ )
lowerCamelCase__ : Dict =[destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and assign a list
            # containing the destination vertex as its first adjacent vertex; also
# create a new vertex with destination vertex as key and assign a list
            # containing the source vertex as its first adjacent vertex.
else:
lowerCamelCase__ : Union[str, Any] =[destination_vertex]
lowerCamelCase__ : Any =[source_vertex]
else: # For directed graphs
# if both source vertex and destination vertex are present in adjacency
# list, add destination vertex to source vertex list of adjacent vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
self.adj_list[source_vertex].append(lowerCamelCase_ )
# if only source vertex is present in adjacency list, add destination
# vertex to source vertex list of adjacent vertices and create a new vertex
# with destination vertex as key, which has no adjacent vertex
elif source_vertex in self.adj_list:
self.adj_list[source_vertex].append(lowerCamelCase_ )
lowerCamelCase__ : Optional[int] =[]
# if only destination vertex is present in adjacency list, create a new
# vertex with source vertex as key and assign a list containing destination
# vertex as first adjacent vertex
elif destination_vertex in self.adj_list:
lowerCamelCase__ : Tuple =[destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and a list containing
            # destination vertex as its first adjacent vertex. Then create a new vertex
# with destination vertex as key, which has no adjacent vertex
else:
lowerCamelCase__ : str =[destination_vertex]
lowerCamelCase__ : Optional[Any] =[]
return self
def __repr__( self :Optional[Any] ):
"""simple docstring"""
return pformat(self.adj_list )
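# A minimal usage sketch. Hedged: this row's identifiers are style-mangled (the class
# is named `A_` and `add_edge` appears as `UpperCAmelCase__` with a repeated parameter
# name), so the calls below assume the de-mangled API
# `GraphAdjacencyList(directed).add_edge(source_vertex, destination_vertex)`:
#
#     graph = GraphAdjacencyList(directed=False)
#     graph.add_edge(0, 1).add_edge(0, 2).add_edge(1, 2)  # add_edge returns self, so calls chain
#     print(graph)  # -> {0: [1, 2], 1: [0, 2], 2: [0, 1]}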
| 126
| 0
|
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowercase = logging.get_logger(__name__)
def lowerCamelCase_ ( UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
UpperCamelCase__ = OrderedDict()
for key, value in state_dict.items():
if key.startswith('''module.encoder''' ):
UpperCamelCase__ = key.replace('''module.encoder''', '''glpn.encoder''' )
if key.startswith('''module.decoder''' ):
UpperCamelCase__ = key.replace('''module.decoder''', '''decoder.stages''' )
if "patch_embed" in key:
# replace for example patch_embed1 by patch_embeddings.0
UpperCamelCase__ = key[key.find('''patch_embed''' ) + len('''patch_embed''' )]
UpperCamelCase__ = key.replace(F"""patch_embed{idx}""", F"""patch_embeddings.{int(UpperCamelCase__ )-1}""" )
if "norm" in key:
UpperCamelCase__ = key.replace('''norm''', '''layer_norm''' )
if "glpn.encoder.layer_norm" in key:
# replace for example layer_norm1 by layer_norm.0
UpperCamelCase__ = key[key.find('''glpn.encoder.layer_norm''' ) + len('''glpn.encoder.layer_norm''' )]
UpperCamelCase__ = key.replace(F"""layer_norm{idx}""", F"""layer_norm.{int(UpperCamelCase__ )-1}""" )
if "layer_norm1" in key:
UpperCamelCase__ = key.replace('''layer_norm1''', '''layer_norm_1''' )
if "layer_norm2" in key:
UpperCamelCase__ = key.replace('''layer_norm2''', '''layer_norm_2''' )
if "block" in key:
# replace for example block1 by block.0
UpperCamelCase__ = key[key.find('''block''' ) + len('''block''' )]
UpperCamelCase__ = key.replace(F"""block{idx}""", F"""block.{int(UpperCamelCase__ )-1}""" )
if "attn.q" in key:
UpperCamelCase__ = key.replace('''attn.q''', '''attention.self.query''' )
if "attn.proj" in key:
UpperCamelCase__ = key.replace('''attn.proj''', '''attention.output.dense''' )
if "attn" in key:
UpperCamelCase__ = key.replace('''attn''', '''attention.self''' )
if "fc1" in key:
UpperCamelCase__ = key.replace('''fc1''', '''dense1''' )
if "fc2" in key:
UpperCamelCase__ = key.replace('''fc2''', '''dense2''' )
if "linear_pred" in key:
UpperCamelCase__ = key.replace('''linear_pred''', '''classifier''' )
if "linear_fuse" in key:
UpperCamelCase__ = key.replace('''linear_fuse.conv''', '''linear_fuse''' )
UpperCamelCase__ = key.replace('''linear_fuse.bn''', '''batch_norm''' )
if "linear_c" in key:
# replace for example linear_c4 by linear_c.3
UpperCamelCase__ = key[key.find('''linear_c''' ) + len('''linear_c''' )]
UpperCamelCase__ = key.replace(F"""linear_c{idx}""", F"""linear_c.{int(UpperCamelCase__ )-1}""" )
if "bot_conv" in key:
UpperCamelCase__ = key.replace('''bot_conv''', '''0.convolution''' )
if "skip_conv1" in key:
UpperCamelCase__ = key.replace('''skip_conv1''', '''1.convolution''' )
if "skip_conv2" in key:
UpperCamelCase__ = key.replace('''skip_conv2''', '''2.convolution''' )
if "fusion1" in key:
UpperCamelCase__ = key.replace('''fusion1''', '''1.fusion''' )
if "fusion2" in key:
UpperCamelCase__ = key.replace('''fusion2''', '''2.fusion''' )
if "fusion3" in key:
UpperCamelCase__ = key.replace('''fusion3''', '''3.fusion''' )
if "fusion" in key and "conv" in key:
UpperCamelCase__ = key.replace('''conv''', '''convolutional_layer''' )
if key.startswith('''module.last_layer_depth''' ):
UpperCamelCase__ = key.replace('''module.last_layer_depth''', '''head.head''' )
UpperCamelCase__ = value
return new_state_dict
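# Example of the renaming above on a hypothetical checkpoint key:
#   "module.encoder.patch_embed1.norm.weight"
#     -> "glpn.encoder.patch_embeddings.0.layer_norm.weight"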
def lowerCamelCase_ ( UpperCamelCase__ : Any, UpperCamelCase__ : str ):
'''simple docstring'''
for i in range(config.num_encoder_blocks ):
for j in range(config.depths[i] ):
# read in weights + bias of keys and values (which is a single matrix in the original implementation)
UpperCamelCase__ = state_dict.pop(F"""glpn.encoder.block.{i}.{j}.attention.self.kv.weight""" )
UpperCamelCase__ = state_dict.pop(F"""glpn.encoder.block.{i}.{j}.attention.self.kv.bias""" )
# next, add keys and values (in that order) to the state dict
UpperCamelCase__ = kv_weight[
: config.hidden_sizes[i], :
]
UpperCamelCase__ = kv_bias[: config.hidden_sizes[i]]
UpperCamelCase__ = kv_weight[
config.hidden_sizes[i] :, :
]
UpperCamelCase__ = kv_bias[config.hidden_sizes[i] :]
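# Sketch of the split in read_in_k_v above, assuming kv_weight has shape
# (2 * hidden_size, hidden_size): rows [0, hidden_size) hold the key projection and
# rows [hidden_size, 2 * hidden_size) hold the value projection, which are stored
# back as separate key and value weights.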
def lowerCamelCase_ ( ):
'''simple docstring'''
UpperCamelCase__ = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
UpperCamelCase__ = Image.open(requests.get(UpperCamelCase__, stream=UpperCamelCase__ ).raw )
return image
@torch.no_grad()
def lowerCamelCase_ ( UpperCamelCase__ : str, UpperCamelCase__ : Optional[int], UpperCamelCase__ : Optional[int]=False, UpperCamelCase__ : List[Any]=None ):
'''simple docstring'''
UpperCamelCase__ = GLPNConfig(hidden_sizes=[64, 128, 320, 512], decoder_hidden_size=64, depths=[3, 8, 27, 3] )
# load image processor (only resize + rescale)
UpperCamelCase__ = GLPNImageProcessor()
# prepare image
UpperCamelCase__ = prepare_img()
UpperCamelCase__ = image_processor(images=UpperCamelCase__, return_tensors='''pt''' ).pixel_values
logger.info('''Converting model...''' )
# load original state dict
UpperCamelCase__ = torch.load(UpperCamelCase__, map_location=torch.device('''cpu''' ) )
# rename keys
UpperCamelCase__ = rename_keys(UpperCamelCase__ )
# key and value matrices need special treatment
read_in_k_v(UpperCamelCase__, UpperCamelCase__ )
# create HuggingFace model and load state dict
UpperCamelCase__ = GLPNForDepthEstimation(UpperCamelCase__ )
model.load_state_dict(UpperCamelCase__ )
model.eval()
# forward pass
UpperCamelCase__ = model(UpperCamelCase__ )
UpperCamelCase__ = outputs.predicted_depth
# verify output
if model_name is not None:
if "nyu" in model_name:
UpperCamelCase__ = torch.tensor(
[[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]] )
elif "kitti" in model_name:
UpperCamelCase__ = torch.tensor(
[[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]] )
else:
raise ValueError(F"""Unknown model name: {model_name}""" )
UpperCamelCase__ = torch.Size([1, 480, 640] )
assert predicted_depth.shape == expected_shape
assert torch.allclose(predicted_depth[0, :3, :3], UpperCamelCase__, atol=1e-4 )
print('''Looks ok!''' )
# finally, push to hub if required
if push_to_hub:
logger.info('''Pushing model and image processor to the hub...''' )
model.push_to_hub(
repo_path_or_name=Path(UpperCamelCase__, UpperCamelCase__ ), organization='''nielsr''', commit_message='''Add model''', use_temp_dir=UpperCamelCase__, )
image_processor.push_to_hub(
repo_path_or_name=Path(UpperCamelCase__, UpperCamelCase__ ), organization='''nielsr''', commit_message='''Add image processor''', use_temp_dir=UpperCamelCase__, )
if __name__ == "__main__":
lowercase = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""",
default=None,
type=str,
help="""Path to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub."""
)
parser.add_argument(
"""--model_name""",
default="""glpn-kitti""",
type=str,
help="""Name of the model in case you're pushing to the hub.""",
)
lowercase = parser.parse_args()
convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 35
|
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
lowercase = """src/transformers"""
# This is to make sure the transformers module imported is the one in the repo.
lowercase = direct_transformers_import(PATH_TO_TRANSFORMERS)
lowercase = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
lowercase = re.compile(R"""\[(.+?)\]\((https://huggingface\.co/.+?)\)""")
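# A quick illustration of what the pattern extracts (hypothetical docstring text):
#   _re_checkpoint.findall("[bert-base-uncased](https://huggingface.co/bert-base-uncased)")
#   -> [("bert-base-uncased", "https://huggingface.co/bert-base-uncased")]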
lowercase = {
"""DecisionTransformerConfig""",
"""EncoderDecoderConfig""",
"""MusicgenConfig""",
"""RagConfig""",
"""SpeechEncoderDecoderConfig""",
"""TimmBackboneConfig""",
"""VisionEncoderDecoderConfig""",
"""VisionTextDualEncoderConfig""",
"""LlamaConfig""",
}
def lowerCamelCase_ ( UpperCamelCase__ : str ):
'''simple docstring'''
UpperCamelCase__ = None
# source code of `config_class`
UpperCamelCase__ = inspect.getsource(UpperCamelCase__ )
UpperCamelCase__ = _re_checkpoint.findall(UpperCamelCase__ )
# Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
# For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
for ckpt_name, ckpt_link in checkpoints:
# allow the link to end with `/`
if ckpt_link.endswith('''/''' ):
UpperCamelCase__ = ckpt_link[:-1]
# verify the checkpoint name corresponds to the checkpoint link
UpperCamelCase__ = F"""https://huggingface.co/{ckpt_name}"""
if ckpt_link == ckpt_link_from_name:
UpperCamelCase__ = ckpt_name
break
return checkpoint
def lowerCamelCase_ ( ):
'''simple docstring'''
UpperCamelCase__ = []
for config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in config_class.__module__:
continue
UpperCamelCase__ = get_checkpoint_from_config_class(UpperCamelCase__ )
UpperCamelCase__ = config_class.__name__
if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
configs_without_checkpoint.append(UpperCamelCase__ )
if len(UpperCamelCase__ ) > 0:
UpperCamelCase__ = '''\n'''.join(sorted(UpperCamelCase__ ) )
raise ValueError(F"""The following configurations don't contain any valid checkpoint:\n{message}""" )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
| 35
| 1
|
"""simple docstring"""
import unittest
from transformers import DonutProcessor
_lowerCAmelCase :str = 'naver-clova-ix/donut-base'
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __lowerCAmelCase ( self ) -> Optional[Any]:
_UpperCAmelCase : Optional[Any] = DonutProcessor.from_pretrained(A )
def __lowerCAmelCase ( self ) -> str:
_UpperCAmelCase : Any = {
'''name''': '''John Doe''',
'''age''': '''99''',
'''city''': '''Atlanta''',
'''state''': '''GA''',
'''zip''': '''30301''',
'''phone''': '''123-4567''',
'''nicknames''': [{'''nickname''': '''Johnny'''}, {'''nickname''': '''JD'''}],
}
_UpperCAmelCase : Optional[int] = (
'''<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>'''
'''<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>'''
'''<s_nicknames><s_nickname>Johnny</s_nickname>'''
'''<sep/><s_nickname>JD</s_nickname></s_nicknames>'''
)
_UpperCAmelCase : Any = self.processor.tokenajson(A )
self.assertDictEqual(A , A )
| 263
|
"""simple docstring"""
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@require_torch
def __lowerCAmelCase ( self ) -> Any:
_UpperCAmelCase : str = pipeline(
task='''zero-shot-audio-classification''' , model='''hf-internal-testing/tiny-clap-htsat-unfused''' )
_UpperCAmelCase : List[Any] = load_dataset('''ashraq/esc50''' )
_UpperCAmelCase : Optional[int] = dataset['''train''']['''audio'''][-1]['''array''']
        _UpperCAmelCase : str = audio_classifier(A , candidate_labels=['''Sound of a dog''', '''Sound of vacuum cleaner'''] )
self.assertEqual(
            nested_simplify(A ) , [{'''score''': 0.501, '''label''': '''Sound of a dog'''}, {'''score''': 0.499, '''label''': '''Sound of vacuum cleaner'''}] , )
@unittest.skip('''No models are available in TF''' )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
pass
@slow
@require_torch
def __lowerCAmelCase ( self ) -> str:
_UpperCAmelCase : Union[str, Any] = pipeline(
task='''zero-shot-audio-classification''' , model='''laion/clap-htsat-unfused''' , )
        # This is an audio clip of a dog
_UpperCAmelCase : List[Any] = load_dataset('''ashraq/esc50''' )
_UpperCAmelCase : Optional[int] = dataset['''train''']['''audio'''][-1]['''array''']
        _UpperCAmelCase : Any = audio_classifier(A , candidate_labels=['''Sound of a dog''', '''Sound of vacuum cleaner'''] )
self.assertEqual(
nested_simplify(A ) , [
{'''score''': 0.999, '''label''': '''Sound of a dog'''},
                {'''score''': 0.001, '''label''': '''Sound of vacuum cleaner'''},
] , )
        _UpperCAmelCase : List[Any] = audio_classifier([audio] * 5 , candidate_labels=['''Sound of a dog''', '''Sound of vacuum cleaner'''] )
self.assertEqual(
nested_simplify(A ) , [
[
{'''score''': 0.999, '''label''': '''Sound of a dog'''},
                    {'''score''': 0.001, '''label''': '''Sound of vacuum cleaner'''},
],
]
* 5 , )
_UpperCAmelCase : Tuple = audio_classifier(
            [audio] * 5 , candidate_labels=['''Sound of a dog''', '''Sound of vacuum cleaner'''] , batch_size=5 )
self.assertEqual(
nested_simplify(A ) , [
[
{'''score''': 0.999, '''label''': '''Sound of a dog'''},
                    {'''score''': 0.001, '''label''': '''Sound of vacuum cleaner'''},
],
]
* 5 , )
@unittest.skip('''No models are available in TF''' )
def __lowerCAmelCase ( self ) -> int:
pass
| 263
| 1
|
import os
import sys
import unittest
_lowerCAmelCase : int = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
_lowerCAmelCase : List[str] = os.path.join('''tests''', '''models''', '''bert''', '''test_modeling_bert.py''')
_lowerCAmelCase : Tuple = os.path.join('''tests''', '''models''', '''blip''', '''test_modeling_blip.py''')
class __magic_name__ ( unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( self :Union[str, Any] ):
'''simple docstring'''
A_ : Dict = get_test_to_tester_mapping(snake_case )
A_ : int = get_test_to_tester_mapping(snake_case )
A_ : Tuple = {"BertModelTest": "BertModelTester"}
A_ : Tuple = {
"BlipModelTest": "BlipModelTester",
"BlipTextImageModelTest": "BlipTextImageModelsModelTester",
"BlipTextModelTest": "BlipTextModelTester",
"BlipTextRetrievalModelTest": "BlipTextRetrievalModelTester",
"BlipVQAModelTest": "BlipVQAModelTester",
"BlipVisionModelTest": "BlipVisionModelTester",
}
self.assertEqual(get_test_info.to_json(snake_case ) , snake_case )
self.assertEqual(get_test_info.to_json(snake_case ) , snake_case )
def SCREAMING_SNAKE_CASE ( self :Union[str, Any] ):
'''simple docstring'''
A_ : Dict = get_model_to_test_mapping(snake_case )
A_ : List[str] = get_model_to_test_mapping(snake_case )
A_ : int = {
"BertForMaskedLM": ["BertModelTest"],
"BertForMultipleChoice": ["BertModelTest"],
"BertForNextSentencePrediction": ["BertModelTest"],
"BertForPreTraining": ["BertModelTest"],
"BertForQuestionAnswering": ["BertModelTest"],
"BertForSequenceClassification": ["BertModelTest"],
"BertForTokenClassification": ["BertModelTest"],
"BertLMHeadModel": ["BertModelTest"],
"BertModel": ["BertModelTest"],
}
A_ : Union[str, Any] = {
"BlipForConditionalGeneration": ["BlipTextImageModelTest"],
"BlipForImageTextRetrieval": ["BlipTextRetrievalModelTest"],
"BlipForQuestionAnswering": ["BlipVQAModelTest"],
"BlipModel": ["BlipModelTest"],
"BlipTextModel": ["BlipTextModelTest"],
"BlipVisionModel": ["BlipVisionModelTest"],
}
self.assertEqual(get_test_info.to_json(snake_case ) , snake_case )
self.assertEqual(get_test_info.to_json(snake_case ) , snake_case )
def SCREAMING_SNAKE_CASE ( self :Any ):
'''simple docstring'''
A_ : Union[str, Any] = get_model_to_tester_mapping(snake_case )
A_ : List[Any] = get_model_to_tester_mapping(snake_case )
A_ : Union[str, Any] = {
"BertForMaskedLM": ["BertModelTester"],
"BertForMultipleChoice": ["BertModelTester"],
"BertForNextSentencePrediction": ["BertModelTester"],
"BertForPreTraining": ["BertModelTester"],
"BertForQuestionAnswering": ["BertModelTester"],
"BertForSequenceClassification": ["BertModelTester"],
"BertForTokenClassification": ["BertModelTester"],
"BertLMHeadModel": ["BertModelTester"],
"BertModel": ["BertModelTester"],
}
A_ : List[str] = {
"BlipForConditionalGeneration": ["BlipTextImageModelsModelTester"],
"BlipForImageTextRetrieval": ["BlipTextRetrievalModelTester"],
"BlipForQuestionAnswering": ["BlipVQAModelTester"],
"BlipModel": ["BlipModelTester"],
"BlipTextModel": ["BlipTextModelTester"],
"BlipVisionModel": ["BlipVisionModelTester"],
}
self.assertEqual(get_test_info.to_json(snake_case ) , snake_case )
self.assertEqual(get_test_info.to_json(snake_case ) , snake_case )
| 351
|
from ..utils import is_flax_available, is_torch_available
if is_torch_available():
from .autoencoder_kl import AutoencoderKL
from .controlnet import ControlNetModel
from .dual_transformer_ad import DualTransformeraDModel
from .modeling_utils import ModelMixin
from .prior_transformer import PriorTransformer
from .ta_film_transformer import TaFilmDecoder
from .transformer_ad import TransformeraDModel
from .unet_ad import UNetaDModel
from .unet_ad_condition import UNetaDConditionModel
from .vq_model import VQModel
if is_flax_available():
from .controlnet_flax import FlaxControlNetModel
from .unet_ad_condition_flax import FlaxUNetaDConditionModel
from .vae_flax import FlaxAutoencoderKL
| 70
| 0
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class snake_case_ ( unittest.TestCase ):
def __init__( self : Tuple , lowercase_ : Dict , lowercase_ : List[str]=7 , lowercase_ : Tuple=3 , lowercase_ : List[str]=18 , lowercase_ : List[Any]=30 , lowercase_ : Tuple=4_00 , lowercase_ : Any=True , lowercase_ : List[Any]=None , lowercase_ : Optional[Any]=True , lowercase_ : Optional[Any]=None , ) -> Optional[Any]:
lowercase__ : Union[str, Any] = size if size is not None else {"shortest_edge": 20}
lowercase__ : Optional[int] = crop_size if crop_size is not None else {"height": 18, "width": 18}
lowercase__ : List[str] = parent
lowercase__ : int = batch_size
lowercase__ : Any = num_channels
lowercase__ : Optional[Any] = image_size
lowercase__ : int = min_resolution
lowercase__ : List[str] = max_resolution
lowercase__ : List[Any] = do_resize
lowercase__ : Optional[int] = size
lowercase__ : Dict = do_center_crop
lowercase__ : Optional[int] = crop_size
def __UpperCamelCase ( self : List[Any] ) -> Any:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class snake_case_ ( __A ,unittest.TestCase ):
__A : Optional[int] = MobileNetVaImageProcessor if is_vision_available() else None
def __UpperCamelCase ( self : Tuple ) -> List[str]:
lowercase__ : int = MobileNetVaImageProcessingTester(self )
@property
def __UpperCamelCase ( self : Any ) -> str:
return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCamelCase ( self : str ) -> Union[str, Any]:
lowercase__ : Tuple = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowercase_ , "do_resize" ) )
self.assertTrue(hasattr(lowercase_ , "size" ) )
self.assertTrue(hasattr(lowercase_ , "do_center_crop" ) )
self.assertTrue(hasattr(lowercase_ , "crop_size" ) )
def __UpperCamelCase ( self : List[str] ) -> Optional[Any]:
lowercase__ : Dict = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 20} )
self.assertEqual(image_processor.crop_size , {"height": 18, "width": 18} )
lowercase__ : str = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"shortest_edge": 42} )
self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84} )
def __UpperCamelCase ( self : Optional[Any] ) -> List[str]:
pass
def __UpperCamelCase ( self : Optional[Any] ) -> str:
# Initialize image_processing
lowercase__ : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowercase__ : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase_ )
for image in image_inputs:
self.assertIsInstance(lowercase_ , Image.Image )
# Test not batched input
lowercase__ : List[str] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
lowercase__ : Dict = image_processing(lowercase_ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def __UpperCamelCase ( self : Any ) -> str:
# Initialize image_processing
lowercase__ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowercase__ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase_ , numpify=lowercase_ )
for image in image_inputs:
self.assertIsInstance(lowercase_ , np.ndarray )
# Test not batched input
lowercase__ : Optional[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
lowercase__ : Optional[Any] = image_processing(lowercase_ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def __UpperCamelCase ( self : Optional[Any] ) -> Optional[int]:
# Initialize image_processing
lowercase__ : Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowercase__ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase_ , torchify=lowercase_ )
for image in image_inputs:
self.assertIsInstance(lowercase_ , torch.Tensor )
# Test not batched input
lowercase__ : Tuple = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
lowercase__ : List[str] = image_processing(lowercase_ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
| 87
|
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
__snake_case : Dict = """<<<<<<< This should probably be modified because it mentions: """
__snake_case : Any = """=======
>>>>>>>
"""
__snake_case : Any = [
"""TextEncoderConfig""",
"""ByteTextEncoder""",
"""SubwordTextEncoder""",
"""encoder_config""",
"""maybe_build_from_corpus""",
"""manual_dir""",
]
__snake_case : Dict = [
# (pattern, replacement)
# Order is important here for some replacements
(r"""tfds\.core""", r"""datasets"""),
(r"""tf\.io\.gfile\.GFile""", r"""open"""),
(r"""tf\.([\w\d]+)""", r"""datasets.Value('\1')"""),
(r"""tfds\.features\.Text\(\)""", r"""datasets.Value('string')"""),
(r"""tfds\.features\.Text\(""", r"""datasets.Value('string'),"""),
(r"""features\s*=\s*tfds.features.FeaturesDict\(""", r"""features=datasets.Features("""),
(r"""tfds\.features\.FeaturesDict\(""", r"""dict("""),
(r"""The TensorFlow Datasets Authors""", r"""The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"""),
(r"""tfds\.""", r"""datasets."""),
(r"""dl_manager\.manual_dir""", r"""self.config.data_dir"""),
(r"""self\.builder_config""", r"""self.config"""),
]
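# For example, applying the substitutions above (in order) to
#   features=tfds.features.FeaturesDict({"text": tfds.features.Text()})
# yields
#   features=datasets.Features({"text": datasets.Value('string')})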
def _UpperCAmelCase ( a__):
'''simple docstring'''
return ConvertCommand(args.tfds_path , args.datasets_directory)
class A__(a_ ):
"""simple docstring"""
@staticmethod
def UpperCamelCase__ ( _lowercase ) -> Dict:
a_ : Optional[Any] = parser.add_parser(
"""convert""" , help="""Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.""" , )
train_parser.add_argument(
"""--tfds_path""" , type=_lowercase , required=_lowercase , help="""Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.""" , )
train_parser.add_argument(
"""--datasets_directory""" , type=_lowercase , required=_lowercase , help="""Path to the HuggingFace Datasets folder.""" )
train_parser.set_defaults(func=_lowercase )
def __init__( self , _lowercase , _lowercase , *_lowercase ) -> str:
a_ : List[Any] = get_logger("""datasets-cli/converting""" )
a_ : Optional[Any] = tfds_path
a_ : List[Any] = datasets_directory
def UpperCamelCase__ ( self ) -> Dict:
if os.path.isdir(self._tfds_path ):
a_ : List[Any] = os.path.abspath(self._tfds_path )
elif os.path.isfile(self._tfds_path ):
a_ : Dict = os.path.dirname(self._tfds_path )
else:
raise ValueError("""--tfds_path is neither a directory nor a file. Please check path.""" )
a_ : List[Any] = os.path.abspath(self._datasets_directory )
self._logger.info(F'''Converting datasets from {abs_tfds_path} to {abs_datasets_path}''' )
a_ : Dict = []
a_ : Tuple = []
a_ : str = {}
if os.path.isdir(self._tfds_path ):
a_ : str = os.listdir(_lowercase )
else:
a_ : int = [os.path.basename(self._tfds_path )]
for f_name in file_names:
self._logger.info(F'''Looking at file {f_name}''' )
a_ : List[str] = os.path.join(_lowercase , _lowercase )
a_ : Dict = os.path.join(_lowercase , _lowercase )
if not os.path.isfile(_lowercase ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
self._logger.info("""Skipping file""" )
continue
with open(_lowercase , encoding="""utf-8""" ) as f:
a_ : Any = f.readlines()
a_ : Any = []
a_ : str = False
a_ : List[str] = False
a_ : List[Any] = []
for line in lines:
a_ : Union[str, Any] = line
# Convert imports
if "import tensorflow.compat.v2 as tf" in out_line:
continue
elif "@tfds.core" in out_line:
continue
elif "builder=self" in out_line:
continue
elif "import tensorflow_datasets.public_api as tfds" in out_line:
a_ : List[Any] = """import datasets\n"""
elif "import tensorflow" in out_line:
# order is important here
a_ : Optional[int] = """"""
continue
elif "from absl import logging" in out_line:
a_ : List[str] = """from datasets import logging\n"""
elif "getLogger" in out_line:
a_ : List[str] = out_line.replace("""getLogger""" , """get_logger""" )
elif any(expression in out_line for expression in TO_HIGHLIGHT ):
a_ : Dict = True
a_ : Optional[Any] = list(filter(lambda _lowercase : e in out_line , _lowercase ) )
out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(_lowercase ) + """\n""" )
out_lines.append(_lowercase )
out_lines.append(_lowercase )
continue
else:
for pattern, replacement in TO_CONVERT:
a_ : List[str] = re.sub(_lowercase , _lowercase , _lowercase )
# Take care of saving utilities (to later move them together with main script)
if "tensorflow_datasets" in out_line:
a_ : Tuple = re.match(r"""from\stensorflow_datasets.*import\s([^\.\r\n]+)""" , _lowercase )
tfds_imports.extend(imp.strip() for imp in match.group(1 ).split(""",""" ) )
a_ : Optional[int] = """from . import """ + match.group(1 )
# Check we have not forget anything
if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
raise ValueError(F'''Error converting {out_line.strip()}''' )
if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
a_ : Optional[Any] = True
out_lines.append(_lowercase )
if is_builder or "wmt" in f_name:
# We create a new directory for each dataset
a_ : List[str] = f_name.replace(""".py""" , """""" )
a_ : Optional[int] = os.path.join(_lowercase , _lowercase )
a_ : Dict = os.path.join(_lowercase , _lowercase )
os.makedirs(_lowercase , exist_ok=_lowercase )
self._logger.info(F'''Adding directory {output_dir}''' )
imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
else:
# Utilities will be moved at the end
utils_files.append(_lowercase )
if needs_manual_update:
with_manual_update.append(_lowercase )
with open(_lowercase , """w""" , encoding="""utf-8""" ) as f:
f.writelines(_lowercase )
self._logger.info(F'''Converted in {output_file}''' )
for utils_file in utils_files:
try:
a_ : Optional[int] = os.path.basename(_lowercase )
a_ : List[Any] = imports_to_builder_map[f_name.replace(""".py""" , """""" )]
self._logger.info(F'''Moving {dest_folder} to {utils_file}''' )
shutil.copy(_lowercase , _lowercase )
except KeyError:
self._logger.error(F'''Cannot find destination folder for {utils_file}. Please copy manually.''' )
if with_manual_update:
for file_path in with_manual_update:
self._logger.warning(
F'''You need to manually update file {file_path} to remove configurations using \'TextEncoderConfig\'.''' )
| 248
| 0
|
'''simple docstring'''
def UpperCamelCase( UpperCAmelCase_ ):
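    """
    Return the sum of the proper divisors of ``input_num`` (its aliquot sum).

    Examples, assuming a de-mangled signature such as ``sum_of_divisors(input_num)``
    (the identifiers in this row are style-mangled):

    >>> sum_of_divisors(6)
    6
    >>> sum_of_divisors(28)
    28
    >>> sum_of_divisors(12)
    16
    """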
if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
raise ValueError('Input must be an integer' )
if input_num <= 0:
raise ValueError('Input must be positive' )
return sum(
divisor for divisor in range(1 , input_num // 2 + 1 ) if input_num % divisor == 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 280
|
'''simple docstring'''
from __future__ import annotations
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
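    """
    Return the maximum of ``nums[left:right + 1]`` via divide and conquer.

    Examples, assuming the de-mangled signature ``find_max(nums, left, right)``
    (the identifiers in this row are style-mangled):

    >>> find_max([3, 1, 4, 1, 5, 9, 2, 6], 0, 7)
    9
    >>> find_max([-5], 0, 0)
    -5
    """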
if len(UpperCAmelCase_ ) == 0:
raise ValueError('find_max() arg is an empty sequence' )
if (
left >= len(UpperCAmelCase_ )
or left < -len(UpperCAmelCase_ )
or right >= len(UpperCAmelCase_ )
or right < -len(UpperCAmelCase_ )
):
raise IndexError('list index out of range' )
if left == right:
return nums[left]
UpperCAmelCase : Optional[int] = (left + right) >> 1 # the middle
UpperCAmelCase : Any = find_max(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) # find max in range[left, mid]
UpperCAmelCase : Union[str, Any] = find_max(UpperCAmelCase_ , mid + 1 , UpperCAmelCase_ ) # find max in range[mid + 1, right]
return left_max if left_max >= right_max else right_max
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 280
| 1
|
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def UpperCamelCase( __UpperCamelCase : Optional[int] ,__UpperCamelCase : str ):
# Load checkpoint
lowerCAmelCase_ : Union[str, Any] = torch.load(__UpperCamelCase ,map_location='''cpu''' )
lowerCAmelCase_ : List[str] = chkpt['''model''']
# We have the base model one level deeper than the original XLM repository
lowerCAmelCase_ : Union[str, Any] = {}
for k, v in state_dict.items():
if "pred_layer" in k:
lowerCAmelCase_ : str = v
else:
lowerCAmelCase_ : List[str] = v
lowerCAmelCase_ : Tuple = chkpt['''params''']
lowerCAmelCase_ : Optional[Any] = {n: v for n, v in config.items() if not isinstance(__UpperCamelCase ,(torch.FloatTensor, numpy.ndarray) )}
lowerCAmelCase_ : List[str] = chkpt['''dico_word2id''']
lowerCAmelCase_ : List[Any] = {s + '''</w>''' if s.find('''@@''' ) == -1 and i > 13 else s.replace('''@@''' ,'''''' ): i for s, i in vocab.items()}
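    # Sketch of the remapping above: BPE continuation tokens (containing "@@") have the
    # marker stripped, while ordinary tokens past the first 14 special entries gain an
    # explicit end-of-word suffix "</w>", matching the fastBPE-style vocabulary the XLM
    # tokenizer expects.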
# Save pytorch-model
lowerCAmelCase_ : Optional[int] = pytorch_dump_folder_path + '''/''' + WEIGHTS_NAME
lowerCAmelCase_ : Optional[Any] = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
lowerCAmelCase_ : Optional[Any] = pytorch_dump_folder_path + '''/''' + VOCAB_FILES_NAMES['''vocab_file''']
print(f"""Save PyTorch model to {pytorch_weights_dump_path}""" )
torch.save(__UpperCamelCase ,__UpperCamelCase )
print(f"""Save configuration file to {pytorch_config_dump_path}""" )
with open(__UpperCamelCase ,'''w''' ,encoding='''utf-8''' ) as f:
f.write(json.dumps(__UpperCamelCase ,indent=2 ) + '''\n''' )
print(f"""Save vocab file to {pytorch_config_dump_path}""" )
with open(__UpperCamelCase ,'''w''' ,encoding='''utf-8''' ) as f:
f.write(json.dumps(__UpperCamelCase ,indent=2 ) + '''\n''' )
if __name__ == "__main__":
A__ : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--xlm_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
A__ : Tuple = parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
| 103
|
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class A_ :
def __init__( self : Dict , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[int]=2 , UpperCAmelCase : int=3 , UpperCAmelCase : int=4 , UpperCAmelCase : str=2 , UpperCAmelCase : Union[str, Any]=7 , UpperCAmelCase : List[str]=True , UpperCAmelCase : Dict=True , UpperCAmelCase : Tuple=True , UpperCAmelCase : Optional[Any]=True , UpperCAmelCase : Optional[Any]=9_9 , UpperCAmelCase : Tuple=3_6 , UpperCAmelCase : Tuple=2 , UpperCAmelCase : Optional[int]=4 , UpperCAmelCase : Union[str, Any]=3_7 , UpperCAmelCase : Any="gelu" , UpperCAmelCase : Optional[int]=0.1 , UpperCAmelCase : Optional[Any]=0.1 , UpperCAmelCase : List[str]=5_1_2 , UpperCAmelCase : int=1_6 , UpperCAmelCase : Optional[Any]=2 , UpperCAmelCase : Optional[Any]=0.02 , UpperCAmelCase : Optional[Any]=6 , UpperCAmelCase : int=6 , UpperCAmelCase : str=3 , UpperCAmelCase : Any=4 , UpperCAmelCase : Optional[int]=None , UpperCAmelCase : List[str]=1_0_0_0 , ) -> int:
__lowerCAmelCase: List[str] = parent
__lowerCAmelCase: List[str] = batch_size
__lowerCAmelCase: Optional[Any] = num_channels
__lowerCAmelCase: Tuple = image_size
__lowerCAmelCase: str = patch_size
__lowerCAmelCase: List[str] = is_training
__lowerCAmelCase: Union[str, Any] = use_input_mask
__lowerCAmelCase: Union[str, Any] = use_token_type_ids
__lowerCAmelCase: Tuple = use_labels
__lowerCAmelCase: Optional[int] = vocab_size
__lowerCAmelCase: Any = hidden_size
__lowerCAmelCase: Tuple = num_hidden_layers
__lowerCAmelCase: Optional[int] = num_attention_heads
__lowerCAmelCase: Dict = intermediate_size
__lowerCAmelCase: Union[str, Any] = hidden_act
__lowerCAmelCase: str = hidden_dropout_prob
__lowerCAmelCase: str = attention_probs_dropout_prob
__lowerCAmelCase: str = max_position_embeddings
__lowerCAmelCase: str = type_vocab_size
__lowerCAmelCase: Optional[Any] = type_sequence_label_size
__lowerCAmelCase: Union[str, Any] = initializer_range
__lowerCAmelCase: List[str] = coordinate_size
__lowerCAmelCase: Tuple = shape_size
__lowerCAmelCase: List[Any] = num_labels
__lowerCAmelCase: Any = num_choices
__lowerCAmelCase: List[str] = scope
__lowerCAmelCase: Dict = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
__lowerCAmelCase: Optional[Any] = text_seq_length
__lowerCAmelCase: List[Any] = (image_size // patch_size) ** 2 + 1
__lowerCAmelCase: int = self.text_seq_length + self.image_seq_length
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox)
        bbox = bbox.numpy()
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    tmp_coordinate = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = tmp_coordinate
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    tmp_coordinate = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = tmp_coordinate
        bbox = tf.constant(bbox)

        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels)

        config = LayoutLMvaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, coordinate_size=self.coordinate_size, shape_size=self.shape_size, input_size=self.image_size, patch_size=self.patch_size,
        )

        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    def create_and_check_model(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask):
        model = TFLayoutLMvaModel(config=config)

        # text + image
        result = model(input_ids, pixel_values=pixel_values, training=False)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask,
            token_type_ids=token_type_ids, training=False,
        )
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

        # text only
        result = model(input_ids, training=False)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size))

        # image only
        result = model({"pixel_values": pixel_values}, training=False)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size))
    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMvaForSequenceClassification(config=config)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask,
            token_type_ids=token_type_ids, labels=sequence_labels, training=False,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMvaForTokenClassification(config=config)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask,
            token_type_ids=token_type_ids, labels=token_labels, training=False,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))
    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
    ):
        config.num_labels = 2
        model = TFLayoutLMvaForQuestionAnswering(config=config)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask,
            token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels,
            training=False,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_tf
class TFLayoutLMvaModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFLayoutLMvaModel,
            TFLayoutLMvaForQuestionAnswering,
            TFLayoutLMvaForSequenceClassification,
            TFLayoutLMvaForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {"document-question-answering": TFLayoutLMvaForQuestionAnswering, "feature-extraction": TFLayoutLMvaModel}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_onnx = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False) -> dict:
        inputs_dict = copy.deepcopy(inputs_dict)

        if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: tf.tile(tf.expand_dims(v, 1), (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1))
                if isinstance(v, tf.Tensor) and v.ndim > 0
                else v
                for k, v in inputs_dict.items()
            }

        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = tf.ones(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
                inputs_dict["end_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = tf.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length), dtype=tf.int32
                )

        return inputs_dict
    def setUp(self):
        self.model_tester = TFLayoutLMvaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMvaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_loss_computation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            if getattr(model, "hf_compute_loss", None):
                # The number of elements in the loss should be the same as the number of elements in the label
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                added_label = prepared_for_class[
                    sorted(prepared_for_class.keys() - inputs_dict.keys(), reverse=True)[0]
                ]
                expected_loss_size = added_label.shape.as_list()[:1]

                # Test that the model correctly computes the loss with kwargs
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                input_ids = prepared_for_class.pop("input_ids")

                loss = model(input_ids, **prepared_for_class)[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])

                # Test that the model correctly computes the loss when we mask some positions
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                input_ids = prepared_for_class.pop("input_ids")
                if "labels" in prepared_for_class:
                    labels = prepared_for_class["labels"].numpy()
                    if len(labels.shape) > 1 and labels.shape[1] != 1:
                        labels[0] = -100
                        prepared_for_class["labels"] = tf.convert_to_tensor(labels)
                        loss = model(input_ids, **prepared_for_class)[0]
                        self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
                        self.assertTrue(not np.any(np.isnan(loss.numpy())))

                # Test that the model correctly computes the loss with a dict
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                loss = model(prepared_for_class)[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])

                # Test that the model correctly computes the loss with a tuple
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)

                # Get keys that were added with the _prepare_for_class function
                label_keys = prepared_for_class.keys() - inputs_dict.keys()
                signature = inspect.signature(model.call).parameters
                signature_names = list(signature.keys())

                # Create a dictionary holding the location of the tensors in the tuple
                tuple_index_mapping = {0: "input_ids"}
                for label_key in label_keys:
                    label_key_index = signature_names.index(label_key)
                    tuple_index_mapping[label_key_index] = label_key
                sorted_tuple_index_mapping = sorted(tuple_index_mapping.items())

                # Initialize a list with their default values, update the values and convert to a tuple
                list_input = []
                for name in signature_names:
                    if name != "kwargs":
                        list_input.append(signature[name].default)
                for index, value in sorted_tuple_index_mapping:
                    list_input[index] = prepared_for_class[value]
                tuple_input = tuple(list_input)

                # Send to model
                loss = model(tuple_input[:-1])[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
    def test_model(self):
        (
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, _, _,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, bbox, pixel_values, token_type_ids, input_mask)
    def test_model_various_embeddings(self):
        (
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, _, _,
        ) = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config.position_embedding_type = type
            self.model_tester.create_and_check_model(
                config, input_ids, bbox, pixel_values, token_type_ids, input_mask)
    def test_for_sequence_classification(self):
        (
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, _,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels)
    def test_for_token_classification(self):
        (
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, _, token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels)
    def test_for_question_answering(self):
        (
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, _,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels)
    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMvaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """simple docstring"""
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_tf
class TFLayoutLMvaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LayoutLMvaImageProcessor(apply_ocr=False) if is_vision_available() else None

    @slow
    def test_inference_no_head(self):
        model = TFLayoutLMvaModel.from_pretrained('microsoft/layoutlmv3-base')

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors='tf').pixel_values

        input_ids = tf.constant([[1, 2]])
        bbox = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]]), axis=0)

        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, pixel_values=pixel_values, training=False)

        # verify the logits
        expected_shape = (1, 199, 768)
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = tf.constant(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]])
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
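# Illustrative only: slow tests like the one above are skipped by default and are
# typically enabled with the RUN_SLOW environment variable, e.g. (assumed file path):
#   RUN_SLOW=1 pytest tests/models/layoutlmv3/test_modeling_tf_layoutlmv3.py -k "integration"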
| 322
| 0
|
def jaccard_similarity(set_a, set_b, alternative_union=False):
    '''simple docstring'''
    if isinstance(set_a, set) and isinstance(set_b, set):
        intersection = len(set_a.intersection(set_b))

        if alternative_union:
            union = len(set_a) + len(set_b)
        else:
            union = len(set_a.union(set_b))

        return intersection / union

    if isinstance(set_a, (list, tuple)) and isinstance(set_b, (list, tuple)):
        intersection = [element for element in set_a if element in set_b]

        if alternative_union:
            union = len(set_a) + len(set_b)
            return len(intersection) / union
        else:
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection) / len(union)

    return None
if __name__ == "__main__":
a_ = {"""a""", """b""", """c""", """d""", """e"""}
a_ = {"""c""", """d""", """e""", """f""", """h""", """i"""}
print(jaccard_similarity(set_a, set_b))
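    # With the sample sets above: |intersection| = 3 ({c, d, e}) and |union| = 8,
    # so this prints 0.375.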
| 352
|
"""simple docstring"""
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch
import numpy as np
import pytest
from absl.testing import parameterized
import datasets
from datasets import load_metric
from .utils import for_all_test_methods, local, slow
# mark all tests as integration
pytestmark = pytest.mark.integration

REQUIRE_FAIRSEQ = {"comet"}
_has_fairseq = importlib.util.find_spec("fairseq") is not None

UNSUPPORTED_ON_WINDOWS = {"code_eval"}
_on_windows = os.name == "nt"

REQUIRE_TRANSFORMERS = {"bertscore", "frugalscore", "perplexity"}
_has_transformers = importlib.util.find_spec("transformers") is not None
def skip_if_metric_requires_fairseq(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
            self.skipTest('"test requires Fairseq"')
        else:
            test_case(self, metric_name)

    return wrapper
def skip_if_metric_requires_transformers(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
            self.skipTest('"test requires transformers"')
        else:
            test_case(self, metric_name)

    return wrapper
def skip_on_windows(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
            self.skipTest('"test not supported on Windows"')
        else:
            test_case(self, metric_name)

    return wrapper
def get_local_metric_names():
    metrics = [metric_dir.split(os.sep)[-2] for metric_dir in glob.glob("./metrics/*/")]
    return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"]  # gleu is unfinished
@parameterized.named_parameters(get_local_metric_names())
@for_all_test_methods(skip_if_metric_requires_fairseq, skip_if_metric_requires_transformers, skip_on_windows)
@local
class LocalMetricTest(parameterized.TestCase):
    INTENSIVE_CALLS_PATCHER = {}
    metric_name = None
    @pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
    @pytest.mark.filterwarnings("ignore:load_metric is deprecated:FutureWarning")
    def test_load_metric(self, metric_name):
        doctest.ELLIPSIS_MARKER = "[...]"
        metric_module = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join("metrics", metric_name)).module_path
        )
        metric = datasets.load.import_main_class(metric_module.__name__, dataset=False)
        # check parameters
        parameters = inspect.signature(metric._compute).parameters
        self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values()))  # no **kwargs
        # run doctest
        with self.patch_intensive_calls(metric_name, metric_module.__name__):
            with self.use_local_metrics():
                try:
                    results = doctest.testmod(metric_module, verbose=True, raise_on_error=True)
                except doctest.UnexpectedException as e:
                    raise e.exc_info[1]  # raise the exception that doctest caught
        self.assertEqual(results.failed, 0)
        self.assertGreater(results.attempted, 1)
    @slow
    def test_load_real_metric(self, metric_name):
        doctest.ELLIPSIS_MARKER = "[...]"
        metric_module = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join("metrics", metric_name)).module_path
        )
        # run doctest
        with self.use_local_metrics():
            results = doctest.testmod(metric_module, verbose=True, raise_on_error=True)
        self.assertEqual(results.failed, 0)
        self.assertGreater(results.attempted, 1)
    @contextmanager
    def patch_intensive_calls(self, metric_name, module_name):
        if metric_name in self.INTENSIVE_CALLS_PATCHER:
            with self.INTENSIVE_CALLS_PATCHER[metric_name](module_name):
                yield
        else:
            yield
    @contextmanager
    def use_local_metrics(self):
        def load_local_metric(metric_name, *args, **kwargs):
            return load_metric(os.path.join("metrics", metric_name), *args, **kwargs)

        with patch("datasets.load_metric") as mock_load_metric:
            mock_load_metric.side_effect = load_local_metric
            yield
    @classmethod
    def register_intensive_calls_patcher(cls, metric_name):
        def wrapper(patcher):
            patcher = contextmanager(patcher)
            cls.INTENSIVE_CALLS_PATCHER[metric_name] = patcher
            return patcher

        return wrapper
@LocalMetricTest.register_intensive_calls_patcher("bleurt")
def patch_bleurt(module_name):
    import tensorflow.compat.v1 as tf
    from bleurt.score import Predictor

    tf.flags.DEFINE_string("sv", "", "")  # handle pytest cli flags

    class MockedPredictor(Predictor):
        def predict(self, input_dict):
            assert len(input_dict["input_ids"]) == 2
            return np.array([1.03, 1.04])

    # mock predict_fn which is supposed to do a forward pass with a bleurt model
    with patch("bleurt.score._create_predictor") as mock_create_predictor:
        mock_create_predictor.return_value = MockedPredictor()
        yield
@LocalMetricTest.register_intensive_calls_patcher("bertscore")
def patch_bertscore(module_name):
    import torch

    def bert_cos_score_idf(model, refs, *args, **kwargs):
        return torch.tensor([[1.0, 1.0, 1.0]] * len(refs))

    # mock get_model which is supposed to do download a bert model
    # mock bert_cos_score_idf which is supposed to do a forward pass with a bert model
    with patch("bert_score.scorer.get_model"), patch(
        "bert_score.scorer.bert_cos_score_idf"
    ) as mock_bert_cos_score_idf:
        mock_bert_cos_score_idf.side_effect = bert_cos_score_idf
        yield
@LocalMetricTest.register_intensive_calls_patcher("comet")
def patch_comet(module_name):
    def load_from_checkpoint(model_path):
        class Model:
            def predict(self, data, *args, **kwargs):
                assert len(data) == 2
                scores = [0.19, 0.92]
                return scores, sum(scores) / len(scores)

        return Model()

    # mock load_from_checkpoint which is supposed to do download a bert model
    with patch("comet.download_model") as mock_download_model:
        mock_download_model.return_value = None
        with patch("comet.load_from_checkpoint") as mock_load_from_checkpoint:
            mock_load_from_checkpoint.side_effect = load_from_checkpoint
            yield
def test_seqeval_raises_when_incorrect_scheme():
    metric = load_metric(os.path.join("metrics", "seqeval"))
    wrong_scheme = "ERROR"
    error_message = f"Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}"
    with pytest.raises(ValueError, match=re.escape(error_message)):
        metric.compute(predictions=[], references=[], scheme=wrong_scheme)
| 291
| 0
|
"""simple docstring"""
def hexagonal_numbers(length: int) -> list[int]:
    """simple docstring"""
    if length <= 0 or not isinstance(length, int):
        raise ValueError('Length must be a positive integer.')
    return [n * (2 * n - 1) for n in range(length)]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
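    # Expected output, the first hexagonal numbers n * (2n - 1) for n = 0, 1, 2, ...:
    # [0, 1, 6, 15, 28]
    # [0, 1, 6, 15, 28, 45, 66, 91, 120, 153]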
| 293
|
def max_product_subarray(numbers) -> int:
    if not numbers:
        return 0

    if not isinstance(numbers, (list, tuple)) or not all(
        isinstance(number, int) for number in numbers
    ):
        raise ValueError('numbers must be an iterable of integers')

    max_till_now = min_till_now = max_prod = numbers[0]

    for i in range(1, len(numbers)):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            max_till_now, min_till_now = min_till_now, max_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)

        # update the maximum product found till now
        max_prod = max(max_prod, max_till_now)

    return max_prod
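# A brief sanity check (illustrative, not part of the original snippet): the
# maximum-product subarray of [2, 3, -2, 4] is [2, 3], with product 6.
if __name__ == "__main__":
    assert max_product_subarray([2, 3, -2, 4]) == 6
    assert max_product_subarray([-2, 0, -1]) == 0
    print(max_product_subarray([2, 3, -2, 4]))  # 6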
| 21
| 0
|
'''simple docstring'''
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
__lowerCAmelCase : Any =logging.get_logger(__name__)
__lowerCAmelCase : Optional[Any] ={"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
__lowerCAmelCase : List[str] ={
"vocab_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
},
"merges_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
},
"tokenizer_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
},
}
__lowerCAmelCase : Tuple ={
"allenai/led-base-16384": 1_6384,
}
class LEDTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LEDTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self, vocab_file=None, merges_file=None, tokenizer_file=None, errors="replace",
        bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>",
        pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, trim_offsets=True, **kwargs,
    ):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors, bos_token=bos_token,
            eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token,
            pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets, **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
    @property
    # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs.")

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs.")

        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return encoded_inputs
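# Illustrative usage sketch (not part of the original module); the checkpoint name
# comes from the pretrained map above, the caller-side lines are assumptions:
#   tok = LEDTokenizerFast.from_pretrained("allenai/led-base-16384")
#   enc = tok("a long document")
#   enc["global_attention_mask"] = [1] + [0] * (len(enc["input_ids"]) - 1)  # global attention on <s>
#   padded = tok.pad(enc, padding="max_length", max_length=32)  # _pad fills the tail with -1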
| 123
|
'''simple docstring'''
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCAmelCase : Union[str, Any] =logging.get_logger(__name__)
__lowerCAmelCase : Optional[int] =[
["attention", "attn"],
["encoder_attention", "encoder_attn"],
["q_lin", "q_proj"],
["k_lin", "k_proj"],
["v_lin", "v_proj"],
["out_lin", "out_proj"],
["norm_embeddings", "layernorm_embedding"],
["position_embeddings", "embed_positions"],
["embeddings", "embed_tokens"],
["ffn.lin", "fc"],
]
def rename_state_dict_key(k):
    if k == "embeddings.weight":
        return "shared.weight"

    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)

    if k.startswith("encoder"):
        k = k.replace(".attn", ".self_attn")
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "final_layer_norm")
    elif k.startswith("decoder"):
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "encoder_attn_layer_norm")
        k = k.replace("norm3", "final_layer_norm")
    return k
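# For example, rename_state_dict_key("encoder.layers.0.attention.q_lin.weight")
# returns "encoder.layers.0.self_attn.q_proj.weight".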
def rename_layernorm_keys(sd):
    keys = [
        "model.encoder.layernorm_embedding.weight",
        "model.encoder.layernorm_embedding.bias",
        "model.decoder.layernorm_embedding.weight",
        "model.decoder.layernorm_embedding.bias",
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace("layernorm_embedding", "layer_norm")
        assert new_k not in sd
        sd[new_k] = v
__lowerCAmelCase : Optional[int] =["START"]
@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    model = torch.load(checkpoint_path, map_location="cpu")
    sd = model["model"]
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue

        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
__lowerCAmelCase : Union[str, Any] =argparse.ArgumentParser()
# Required parameters
parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
parser.add_argument(
"--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
)
__lowerCAmelCase : Union[str, Any] =parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
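    # Example invocation, using the defaults above (script name and paths are illustrative):
    #   python convert_blenderbot_checkpoint.py \
    #       --src_path blenderbot-model.bin --save_dir hf_blenderbot \
    #       --hf_config_json blenderbot-3b-config.json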
| 123
| 1
|