from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNet2DConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class AudioDiffusionPipeline(DiffusionPipeline):
    """Pipeline for audio diffusion: denoises mel spectrogram images with a UNet and
    converts them back to audio via the `Mel` helper."""

    _optional_components = ["vqvae"]

    def __init__(
        self,
        vqvae: AutoencoderKL,
        unet: UNet2DConditionModel,
        mel: Mel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
    ):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, mel=mel, vqvae=vqvae)

    def get_default_steps(self) -> int:
        """Default number of denoising steps (DDIM converges in far fewer steps than DDPM)."""
        return 50 if isinstance(self.scheduler, DDIMScheduler) else 1000
    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        audio_file: str = None,
        raw_audio: np.ndarray = None,
        slice: int = 0,
        start_step: int = 0,
        steps: int = None,
        generator: torch.Generator = None,
        mask_start_secs: float = 0,
        mask_end_secs: float = 0,
        step_generator: torch.Generator = None,
        eta: float = 0,
        noise: torch.Tensor = None,
        encoding: torch.Tensor = None,
        return_dict=True,
    ) -> Union[
        Union[AudioPipelineOutput, ImagePipelineOutput],
        Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
    ]:
        """Generate (or regenerate) audio by denoising a mel spectrogram image."""
        steps = steps or self.get_default_steps()
        self.scheduler.set_timesteps(steps)
        step_generator = step_generator or generator
        # For backwards compatibility
        if type(self.unet.config.sample_size) == int:
            self.unet.config.sample_size = (self.unet.config.sample_size, self.unet.config.sample_size)
        if noise is None:
            noise = randn_tensor(
                (
                    batch_size,
                    self.unet.config.in_channels,
                    self.unet.config.sample_size[0],
                    self.unet.config.sample_size[1],
                ),
                generator=generator,
                device=self.device,
            )
        images = noise
        mask = None

        if audio_file is not None or raw_audio is not None:
            self.mel.load_audio(audio_file, raw_audio)
            input_image = self.mel.audio_slice_to_image(slice)
            input_image = np.frombuffer(input_image.tobytes(), dtype="uint8").reshape(
                (input_image.height, input_image.width)
            )
            input_image = (input_image / 255) * 2 - 1
            input_images = torch.tensor(input_image[np.newaxis, :, :], dtype=torch.float).to(self.device)

            if self.vqvae is not None:
                input_images = self.vqvae.encode(torch.unsqueeze(input_images, 0)).latent_dist.sample(
                    generator=generator
                )[0]
                input_images = self.vqvae.config.scaling_factor * input_images

            if start_step > 0:
                images[0, 0] = self.scheduler.add_noise(input_images, noise, self.scheduler.timesteps[start_step - 1])

            pixels_per_second = (
                self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
            )
            mask_start = int(mask_start_secs * pixels_per_second)
            mask_end = int(mask_end_secs * pixels_per_second)
            mask = self.scheduler.add_noise(input_images, noise, torch.tensor(self.scheduler.timesteps[start_step:]))

        for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:])):
            if isinstance(self.unet, UNet2DConditionModel):
                model_output = self.unet(images, t, encoding)["sample"]
            else:
                model_output = self.unet(images, t)["sample"]

            if isinstance(self.scheduler, DDIMScheduler):
                images = self.scheduler.step(
                    model_output=model_output,
                    timestep=t,
                    sample=images,
                    eta=eta,
                    generator=step_generator,
                )["prev_sample"]
            else:
                images = self.scheduler.step(
                    model_output=model_output,
                    timestep=t,
                    sample=images,
                    generator=step_generator,
                )["prev_sample"]

            if mask is not None:
                if mask_start > 0:
                    images[:, :, :, :mask_start] = mask[:, step, :, :mask_start]
                if mask_end > 0:
                    images[:, :, :, -mask_end:] = mask[:, step, :, -mask_end:]

        if self.vqvae is not None:
            # 0.18215 was scaling factor used in training to ensure unit variance
            images = 1 / self.vqvae.config.scaling_factor * images
            images = self.vqvae.decode(images)["sample"]

        images = (images / 2 + 0.5).clamp(0, 1)
        images = images.cpu().permute(0, 2, 3, 1).numpy()
        images = (images * 255).round().astype("uint8")
        images = list(
            (Image.fromarray(_[:, :, 0]) for _ in images)
            if images.shape[3] == 1
            else (Image.fromarray(_, mode="RGB").convert("L") for _ in images)
        )

        audios = [self.mel.image_to_audio(_) for _ in images]
        if not return_dict:
            return images, (self.mel.get_sample_rate(), audios)

        return BaseOutput(**AudioPipelineOutput(np.array(audios)[:, np.newaxis, :]), **ImagePipelineOutput(images))
    @torch.no_grad()
    def encode(self, images: List[Image.Image], steps: int = 50) -> np.ndarray:
        """Reverse the denoising loop: map spectrogram images back to noise.
        Only deterministic DDIM supports this inversion."""
        assert isinstance(self.scheduler, DDIMScheduler)
        self.scheduler.set_timesteps(steps)
        sample = np.array(
            [np.frombuffer(image.tobytes(), dtype="uint8").reshape((1, image.height, image.width)) for image in images]
        )
        sample = (sample / 255) * 2 - 1
        sample = torch.Tensor(sample).to(self.device)

        for t in self.progress_bar(torch.flip(self.scheduler.timesteps, (0,))):
            prev_timestep = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
            alpha_prod_t = self.scheduler.alphas_cumprod[t]
            alpha_prod_t_prev = (
                self.scheduler.alphas_cumprod[prev_timestep]
                if prev_timestep >= 0
                else self.scheduler.final_alpha_cumprod
            )
            beta_prod_t = 1 - alpha_prod_t
            model_output = self.unet(sample, t)["sample"]
            pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * model_output
            sample = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
            sample = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output

        return sample
    @staticmethod
    def slerp(x0: torch.Tensor, x1: torch.Tensor, alpha: float) -> torch.Tensor:
        """Spherical linear interpolation between two (flattened) tensors."""
        theta = acos(torch.dot(torch.flatten(x0), torch.flatten(x1)) / torch.norm(x0) / torch.norm(x1))
        return sin((1 - alpha) * theta) * x0 / sin(theta) + sin(alpha * theta) * x1 / sin(theta)
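

# Usage sketch (not part of the original file): loading a pretrained audio diffusion
# checkpoint and generating one spectrogram/audio pair. The model id is illustrative;
# substitute any checkpoint trained for this pipeline.
#
#   pipe = AudioDiffusionPipeline.from_pretrained("teticio/audio-diffusion-256")
#   output = pipe(batch_size=1)
#   image = output.images[0]  # mel spectrogram as a PIL image
#   sample_rate, audio = pipe.mel.get_sample_rate(), output.audios[0]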
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
    # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
    rename_keys.append(
        (f"transformer.encoder.layers.{i}.self_attn.out_proj.weight", f"encoder.layers.{i}.self_attn.out_proj.weight")
    )
    rename_keys.append(
        (f"transformer.encoder.layers.{i}.self_attn.out_proj.bias", f"encoder.layers.{i}.self_attn.out_proj.bias")
    )
    rename_keys.append((f"transformer.encoder.layers.{i}.linear1.weight", f"encoder.layers.{i}.fc1.weight"))
    rename_keys.append((f"transformer.encoder.layers.{i}.linear1.bias", f"encoder.layers.{i}.fc1.bias"))
    rename_keys.append((f"transformer.encoder.layers.{i}.linear2.weight", f"encoder.layers.{i}.fc2.weight"))
    rename_keys.append((f"transformer.encoder.layers.{i}.linear2.bias", f"encoder.layers.{i}.fc2.bias"))
    rename_keys.append(
        (f"transformer.encoder.layers.{i}.norm1.weight", f"encoder.layers.{i}.self_attn_layer_norm.weight")
    )
    rename_keys.append((f"transformer.encoder.layers.{i}.norm1.bias", f"encoder.layers.{i}.self_attn_layer_norm.bias"))
    rename_keys.append((f"transformer.encoder.layers.{i}.norm2.weight", f"encoder.layers.{i}.final_layer_norm.weight"))
    rename_keys.append((f"transformer.encoder.layers.{i}.norm2.bias", f"encoder.layers.{i}.final_layer_norm.bias"))
    # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.self_attn.out_proj.weight", f"decoder.layers.{i}.self_attn.out_proj.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.self_attn.out_proj.bias", f"decoder.layers.{i}.self_attn.out_proj.bias")
    )
    rename_keys.append(
        (
            f"transformer.decoder.layers.{i}.cross_attn.out_proj.weight",
            f"decoder.layers.{i}.encoder_attn.out_proj.weight",
        )
    )
    rename_keys.append(
        (
            f"transformer.decoder.layers.{i}.cross_attn.out_proj.bias",
            f"decoder.layers.{i}.encoder_attn.out_proj.bias",
        )
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.linear1.weight", f"decoder.layers.{i}.fc1.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.linear1.bias", f"decoder.layers.{i}.fc1.bias"))
    rename_keys.append((f"transformer.decoder.layers.{i}.linear2.weight", f"decoder.layers.{i}.fc2.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.linear2.bias", f"decoder.layers.{i}.fc2.bias"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.norm1.weight", f"decoder.layers.{i}.self_attn_layer_norm.weight")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.norm1.bias", f"decoder.layers.{i}.self_attn_layer_norm.bias"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.norm2.weight", f"decoder.layers.{i}.encoder_attn_layer_norm.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.norm2.bias", f"decoder.layers.{i}.encoder_attn_layer_norm.bias")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.norm3.weight", f"decoder.layers.{i}.final_layer_norm.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.norm3.bias", f"decoder.layers.{i}.final_layer_norm.bias"))
    # q, k, v projections in self/cross-attention in decoder for conditional DETR
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.sa_qcontent_proj.weight", f"decoder.layers.{i}.sa_qcontent_proj.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.sa_kcontent_proj.weight", f"decoder.layers.{i}.sa_kcontent_proj.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.sa_qpos_proj.weight", f"decoder.layers.{i}.sa_qpos_proj.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.sa_kpos_proj.weight", f"decoder.layers.{i}.sa_kpos_proj.weight")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.sa_v_proj.weight", f"decoder.layers.{i}.sa_v_proj.weight"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.ca_qcontent_proj.weight", f"decoder.layers.{i}.ca_qcontent_proj.weight")
    )
    # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.ca_kcontent_proj.weight", f"decoder.layers.{i}.ca_kcontent_proj.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.ca_kpos_proj.weight", f"decoder.layers.{i}.ca_kpos_proj.weight")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.ca_v_proj.weight", f"decoder.layers.{i}.ca_v_proj.weight"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight", f"decoder.layers.{i}.ca_qpos_sine_proj.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.sa_qcontent_proj.bias", f"decoder.layers.{i}.sa_qcontent_proj.bias")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.sa_kcontent_proj.bias", f"decoder.layers.{i}.sa_kcontent_proj.bias")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.sa_qpos_proj.bias", f"decoder.layers.{i}.sa_qpos_proj.bias"))
    rename_keys.append((f"transformer.decoder.layers.{i}.sa_kpos_proj.bias", f"decoder.layers.{i}.sa_kpos_proj.bias"))
    rename_keys.append((f"transformer.decoder.layers.{i}.sa_v_proj.bias", f"decoder.layers.{i}.sa_v_proj.bias"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.ca_qcontent_proj.bias", f"decoder.layers.{i}.ca_qcontent_proj.bias")
    )
    # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.ca_kcontent_proj.bias", f"decoder.layers.{i}.ca_kcontent_proj.bias")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.ca_kpos_proj.bias", f"decoder.layers.{i}.ca_kpos_proj.bias"))
    rename_keys.append((f"transformer.decoder.layers.{i}.ca_v_proj.bias", f"decoder.layers.{i}.ca_v_proj.bias"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias", f"decoder.layers.{i}.ca_qpos_sine_proj.bias")
    )

# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
    [
        ("input_proj.weight", "input_projection.weight"),
        ("input_proj.bias", "input_projection.bias"),
        ("query_embed.weight", "query_position_embeddings.weight"),
        ("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
        ("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
        ("class_embed.weight", "class_labels_classifier.weight"),
        ("class_embed.bias", "class_labels_classifier.bias"),
        ("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
        ("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
        ("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
        ("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
        ("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
        ("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
        ("transformer.decoder.ref_point_head.layers.0.weight", "decoder.ref_point_head.layers.0.weight"),
        ("transformer.decoder.ref_point_head.layers.0.bias", "decoder.ref_point_head.layers.0.bias"),
        ("transformer.decoder.ref_point_head.layers.1.weight", "decoder.ref_point_head.layers.1.weight"),
        ("transformer.decoder.ref_point_head.layers.1.bias", "decoder.ref_point_head.layers.1.bias"),
        ("transformer.decoder.query_scale.layers.0.weight", "decoder.query_scale.layers.0.weight"),
        ("transformer.decoder.query_scale.layers.0.bias", "decoder.query_scale.layers.0.bias"),
        ("transformer.decoder.query_scale.layers.1.weight", "decoder.query_scale.layers.1.weight"),
        ("transformer.decoder.query_scale.layers.1.bias", "decoder.query_scale.layers.1.bias"),
        ("transformer.decoder.layers.0.ca_qpos_proj.weight", "decoder.layers.0.ca_qpos_proj.weight"),
        ("transformer.decoder.layers.0.ca_qpos_proj.bias", "decoder.layers.0.ca_qpos_proj.bias"),
    ]
)
def rename_key(state_dict, old, new):
    # move a tensor from its original key to the renamed key
    val = state_dict.pop(old)
    state_dict[new] = val


def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ""
    if is_panoptic:
        prefix = "conditional_detr."

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
def prepare_img():
    # image of two cats used across the conversion scripts for verification
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_conditional_detr_checkpoint(model_name, pytorch_dump_folder_path):
    """Copy/paste/tweak the original model's weights into our Conditional DETR structure."""
    # load default config
    config = ConditionalDetrConfig()
    # set backbone and dilation attributes
    if "resnet101" in model_name:
        config.backbone = "resnet101"
    if "dc5" in model_name:
        config.dilation = True
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = "huggingface/label-files"
        filename = "coco-detection-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    # load image processor
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    image_processor = ConditionalDetrImageProcessor(format=format)

    # prepare image
    img = prepare_img()
    encoding = image_processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    logger.info(f"Converting model {model_name}...")

    # load original model from torch hub
    conditional_detr = torch.hub.load("DeppMeng/ConditionalDETR", model_name, pretrained=True).eval()
    state_dict = conditional_detr.state_dict()
    # rename keys
    for src, dest in rename_keys:
        if is_panoptic:
            src = "conditional_detr." + src
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "conditional_detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("conditional_detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                state_dict["conditional_detr.model" + key[len("conditional_detr"):]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["conditional_detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = ConditionalDetrForSegmentation(config) if is_panoptic else ConditionalDetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    model.push_to_hub(repo_id=model_name, organization="DepuMeng", commit_message="Add model")
    # verify our conversion
    original_outputs = conditional_detr(pixel_values)
    outputs = model(pixel_values)
    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-4)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-4)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)

    # Save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--model_name",
        default="conditional_detr_resnet50",
        type=str,
        help="Name of the CONDITIONAL_DETR model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    args = parser.parse_args()
    convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
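

# Example invocation (a sketch, not from the original file): the model name must match
# one exposed by the DeppMeng/ConditionalDETR torch hub repo.
#
#   python convert_conditional_detr_original_pytorch_checkpoint_to_pytorch.py \
#       --model_name conditional_detr_resnet50 \
#       --pytorch_dump_folder_path ./conditional_detr_resnet50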
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
CI_HUB_USER = "__DUMMY_TRANSFORMERS_USER__"
CI_HUB_USER_FULL_NAME = "Dummy User"
CI_HUB_USER_TOKEN = "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"

CI_HUB_ENDPOINT = "https://hub-ci.huggingface.co"
CI_HUB_DATASETS_URL = CI_HUB_ENDPOINT + "/datasets/{repo_id}/resolve/{revision}/{path}"
CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE = CI_HUB_ENDPOINT + "/{repo_id}/resolve/{revision}/{filename}"
CI_HUB_TOKEN_PATH = Path("~/.huggingface/hub_ci_token").expanduser()
@pytest.fixture
def ci_hfh_hf_hub_url(monkeypatch):
    monkeypatch.setattr(
        "huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE", CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE
    )


@pytest.fixture
def ci_hub_config(monkeypatch):
    monkeypatch.setattr("datasets.config.HF_ENDPOINT", CI_HUB_ENDPOINT)
    monkeypatch.setattr("datasets.config.HUB_DATASETS_URL", CI_HUB_DATASETS_URL)


@pytest.fixture
def ci_hub_token_path(monkeypatch):
    monkeypatch.setattr("huggingface_hub.hf_api.HfFolder.path_token", CI_HUB_TOKEN_PATH)


@pytest.fixture
def set_ci_hub_access_token(ci_hub_config, ci_hub_token_path):
    HfFolder.save_token(CI_HUB_USER_TOKEN)
    yield
    HfFolder.delete_token()


@pytest.fixture(scope="session")
def hf_api():
    return HfApi(endpoint=CI_HUB_ENDPOINT)


@pytest.fixture(scope="session")
def hf_token(hf_api: HfApi):
    previous_token = HfFolder.get_token()
    HfFolder.save_token(CI_HUB_USER_TOKEN)
    yield CI_HUB_USER_TOKEN
    if previous_token is not None:
        HfFolder.save_token(previous_token)


@pytest.fixture
def cleanup_repo(hf_api):
    def _cleanup_repo(repo_id):
        hf_api.delete_repo(repo_id, token=CI_HUB_USER_TOKEN, repo_type="dataset")

    return _cleanup_repo


@pytest.fixture
def temporary_repo(cleanup_repo):
    @contextmanager
    def _temporary_repo(repo_id):
        try:
            yield repo_id
        finally:
            cleanup_repo(repo_id)

    return _temporary_repo


@pytest.fixture(scope="session")
def hf_private_dataset_repo_txt_data_(hf_api: HfApi, hf_token, text_file):
    repo_name = f"repo_txt_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(text_file),
        path_in_repo="data/text_data.txt",
        repo_id=repo_id,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_txt_data(hf_private_dataset_repo_txt_data_, ci_hub_config, ci_hfh_hf_hub_url):
    return hf_private_dataset_repo_txt_data_


@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_txt_data_(hf_api: HfApi, hf_token, zip_csv_with_dir_path):
    repo_name = f"repo_zipped_txt_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(zip_csv_with_dir_path),
        path_in_repo="data.zip",
        repo_id=repo_id,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_zipped_txt_data(hf_private_dataset_repo_zipped_txt_data_, ci_hub_config, ci_hfh_hf_hub_url):
    return hf_private_dataset_repo_zipped_txt_data_


@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_img_data_(hf_api: HfApi, hf_token, zip_image_path):
    repo_name = f"repo_zipped_img_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(zip_image_path),
        path_in_repo="data.zip",
        repo_id=repo_id,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_zipped_img_data(hf_private_dataset_repo_zipped_img_data_, ci_hub_config, ci_hfh_hf_hub_url):
    return hf_private_dataset_repo_zipped_img_data_
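

# Usage sketch (not from the original file): a test that leans on `temporary_repo`
# so the throwaway dataset repo is deleted even if the assertions fail.
#
#   def test_upload_roundtrip(temporary_repo, hf_api, hf_token):
#       with temporary_repo(f"{CI_HUB_USER}/tmp-{int(time.time())}") as repo_id:
#           hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset")
#           ...  # upload files, load them back with `datasets`, assert equality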
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/canine-s": "https://huggingface.co/google/canine-s/resolve/main/config.json",
    # See all CANINE models at https://huggingface.co/models?filter=canine
}
class CanineConfig(PretrainedConfig):
    """Configuration class for CANINE; the default values mirror google/canine-s."""

    model_type = "canine"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=16384,
        type_vocab_size=16,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0xE000,
        eos_token_id=0xE001,
        downsampling_rate=4,
        upsampling_kernel_size=4,
        num_hash_functions=8,
        num_hash_buckets=16384,
        local_transformer_stride=128,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        # Character config:
        self.downsampling_rate = downsampling_rate
        self.upsampling_kernel_size = upsampling_kernel_size
        self.num_hash_functions = num_hash_functions
        self.num_hash_buckets = num_hash_buckets
        self.local_transformer_stride = local_transformer_stride
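

# Usage sketch (not from the original file): instantiating the default configuration
# yields a randomly initialized model the size of google/canine-s.
#
#   from transformers import CanineConfig, CanineModel
#   config = CanineConfig()
#   model = CanineModel(config)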
import unittest

from diffusers.models.unet_2d_blocks import *  # noqa F403
from diffusers.utils import torch_device

from .test_unet_blocks_common import UNetBlockTesterMixin


class DownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = DownBlock2D  # noqa F405
    block_type = "down"

    def test_output(self):
        expected_slice = [-0.0232, -0.9869, 0.8054, -0.0637, -0.1688, -1.4264, 0.4470, -1.3394, 0.0904]
        super().test_output(expected_slice)


class ResnetDownsampleBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = ResnetDownsampleBlock2D  # noqa F405
    block_type = "down"

    def test_output(self):
        expected_slice = [0.0710, 0.2410, -0.7320, -1.0757, -1.1343, 0.3540, -0.0133, -0.2576, 0.0948]
        super().test_output(expected_slice)


class AttnDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnDownBlock2D  # noqa F405
    block_type = "down"

    def test_output(self):
        expected_slice = [0.0636, 0.8964, -0.6234, -1.0131, 0.0844, 0.4935, 0.3437, 0.0911, -0.2957]
        super().test_output(expected_slice)


class CrossAttnDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = CrossAttnDownBlock2D  # noqa F405
    block_type = "down"

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.2238, -0.7396, -0.2255, -0.3829, 0.1925, 1.1665, 0.0603, -0.7295, 0.1983]
        super().test_output(expected_slice)


class SimpleCrossAttnDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SimpleCrossAttnDownBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_encoder_hidden_states=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    @unittest.skipIf(torch_device == "mps", "MPS result is not consistent")
    def test_output(self):
        expected_slice = [0.7921, -0.0992, -0.1962, -0.7695, -0.4242, 0.7804, 0.4737, 0.2765, 0.3338]
        super().test_output(expected_slice)


class SkipDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SkipDownBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_skip_sample=True)

    def test_output(self):
        expected_slice = [-0.0845, -0.2087, -0.2465, 0.0971, 0.1900, -0.0484, 0.2664, 0.4179, 0.5069]
        super().test_output(expected_slice)


class AttnSkipDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnSkipDownBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_skip_sample=True)

    def test_output(self):
        expected_slice = [0.5539, 0.1609, 0.4924, 0.0537, -0.1995, 0.4050, 0.0979, -0.2721, -0.0642]
        super().test_output(expected_slice)


class DownEncoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = DownEncoderBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [1.1102, 0.5302, 0.4872, -0.0023, -0.8042, 0.0483, -0.3489, -0.5632, 0.7626]
        super().test_output(expected_slice)


class AttnDownEncoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnDownEncoderBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.8966, -0.1486, 0.8568, 0.8141, -0.9046, -0.1342, -0.0972, -0.7417, 0.1538]
        super().test_output(expected_slice)


class UNetMidBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UNetMidBlock2D  # noqa F405
    block_type = "mid"

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "temb_channels": 128,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [-0.1062, 1.7248, 0.3494, 1.4569, -0.0910, -1.2421, -0.9984, 0.6736, 1.0028]
        super().test_output(expected_slice)


class UNetMidBlock2DCrossAttnTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UNetMidBlock2DCrossAttn  # noqa F405
    block_type = "mid"

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.0187, 2.4220, 0.4484, 1.1203, -0.6121, -1.5122, -0.8270, 0.7851, 1.8335]
        super().test_output(expected_slice)


class UNetMidBlock2DSimpleCrossAttnTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UNetMidBlock2DSimpleCrossAttn  # noqa F405
    block_type = "mid"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_encoder_hidden_states=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.7143, 1.9974, 0.5448, 1.3977, 0.1282, -1.1237, -1.4238, 0.5530, 0.8880]
        super().test_output(expected_slice)


class UpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [-0.2041, -0.4165, -0.3022, 0.0041, -0.6628, -0.7053, 0.1928, -0.0325, 0.0523]
        super().test_output(expected_slice)


class ResnetUpsampleBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = ResnetUpsampleBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [0.2287, 0.3549, -0.1346, 0.4797, -0.1715, -0.9649, 0.7305, -0.5864, -0.6244]
        super().test_output(expected_slice)


class CrossAttnUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = CrossAttnUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [-0.1403, -0.3515, -0.0420, -0.1425, 0.3167, 0.5094, -0.2181, 0.5931, 0.5582]
        super().test_output(expected_slice)


class SimpleCrossAttnUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SimpleCrossAttnUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True, include_encoder_hidden_states=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.2645, 0.1480, 0.0909, 0.8044, -0.9758, -0.9083, 0.0994, -1.1453, -0.7402]
        super().test_output(expected_slice)


class AttnUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    @unittest.skipIf(torch_device == "mps", "MPS result is not consistent")
    def test_output(self):
        expected_slice = [0.0979, 0.1326, 0.0021, 0.0659, 0.2249, 0.0059, 0.1132, 0.5952, 0.1033]
        super().test_output(expected_slice)


class SkipUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SkipUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [-0.0893, -0.1234, -0.1506, -0.0332, 0.0123, -0.0211, 0.0566, 0.0143, 0.0362]
        super().test_output(expected_slice)


class AttnSkipUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnSkipUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [0.0361, 0.0617, 0.2787, -0.0350, 0.0342, 0.3421, -0.0843, 0.0913, 0.3015]
        super().test_output(expected_slice)


class UpDecoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UpDecoderBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {"in_channels": 32, "out_channels": 32}
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.4404, 0.1998, -0.9886, -0.3320, -0.3128, -0.7034, -0.6955, -0.2338, -0.3137]
        super().test_output(expected_slice)


class AttnUpDecoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnUpDecoderBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {"in_channels": 32, "out_channels": 32}
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.6738, 0.4491, 0.1055, 1.0710, 0.7316, 0.3339, 0.3352, 0.1023, 0.3568]
        super().test_output(expected_slice)
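

# Running these tests (a sketch, not from the original file; the path assumes the
# diffusers repo layout at the time this file was written):
#
#   python -m pytest tests/models/test_unet_2d_blocks.py -k "CrossAttnDownBlock2D" -v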
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)
class MaskFormerSwinConfig(BackboneConfigMixin, PretrainedConfig):
    """Configuration for the MaskFormer Swin backbone; defaults mirror the Swin-tiny variant."""

    model_type = "maskformer-swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
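

# Usage sketch (not from the original file): requesting backbone feature maps from
# specific stages; with the default depths, hidden_size is derived as 96 * 2 ** 3 == 768.
#
#   config = MaskFormerSwinConfig(out_features=["stage1", "stage2", "stage3", "stage4"])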
import argparse
import os
import shutil
from pathlib import Path
import onnx
import torch
from packaging import version
from torch.onnx import export
from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, StableDiffusionPipeline
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")
def onnx_export(
    model,
    model_args: tuple,
    output_path: Path,
    ordered_input_names,
    output_names,
    dynamic_axes,
    opset,
    use_external_data_format=False,
):
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(model, model_args, f=output_path.as_posix(), input_names=ordered_input_names, output_names=output_names, dynamic_axes=dynamic_axes, do_constant_folding=True, use_external_data_format=use_external_data_format, enable_onnx_checker=True, opset_version=opset)
    else:
        export(model, model_args, f=output_path.as_posix(), input_names=ordered_input_names, output_names=output_names, dynamic_axes=dynamic_axes, do_constant_folding=True, opset_version=opset)
@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False):
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = "cuda"
    elif fp16 and not torch.cuda.is_available():
        raise ValueError("`float16` model export is only supported on GPUs with CUDA")
    else:
        device = "cpu"
    pipeline = StableDiffusionPipeline.from_pretrained(model_path, torch_dtype=dtype).to(device)
    output_path = Path(output_path)

    # TEXT ENCODER
    num_tokens = pipeline.text_encoder.config.max_position_embeddings
    text_hidden_size = pipeline.text_encoder.config.hidden_size
    text_input = pipeline.tokenizer(
        "A sample prompt",
        padding="max_length",
        max_length=pipeline.tokenizer.model_max_length,
        truncation=True,
        return_tensors="pt",
    )
    onnx_export(
        pipeline.text_encoder,
        model_args=(text_input.input_ids.to(device=device, dtype=torch.int32)),
        output_path=output_path / "text_encoder" / "model.onnx",
        ordered_input_names=["input_ids"],
        output_names=["last_hidden_state", "pooler_output"],
        dynamic_axes={
            "input_ids": {0: "batch", 1: "sequence"},
        },
        opset=opset,
    )
    del pipeline.text_encoder

    # UNET
    unet_in_channels = pipeline.unet.config.in_channels
    unet_sample_size = pipeline.unet.config.sample_size
    unet_path = output_path / "unet" / "model.onnx"
    onnx_export(
        pipeline.unet,
        model_args=(
            torch.randn(2, unet_in_channels, unet_sample_size, unet_sample_size).to(device=device, dtype=dtype),
            torch.randn(2).to(device=device, dtype=dtype),
            torch.randn(2, num_tokens, text_hidden_size).to(device=device, dtype=dtype),
            False,
        ),
        output_path=unet_path,
        ordered_input_names=["sample", "timestep", "encoder_hidden_states", "return_dict"],
        output_names=["out_sample"],
        dynamic_axes={
            "sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
            "timestep": {0: "batch"},
            "encoder_hidden_states": {0: "batch", 1: "sequence"},
        },
        opset=opset,
        use_external_data_format=True,  # the UNet weights exceed the 2GB protobuf limit
    )
    unet_model_path = str(unet_path.absolute().as_posix())
    unet_dir = os.path.dirname(unet_model_path)
    unet = onnx.load(unet_model_path)
    # clean up existing tensor files
    shutil.rmtree(unet_dir)
    os.mkdir(unet_dir)
    # collate external tensor files into one
    onnx.save_model(
        unet,
        unet_model_path,
        save_as_external_data=True,
        all_tensors_to_one_file=True,
        location="weights.pb",
        convert_attribute=False,
    )
    del pipeline.unet

    # VAE ENCODER
    vae_encoder = pipeline.vae
    vae_in_channels = vae_encoder.config.in_channels
    vae_sample_size = vae_encoder.config.sample_size
    # need to get the raw tensor output (sample) from the encoder
    vae_encoder.forward = lambda sample, return_dict: vae_encoder.encode(sample, return_dict)[0].sample()
    onnx_export(
        vae_encoder,
        model_args=(
            torch.randn(1, vae_in_channels, vae_sample_size, vae_sample_size).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / "vae_encoder" / "model.onnx",
        ordered_input_names=["sample", "return_dict"],
        output_names=["latent_sample"],
        dynamic_axes={
            "sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        },
        opset=opset,
    )

    # VAE DECODER
    vae_decoder = pipeline.vae
    vae_latent_channels = vae_decoder.config.latent_channels
    vae_out_channels = vae_decoder.config.out_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_encoder.decode
    onnx_export(
        vae_decoder,
        model_args=(
            torch.randn(1, vae_latent_channels, unet_sample_size, unet_sample_size).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / "vae_decoder" / "model.onnx",
        ordered_input_names=["latent_sample", "return_dict"],
        output_names=["sample"],
        dynamic_axes={
            "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        },
        opset=opset,
    )
    del pipeline.vae

    # SAFETY CHECKER
    if pipeline.safety_checker is not None:
        safety_checker = pipeline.safety_checker
        clip_num_channels = safety_checker.config.vision_config.num_channels
        clip_image_size = safety_checker.config.vision_config.image_size
        safety_checker.forward = safety_checker.forward_onnx
        onnx_export(
            pipeline.safety_checker,
            model_args=(
                torch.randn(1, clip_num_channels, clip_image_size, clip_image_size).to(device=device, dtype=dtype),
                torch.randn(1, vae_sample_size, vae_sample_size, vae_out_channels).to(device=device, dtype=dtype),
            ),
            output_path=output_path / "safety_checker" / "model.onnx",
            ordered_input_names=["clip_input", "images"],
            output_names=["out_images", "has_nsfw_concepts"],
            dynamic_axes={
                "clip_input": {0: "batch", 1: "channels", 2: "height", 3: "width"},
                "images": {0: "batch", 1: "height", 2: "width", 3: "channels"},
            },
            opset=opset,
        )
        del pipeline.safety_checker
        safety_checker = OnnxRuntimeModel.from_pretrained(output_path / "safety_checker")
        feature_extractor = pipeline.feature_extractor
    else:
        safety_checker = None
        feature_extractor = None

    onnx_pipeline = OnnxStableDiffusionPipeline(
        vae_encoder=OnnxRuntimeModel.from_pretrained(output_path / "vae_encoder"),
        vae_decoder=OnnxRuntimeModel.from_pretrained(output_path / "vae_decoder"),
        text_encoder=OnnxRuntimeModel.from_pretrained(output_path / "text_encoder"),
        tokenizer=pipeline.tokenizer,
        unet=OnnxRuntimeModel.from_pretrained(output_path / "unet"),
        scheduler=pipeline.scheduler,
        safety_checker=safety_checker,
        feature_extractor=feature_extractor,
        requires_safety_checker=safety_checker is not None,
    )

    onnx_pipeline.save_pretrained(output_path)
    print("ONNX pipeline saved to", output_path)

    del pipeline
    del onnx_pipeline
    _ = OnnxStableDiffusionPipeline.from_pretrained(output_path, provider="CPUExecutionProvider")
    print("ONNX pipeline is loadable")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--model_path",
        type=str,
        required=True,
        help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
    )
    parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--opset",
        default=14,
        type=int,
        help="The version of the ONNX operator set to use.",
    )
    parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")

    args = parser.parse_args()
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
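

# Example invocation (a sketch, not from the original file; the model id and opset
# are illustrative):
#
#   python convert_stable_diffusion_checkpoint_to_onnx.py \
#       --model_path runwayml/stable-diffusion-v1-5 \
#       --output_path ./stable_diffusion_onnx --opset 14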
import argparse
import os
import re
import torch
from flax.traverse_util import flatten_dict
from t5x import checkpoints
from transformers import (
    AutoTokenizer,
    Pix2StructConfig,
    Pix2StructForConditionalGeneration,
    Pix2StructImageProcessor,
    Pix2StructProcessor,
    Pix2StructTextConfig,
    Pix2StructVisionConfig,
)
def get_flax_param(t5x_checkpoint_path):
    flax_params = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    flax_params = flatten_dict(flax_params)
    return flax_params
def rename_and_convert_flax_params(flax_dict):
    converted_dict = {}

    CONVERSION_MAPPING = {
        "token_embedder": "embeddings",
        "encoder_norm": "layernorm",
        "kernel": "weight",
        ".out": ".output",
        "scale": "weight",
        "embedders_0.pos_embedding": "row_embedder.weight",
        "embedders_1.pos_embedding": "column_embedder.weight",
    }

    DECODER_CONVERSION_MAPPING = {
        "query": "attention.query",
        "key": "attention.key",
        "value": "attention.value",
        "output.dense": "output",
        "encoder_decoder_attention.o": "encoder_decoder_attention.attention.o",
        "pre_self_attention_layer_norm": "self_attention.layer_norm",
        "pre_cross_attention_layer_norm": "encoder_decoder_attention.layer_norm",
        "mlp.": "mlp.DenseReluDense.",
        "pre_mlp_layer_norm": "mlp.layer_norm",
        "self_attention.o": "self_attention.attention.o",
        "decoder.embeddings.embedding": "decoder.embed_tokens.weight",
        "decoder.relpos_bias.rel_embedding": "decoder.layer.0.self_attention.attention.relative_attention_bias.weight",
        "decoder.decoder_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.logits_dense.weight": "decoder.lm_head.weight",
    }

    for key in flax_dict.keys():
        if "target" in key:
            # remove the first prefix from the key
            new_key = ".".join(key[1:])

            # rename the key
            for old, new in CONVERSION_MAPPING.items():
                new_key = new_key.replace(old, new)

            if "decoder" in new_key:
                for old, new in DECODER_CONVERSION_MAPPING.items():
                    new_key = new_key.replace(old, new)

            if "layers" in new_key and "decoder" not in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)
                new_key = new_key.replace("encoder", "encoder.encoder")
            elif "layers" in new_key and "decoder" in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)

            converted_dict[new_key] = flax_dict[key]

    converted_torch_dict = {}
    # convert converted_dict into torch format
    for key in converted_dict.keys():
        if ("embed_tokens" not in key) and ("embedder" not in key):
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key].T)
        else:
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key])

    return converted_torch_dict
def convert_pix2struct_original_pytorch_checkpoint_to_hf(
    t5x_checkpoint_path, pytorch_dump_folder_path, use_large=False, is_vqa=False
):
    flax_params = get_flax_param(t5x_checkpoint_path)

    if not use_large:
        encoder_config = Pix2StructVisionConfig()
        decoder_config = Pix2StructTextConfig()
    else:
        encoder_config = Pix2StructVisionConfig(
            hidden_size=1536, d_ff=3968, num_attention_heads=24, num_hidden_layers=18
        )
        decoder_config = Pix2StructTextConfig(hidden_size=1536, d_ff=3968, num_heads=24, num_layers=18)
    config = Pix2StructConfig(
        vision_config=encoder_config.to_dict(), text_config=decoder_config.to_dict(), is_vqa=is_vqa
    )

    model = Pix2StructForConditionalGeneration(config)

    torch_params = rename_and_convert_flax_params(flax_params)
    model.load_state_dict(torch_params)

    tokenizer = AutoTokenizer.from_pretrained("ybelkada/test-pix2struct-tokenizer")
    image_processor = Pix2StructImageProcessor()
    processor = Pix2StructProcessor(image_processor=image_processor, tokenizer=tokenizer)

    if use_large:
        processor.image_processor.max_patches = 4096

    processor.image_processor.is_vqa = True

    # mkdir if needed
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)

    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)

    print("Model saved in {}".format(pytorch_dump_folder_path))
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--t5x_checkpoint_path", default=None, type=str, help="Path to the original T5x checkpoint.")
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--use_large", action="store_true", help="Use large model.")
    parser.add_argument("--is_vqa", action="store_true", help="Whether the checkpoint is for a VQA model.")
    args = parser.parse_args()
    convert_pixastruct_original_pytorch_checkpoint_to_hf(
        args.t5x_checkpoint_path, args.pytorch_dump_folder_path, args.use_large, args.is_vqa
    )
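# Illustrative invocation of this conversion script (all paths are hypothetical placeholders):
#   python convert_pix2struct_checkpoint.py \
#       --t5x_checkpoint_path /path/to/t5x/checkpoint \
#       --pytorch_dump_folder_path ./pix2struct-base
# Add --use_large for the large variant and --is_vqa for VQA checkpoints.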
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('''TEST_SAGEMAKER''' , '''False''' ) ) is not True , reason='''Skipping test because should only be run when releasing minor transformers version''' , )
@pytest.mark.usefixtures('''sm_env''' )
@parameterized_class(
[
{
'''framework''': '''pytorch''',
'''script''': '''run_glue.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.p3.16xlarge''',
'''results''': {'''train_runtime''': 650, '''eval_accuracy''': 0.7, '''eval_loss''': 0.6},
},
{
'''framework''': '''pytorch''',
'''script''': '''run_ddp.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.p3.16xlarge''',
'''results''': {'''train_runtime''': 600, '''eval_accuracy''': 0.7, '''eval_loss''': 0.6},
},
{
'''framework''': '''tensorflow''',
'''script''': '''run_tf_dist.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.p3.16xlarge''',
'''results''': {'''train_runtime''': 600, '''eval_accuracy''': 0.6, '''eval_loss''': 0.7},
},
] )
class MultiNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")
    def create_estimator(self, instance_count):
        job_name = f"{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}"
        # distributed data settings
        distribution = {"smdistributed": {"dataparallel": {"enabled": True}}} if self.script != "run_ddp.py" else None
        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=job_name,
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={**self.env.distributed_hyperparameters, "model_name_or_path": self.model_name_or_path},
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )
    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")
    @parameterized.expand([(2,)])
    def test_script(self, instance_count):
        estimator = self.create_estimator(instance_count)
        # run training
        estimator.fit()
        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)
        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
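# These tests are only meaningful around a release: they need real AWS credentials, a
# SageMaker execution role, and the `sm_env` pytest fixture. Illustrative launch (test
# path is an assumption about the repo layout):
#   TEST_SAGEMAKER=True python -m pytest tests/sagemaker -k MultiNodeTest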
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
    BarthezTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez-orangesum-title': (
'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'
),
},
'tokenizer_file': {
'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json',
'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json',
'moussaKam/barthez-orangesum-title': (
'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'moussaKam/mbarthez': 1024,
'moussaKam/barthez': 1024,
'moussaKam/barthez-orangesum-title': 1024,
}
SPIECE_UNDERLINE = "▁"
class BarthezTokenizerFast(PreTrainedTokenizerFast):
    """Construct a "fast" BARThez tokenizer, backed by HuggingFace's *tokenizers* library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BarthezTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
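# Minimal usage sketch (downloads the checkpoint named in the maps above from the Hub):
#   tokenizer = BarthezTokenizerFast.from_pretrained("moussaKam/barthez")
#   ids = tokenizer("Paris est la capitale de la France.")["input_ids"]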
DIGITS_SQUARED = [sum(int(c) ** 2 for c in str(i)) for i in range(100000)]
def next_number(number: int) -> int:
    """Returns the next number of the chain by adding the squares of each digit."""
    sum_of_digits_squared = 0
    while number:
        # Increased Speed Slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 100000]
        number //= 100000
    return sum_of_digits_squared
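# Worked example: next_number(44) == 4**2 + 4**2 == 32, and the chain continues
# 32 -> 13 -> 10 -> 1, so 44 belongs to the chain that terminates at 1.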
# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
CHAINS = [None] * 10000000
CHAINS[0] = True  # the chain of 1 ends at 1
CHAINS[57] = False  # the chain of 58 ends at 89
def chain(number: int) -> bool:
    """Follows the chain of a number until it reaches 1 or 89, caching every member
    encountered along the way (True means the chain ends at 1, False at 89)."""
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore
    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain
    while number < 10000000:
        CHAINS[number - 1] = number_chain
        number *= 10
    return number_chain
def solution(number: int = 10000000) -> int:
    """Returns the count of starting numbers below `number` whose chain arrives at 89."""
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)
    return CHAINS[:number].count(False)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"""{solution() = }""")
from __future__ import annotations
from scipy.special import comb # type: ignore
class BezierCurve:
    """A Bezier curve is a weighted sum of a set of control points."""

    def __init__(self, list_of_points: list[tuple[float, float]]):
        """list_of_points: control points in the xy plane on which to interpolate."""
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points) - 1
    def basis_function(self, t: float) -> list[float]:
        """The basis function determines the weight of each control point at time t."""
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values: list[float] = []
        for i in range(len(self.list_of_points)):
            # basis function for each i
            output_values.append(
                comb(self.degree, i) * ((1 - t) ** (self.degree - i)) * (t**i)
            )
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values), 5) == 1
        return output_values
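    # The i-th value above is the Bernstein polynomial
    # b_{i,n}(t) = C(n, i) * t**i * (1 - t)**(n - i) with n = self.degree;
    # the n + 1 polynomials form a partition of unity, hence the sum-to-1 assert.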
    def bezier_curve_function(self, t: float) -> tuple[float, float]:
        """Produces the (x, y) value of the Bezier curve at time t."""
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        basis_function = self.basis_function(t)
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points)):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)
    def plot_curve(self, step_size: float = 0.01):
        """Plots the Bezier curve together with its control points using matplotlib."""
        from matplotlib import pyplot as plt  # type: ignore

        to_plot_x: list[float] = []  # x coordinates of points to plot
        to_plot_y: list[float] = []  # y coordinates of points to plot
        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t)
            to_plot_x.append(value[0])
            to_plot_y.append(value[1])
            t += step_size
        x = [i[0] for i in self.list_of_points]
        y = [i[1] for i in self.list_of_points]
        plt.plot(to_plot_x, to_plot_y, color="blue", label="Curve of Degree " + str(self.degree))
        plt.scatter(x, y, color="red", label="Control Points")
        plt.legend()
        plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
from __future__ import annotations
def generate_all_combinations(n: int, k: int) -> list[list[int]]:
    result: list[list[int]] = []
    create_all_state(1, n, k, [], result)
    return result


def create_all_state(
    increment: int,
    total_number: int,
    level: int,
    current_list: list[int],
    total_list: list[list[int]],
) -> None:
    if level == 0:
        total_list.append(current_list[:])
        return
    for i in range(increment, total_number - level + 2):
        current_list.append(i)
        create_all_state(i + 1, total_number, level - 1, current_list, total_list)
        current_list.pop()


def print_all_state(total_list: list[list[int]]) -> None:
    for i in total_list:
        print(*i)


if __name__ == "__main__":
    n = 4
    k = 2
    total_list = generate_all_combinations(n, k)
    print_all_state(total_list)
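# With n = 4 and k = 2 this prints the C(4, 2) = 6 combinations, one per line:
# 1 2, 1 3, 1 4, 2 3, 2 4, 3 4.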
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"""blocks.{i}.norm1.weight""", f"""deit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""blocks.{i}.norm1.bias""", f"""deit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((f"""blocks.{i}.attn.proj.weight""", f"""deit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.attn.proj.bias""", f"""deit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((f"""blocks.{i}.norm2.weight""", f"""deit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""blocks.{i}.norm2.bias""", f"""deit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.weight""", f"""deit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.bias""", f"""deit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.weight""", f"""deit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.bias""", f"""deit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
('cls_token', 'deit.embeddings.cls_token'),
('dist_token', 'deit.embeddings.distillation_token'),
('patch_embed.proj.weight', 'deit.embeddings.patch_embeddings.projection.weight'),
('patch_embed.proj.bias', 'deit.embeddings.patch_embeddings.projection.bias'),
('pos_embed', 'deit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
('pre_logits.fc.weight', 'pooler.dense.weight'),
('pre_logits.fc.bias', 'pooler.dense.bias'),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith('deit' ) else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
('norm.weight', 'deit.layernorm.weight'),
('norm.bias', 'deit.layernorm.bias'),
('head.weight', 'cls_classifier.weight'),
('head.bias', 'cls_classifier.bias'),
('head_dist.weight', 'distillation_classifier.weight'),
('head_dist.bias', 'distillation_classifier.bias'),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "deit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    """Prepare an image of two cats, on which the outputs will be verified."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deit_checkpoint(deit_name, pytorch_dump_folder_path):
    """Copy/paste/tweak the timm model's weights to our DeiT structure."""
    config = DeiTConfig()
    # all deit models have fine-tuned heads
    base_model = False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    config.patch_size = int(deit_name[-6:-4])
    config.image_size = int(deit_name[-3:])
    # size of the architecture
    if deit_name[9:].startswith("tiny"):
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
    elif deit_name[9:].startswith("small"):
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    if deit_name[9:].startswith("base"):
        pass
    elif deit_name[4:].startswith("large"):
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # load original model from timm
    timm_model = timm.create_model(deit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    model = DeiTForImageClassificationWithTeacher(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by DeiTImageProcessor
    size = int(
        (256 / 224) * config.image_size
    )  # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    image_processor = DeiTImageProcessor(size=size, crop_size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {deit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--deit_name",
default="vit_deit_base_distilled_patch16_224",
type=str,
help="Name of the DeiT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
    args = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
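# Illustrative invocation (the model name must follow timm's DeiT naming scheme; the
# output folder is a placeholder):
#   python convert_deit_timm_to_pytorch.py \
#       --deit_name vit_deit_base_distilled_patch16_224 \
#       --pytorch_dump_folder_path ./deit-base-distilled-patch16-224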
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
    T5EncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
EXPECTED_MISSING_KEYS = ["model.decoder.embed_positions.weights"]
def rename_keys(name):
    """Map a fairseq/audiocraft parameter name to its transformers equivalent."""
    if "emb" in name:
        name = name.replace("emb", "model.decoder.embed_tokens")
    if "transformer" in name:
        name = name.replace("transformer", "model.decoder")
    if "cross_attention" in name:
        name = name.replace("cross_attention", "encoder_attn")
    if "linear1" in name:
        name = name.replace("linear1", "fc1")
    if "linear2" in name:
        name = name.replace("linear2", "fc2")
    if "norm1" in name:
        name = name.replace("norm1", "self_attn_layer_norm")
    if "norm_cross" in name:
        name = name.replace("norm_cross", "encoder_attn_layer_norm")
    if "norm2" in name:
        name = name.replace("norm2", "final_layer_norm")
    if "out_norm" in name:
        name = name.replace("out_norm", "model.decoder.layer_norm")
    if "linears" in name:
        name = name.replace("linears", "lm_heads")
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace("condition_provider.conditioners.description.output_proj", "enc_to_dec_proj")
    return name
def rename_state_dict(state_dict: OrderedDict, hidden_size: int) -> Tuple[Dict, Dict]:
    """Rename the fairseq keys and split the fused qkv projections into q/k/v."""
    keys = list(state_dict.keys())
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key)
        key = rename_keys(key)
        if "in_proj_weight" in key:
            # split fused qkv proj
            state_dict[key.replace("in_proj_weight", "q_proj.weight")] = val[:hidden_size, :]
            state_dict[key.replace("in_proj_weight", "k_proj.weight")] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace("in_proj_weight", "v_proj.weight")] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len("enc_to_dec_proj.") :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict
def decoder_config_from_checkpoint(checkpoint: str) -> MusicgenDecoderConfig:
    if checkpoint == "small":
        # default config values
        hidden_size = 1024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(f"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.")
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size,
        ffn_dim=hidden_size * 4,
        num_hidden_layers=num_hidden_layers,
        num_attention_heads=num_attention_heads,
    )
    return config
@torch.no_grad()
def convert_musicgen_checkpoint(checkpoint, pytorch_dump_folder=None, repo_id=None, device="cpu"):
    fairseq_model = MusicGen.get_pretrained(checkpoint, device=device)
    decoder_config = decoder_config_from_checkpoint(checkpoint)

    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict, enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict, hidden_size=decoder_config.hidden_size
    )

    text_encoder = T5EncoderModel.from_pretrained("t5-base")
    audio_encoder = EncodecModel.from_pretrained("facebook/encodec_32khz")
    decoder = MusicgenForCausalLM(decoder_config).eval()

    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys, unexpected_keys = decoder.load_state_dict(decoder_state_dict, strict=False)

    for key in missing_keys.copy():
        if key.startswith(("text_encoder", "audio_encoder")) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key)

    if len(missing_keys) > 0:
        raise ValueError(f"Missing key(s) in state_dict: {missing_keys}")

    if len(unexpected_keys) > 0:
        raise ValueError(f"Unexpected key(s) in state_dict: {unexpected_keys}")

    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder, audio_encoder=audio_encoder, decoder=decoder)

    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict)

    # check we can do a forward pass
    input_ids = torch.arange(0, 8, dtype=torch.long).reshape(2, -1)
    decoder_input_ids = input_ids.reshape(2 * 4, -1)

    with torch.no_grad():
        logits = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits

    if logits.shape != (8, 1, 2048):
        raise ValueError("Incorrect shape for logits")

    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained("t5-base")
    feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz", padding_side="left")
    processor = MusicgenProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)

    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 2048
    model.generation_config.pad_token_id = 2048

    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate)
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0

    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder).mkdir(exist_ok=True)
        logger.info(f"Saving model {checkpoint} to {pytorch_dump_folder}")
        model.save_pretrained(pytorch_dump_folder)
        processor.save_pretrained(pytorch_dump_folder)

    if repo_id:
        logger.info(f"Pushing model {checkpoint} to {repo_id}")
        model.push_to_hub(repo_id)
        processor.push_to_hub(repo_id)
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint",
default="small",
type=str,
help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.",
)
parser.add_argument(
"--pytorch_dump_folder",
required=True,
default=None,
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
parser.add_argument(
"--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda."
)
_snake_case = parser.parse_args()
    convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub, args.device)
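# Illustrative invocation (the output folder is a placeholder; requires the
# `audiocraft` package for MusicGen.get_pretrained):
#   python convert_musicgen.py --checkpoint small --pytorch_dump_folder ./musicgen-small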
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def convert_pytorch_checkpoint_to_tf(model: BertModel, ckpt_dir: str, model_name: str):
    """Export a PyTorch BertModel as a TensorFlow 1.x checkpoint."""
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")
    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )

    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)

    state_dict = model.state_dict()

    def to_tf_var_name(name: str):
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return f"bert/{name}"

    def create_tf_var(tensor: np.ndarray, name: str, session: tf.Session):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            print(f"Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}")

        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace("-", "_") + ".ckpt"))
def main(raw_args=None):
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, required=True, help="model name e.g. bert-base-uncased")
    parser.add_argument(
        "--cache_dir", type=str, default=None, required=False, help="Directory containing pytorch model"
    )
    parser.add_argument("--pytorch_model_path", type=str, required=True, help="/path/to/<pytorch-model-name>.bin")
    parser.add_argument("--tf_cache_dir", type=str, required=True, help="Directory in which to save tensorflow model")
    args = parser.parse_args(raw_args)
    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name,
        state_dict=torch.load(args.pytorch_model_path),
        cache_dir=args.cache_dir,
    )
    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)
if __name__ == "__main__":
main()
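# Illustrative invocation (paths are placeholders; tf.Session/tf.get_variable require a
# TensorFlow 1.x runtime):
#   python convert_bert_pytorch_checkpoint_to_original_tf.py \
#       --model_name bert-base-uncased \
#       --pytorch_model_path ./pytorch_model.bin \
#       --tf_cache_dir ./tf_ckpt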
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class DonutProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
        self._in_target_context_manager = False
    def __call__(self, *args, **kwargs):
        """Forwards the `images` argument to the image processor and `text` to the tokenizer."""
        # For backwards compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        images = kwargs.pop("images", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            images = args[0]
            args = args[1:]

        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process.")

        if images is not None:
            inputs = self.image_processor(images, *args, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs
    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)
    @contextmanager
    def as_target_processor(self):
        """Temporarily sets the tokenizer for processing the targets (labels)."""
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your images inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.image_processor
        self._in_target_context_manager = False
    def token2json(self, tokens, is_inner_value=False, added_vocab=None):
        """Convert a (generated) token sequence into an ordered JSON format."""
        if added_vocab is None:
            added_vocab = self.tokenizer.get_added_vocab()

        output = {}

        while tokens:
            start_token = re.search(r"<s_(.*?)>", tokens, re.IGNORECASE)
            if start_token is None:
                break
            key = start_token.group(1)
            end_token = re.search(rf"</s_{key}>", tokens, re.IGNORECASE)
            start_token = start_token.group()
            if end_token is None:
                tokens = tokens.replace(start_token, "")
            else:
                end_token = end_token.group()
                start_token_escaped = re.escape(start_token)
                end_token_escaped = re.escape(end_token)
                content = re.search(f"{start_token_escaped}(.*?){end_token_escaped}", tokens, re.IGNORECASE)
                if content is not None:
                    content = content.group(1).strip()
                    if r"<s_" in content and r"</s_" in content:  # non-leaf node
                        value = self.token2json(content, is_inner_value=True, added_vocab=added_vocab)
                        if value:
                            if len(value) == 1:
                                value = value[0]
                            output[key] = value
                    else:  # leaf nodes
                        output[key] = []
                        for leaf in content.split(r"<sep/>"):
                            leaf = leaf.strip()
                            if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
                                leaf = leaf[1:-2]  # for categorical special tokens
                            output[key].append(leaf)
                        if len(output[key]) == 1:
                            output[key] = output[key][0]

                tokens = tokens[tokens.find(end_token) + len(end_token) :].strip()
                if tokens[:6] == r"<sep/>":  # non-leaf nodes
                    return [output] + self.token2json(tokens[6:], is_inner_value=True, added_vocab=added_vocab)

        if len(output):
            return [output] if is_inner_value else output
        else:
            return [] if is_inner_value else {"text_sequence": tokens}
    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
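# Illustrative round trip for token2json (output shape inferred from the parsing logic
# above; the tag names are made up):
#   processor.token2json("<s_menu><s_name>Latte</s_name></s_menu>")
#   -> {"menu": {"name": "Latte"}}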
"""simple docstring"""
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class VisualQuestionAnsweringPipeline(Pipeline):
    """Visual Question Answering pipeline using a `AutoModelForVisualQuestionAnswering` head."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING)

    def _sanitize_parameters(self, top_k=None, padding=None, truncation=None, **kwargs):
        preprocess_params, postprocess_params = {}, {}
        if padding is not None:
            preprocess_params["padding"] = padding
        if truncation is not None:
            preprocess_params["truncation"] = truncation
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, image: Union["Image.Image", str], question: str = None, **kwargs):
        if isinstance(image, (Image.Image, str)) and isinstance(question, str):
            inputs = {"image": image, "question": question}
        else:
            # assume image already contains {"image": ..., "question": ...} (possibly batched)
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def preprocess(self, inputs, padding=False, truncation=False):
        image = load_image(inputs["image"])
        model_inputs = self.tokenizer(
            inputs["question"], return_tensors=self.framework, padding=padding, truncation=truncation
        )
        image_features = self.image_processor(images=image, return_tensors=self.framework)
        model_inputs.update(image_features)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.sigmoid()[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
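# Usage sketch via the pipeline factory (the image path is a placeholder; the default
# model depends on your transformers version):
#   from transformers import pipeline
#   vqa = pipeline("visual-question-answering")
#   vqa(image="cats.png", question="How many cats are there?")  # -> [{"score": ..., "answer": ...}, ...]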
"""simple docstring"""
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR = get_tests_dir("fixtures")
SAMPLE_FEATURE_EXTRACTION_CONFIG = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
SAMPLE_CONFIG = get_tests_dir("fixtures/dummy-config.json")


class AutoFeatureExtractorTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_feature_extractor_from_model_shortcut(self):
        config = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_feature_extractor_from_local_directory_from_key(self):
        config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)
    def test_feature_extractor_from_local_directory_from_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = Wav2Vec2Config()
            # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
            config_dict = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR).to_dict()
            config_dict.pop("feature_extractor_type")
            config = Wav2Vec2FeatureExtractor(**config_dict)
            # save in new folder
            model_config.save_pretrained(tmpdirname)
            config.save_pretrained(tmpdirname)
            config = AutoFeatureExtractor.from_pretrained(tmpdirname)
            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string())
            self.assertTrue("_processor_class" not in dict_as_saved)
            self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_feature_extractor_from_local_file(self):
        config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG)
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)
    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoFeatureExtractor.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoFeatureExtractor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_feature_extractor_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.",
        ):
            _ = AutoFeatureExtractor.from_pretrained("hf-internal-testing/config-no-model")
    def test_from_pretrained_dynamic_feature_extractor(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor"
            )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=False
            )

        feature_extractor = AutoFeatureExtractor.from_pretrained(
            "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=True
        )
        self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")

        # Test feature extractor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(tmp_dir)
            reloaded_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_feature_extractor.__class__.__name__, "NewFeatureExtractor")
    def test_new_feature_extractor_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoFeatureExtractor.register(Wav2Vec2Config, Wav2Vec2FeatureExtractor)

            # Now that the config is registered, it can be used as any other config with the auto-API
            feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
            with tempfile.TemporaryDirectory() as tmp_dir:
                feature_extractor.save_pretrained(tmp_dir)
                new_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir)
                self.assertIsInstance(new_feature_extractor, CustomFeatureExtractor)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
    def test_from_pretrained_dynamic_feature_extractor_conflict(self):
        class NewFeatureExtractor(Wav2Vec2FeatureExtractor):
            is_local = True

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, NewFeatureExtractor)
            # If remote code is not set, the default is to use local
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor"
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(feature_extractor.is_local)

            # If remote code is disabled, we load the local one.
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=False
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(feature_extractor.is_local)

            # If remote is enabled, we load from the Hub
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=True
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(not hasattr(feature_extractor, "is_local"))
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
"""simple docstring"""
import os
import unittest
from transformers.models.phobert.tokenization_phobert import VOCAB_FILES_NAMES, PhobertTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class PhobertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PhobertTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["T@@", "i", "I", "R@@", "r", "e@@"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l à</w>"]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])

        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            for token in vocab_tokens:
                fp.write(f"{token} {vocab_tokens[token]}\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return PhobertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "Tôi là VinAI Research"
        output_text = "T<unk> i <unk> <unk> <unk> <unk> <unk> <unk> I Re<unk> e<unk> <unk> <unk> <unk>"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = PhobertTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "Tôi là VinAI Research"
        bpe_tokens = "T@@ ô@@ i l@@ à V@@ i@@ n@@ A@@ I R@@ e@@ s@@ e@@ a@@ r@@ c@@ h".split()
        tokens = tokenizer.tokenize(text)
        print(tokens)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 3, 5, 3, 3, 3, 3, 3, 3, 6, 7, 9, 3, 9, 3, 3, 3, 3, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
'''simple docstring'''
from PIL import Image
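# Mean thresholding binarizes a grayscale image: the first pass below computes the
# global mean intensity, the second pass sets each pixel to 255 or 0 against that mean.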
def mean_threshold(image: Image) -> Image:
    """
    image: is a grayscale PIL image object
    """
    height, width = image.size
    mean = 0
    pixels = image.load()
    for i in range(width):
        for j in range(height):
            pixel = pixels[j, i]
            mean += pixel
    mean //= width * height

    for j in range(width):
        for i in range(height):
            pixels[i, j] = 255 if pixels[i, j] > mean else 0
    return image


if __name__ == "__main__":
    image = mean_threshold(Image.open("path_to_image").convert("L"))
    image.save("output_image_path")
'''simple docstring'''
def check_cycle(graph: dict) -> bool:
    """Returns True if the directed graph contains a cycle, else False."""
    visited: set = set()
    # To detect a back edge, keep track of vertices currently in the recursion stack
    rec_stk: set = set()
    return any(
        node not in visited and depth_first_search(graph, node, visited, rec_stk)
        for node in graph
    )


def depth_first_search(graph: dict, vertex: int, visited: set, rec_stk: set) -> bool:
    # Mark the current node as visited and add it to the recursion stack
    visited.add(vertex)
    rec_stk.add(vertex)
    for node in graph[vertex]:
        if node not in visited:
            if depth_first_search(graph, node, visited, rec_stk):
                return True
        elif node in rec_stk:
            return True
    # The node needs to be removed from recursion stack before function ends
    rec_stk.remove(vertex)
    return False
if __name__ == "__main__":
from doctest import testmod
testmod()
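# Example: check_cycle({0: [1], 1: [2], 2: [0]}) is True (back edge 2 -> 0),
# while check_cycle({0: [1], 1: [2], 2: []}) is False.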
"""simple docstring"""
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
logger = logging.getLogger(__name__)


class GLUETransformer(BaseTransformer):
    mode = "sequence-classification"

    def __init__(self, hparams):
        if type(hparams) == dict:
            hparams = Namespace(**hparams)
        hparams.glue_output_mode = glue_output_modes[hparams.task]
        num_labels = glue_tasks_num_labels[hparams.task]
        super().__init__(hparams, num_labels, self.mode)

    def forward(self, **inputs):
        return self.model(**inputs)

    def training_step(self, batch, batch_idx):
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type not in ["distilbert", "bart"]:
            inputs["token_type_ids"] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None
        outputs = self(**inputs)
        loss = outputs[0]
        lr_scheduler = self.trainer.lr_schedulers[0]["scheduler"]
        tensorboard_logs = {"loss": loss, "rate": lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss, "log": tensorboard_logs}
    def prepare_data(self):
        "Called to initialize data. Use the call to construct features"
        args = self.hparams
        processor = processors[args.task]()
        self.labels = processor.get_labels()

        for mode in ["train", "dev"]:
            cached_features_file = self._feature_file(mode)
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                logger.info("Loading features from cached file %s", cached_features_file)
            else:
                logger.info("Creating features from dataset file at %s", args.data_dir)
                examples = (
                    processor.get_dev_examples(args.data_dir)
                    if mode == "dev"
                    else processor.get_train_examples(args.data_dir)
                )
                features = convert_examples_to_features(
                    examples,
                    self.tokenizer,
                    max_length=args.max_seq_length,
                    label_list=self.labels,
                    output_mode=args.glue_output_mode,
                )
                logger.info("Saving features into cached file %s", cached_features_file)
                torch.save(features, cached_features_file)
    def get_dataloader(self, mode: str, batch_size: int, shuffle: bool = False) -> DataLoader:
        "Load datasets. Called after prepare data."
        # We test on dev set to compare to benchmarks without having to submit to GLUE server
        mode = "dev" if mode == "test" else mode
        cached_features_file = self._feature_file(mode)
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
        all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
        if self.hparams.glue_output_mode == "classification":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
        elif self.hparams.glue_output_mode == "regression":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.float)
        return DataLoader(
            TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels),
            batch_size=batch_size,
            shuffle=shuffle,
        )
    def validation_step(self, batch, batch_idx):
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type not in ["distilbert", "bart"]:
            inputs["token_type_ids"] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None
        outputs = self(**inputs)
        tmp_eval_loss, logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs["labels"].detach().cpu().numpy()
        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
    def _eval_end(self, outputs):
        val_loss_mean = torch.stack([x["val_loss"] for x in outputs]).mean().detach().cpu().item()
        preds = np.concatenate([x["pred"] for x in outputs], axis=0)
        if self.hparams.glue_output_mode == "classification":
            preds = np.argmax(preds, axis=1)
        elif self.hparams.glue_output_mode == "regression":
            preds = np.squeeze(preds)
        out_label_ids = np.concatenate([x["target"] for x in outputs], axis=0)
        out_label_list = [[] for _ in range(out_label_ids.shape[0])]
        preds_list = [[] for _ in range(out_label_ids.shape[0])]
        results = {**{"val_loss": val_loss_mean}, **compute_metrics(self.hparams.task, preds, out_label_ids)}
        ret = dict(results.items())
        ret["log"] = results
        return ret, preds_list, out_label_list

    def validation_epoch_end(self, outputs: list) -> dict:
        ret, preds, targets = self._eval_end(outputs)
        logs = ret["log"]
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    def test_epoch_end(self, outputs) -> dict:
        ret, predictions, targets = self._eval_end(outputs)
        logs = ret["log"]
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
    @staticmethod
    def add_model_specific_args(parser, root_dir):
        BaseTransformer.add_model_specific_args(parser, root_dir)
        parser.add_argument(
            "--max_seq_length",
            default=128,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument("--task", default="", type=str, required=True, help="The GLUE task to run")
        parser.add_argument(
            "--gpus", default=0, type=int, help="The number of GPUs allocated for this, it is by default 0 meaning none"
        )
        parser.add_argument(
            "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
        )
        return parser
def main():
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = GLUETransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()

    # If output_dir not provided, a folder will be generated in pwd
    if args.output_dir is None:
        args.output_dir = os.path.join(
            "./results",
            f"{args.task}_{time.strftime('%Y%m%d_%H%M%S')}",
        )
        os.makedirs(args.output_dir)

    model = GLUETransformer(args)
    trainer = generic_train(model, args)

    # Optionally, predict on dev set and write to output_dir
    if args.do_predict:
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
        return trainer.test(model)
if __name__ == "__main__":
main()
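# Illustrative launch command (several flag names come from add_generic_args/BaseTransformer
# and may differ in your checkout):
#   python run_pl_glue.py --task mrpc --model_name_or_path bert-base-cased \
#       --data_dir ./glue_data/MRPC --output_dir ./mrpc-out --do_predict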
"""simple docstring"""
import enum
import os
from hashlib import sha256
from typing import Optional
from .. import config
from .logging import get_logger
logger = get_logger(__name__)


class VerificationMode(enum.Enum):
    """`Enum` that specifies which verification checks to run."""

    ALL_CHECKS = "all_checks"
    BASIC_CHECKS = "basic_checks"
    NO_CHECKS = "no_checks"


class ChecksumVerificationException(Exception):
    """Exceptions during checksums verifications of downloaded files."""


class UnexpectedDownloadedFile(ChecksumVerificationException):
    """Some downloaded files were not expected."""


class ExpectedMoreDownloadedFiles(ChecksumVerificationException):
    """Some files were supposed to be downloaded but were not."""


class NonMatchingChecksumError(ChecksumVerificationException):
    """The downloaded file checksum don't match the expected checksum."""
def verify_checksums(expected_checksums: Optional[dict], recorded_checksums: dict, verification_name=None):
    if expected_checksums is None:
        logger.info("Unable to verify checksums.")
        return
    if len(set(expected_checksums) - set(recorded_checksums)) > 0:
        raise ExpectedMoreDownloadedFiles(str(set(expected_checksums) - set(recorded_checksums)))
    if len(set(recorded_checksums) - set(expected_checksums)) > 0:
        raise UnexpectedDownloadedFile(str(set(recorded_checksums) - set(expected_checksums)))
    bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
    for_verification_name = " for " + verification_name if verification_name is not None else ""
    if len(bad_urls) > 0:
        raise NonMatchingChecksumError(
            f"Checksums didn't match{for_verification_name}:\n"
            f"{bad_urls}\n"
            "Set `verification_mode='no_checks'` to skip checksums verification and ignore this error"
        )
    logger.info("All the checksums matched successfully" + for_verification_name)
class SplitsVerificationException(Exception):
    """Exceptions during splits verifications."""


class UnexpectedSplits(SplitsVerificationException):
    """The expected splits of the downloaded file is missing."""


class ExpectedMoreSplits(SplitsVerificationException):
    """Some recorded splits are missing."""


class NonMatchingSplitsSizesError(SplitsVerificationException):
    """The splits sizes don't match the expected splits sizes."""
def verify_splits(expected_splits: Optional[dict], recorded_splits: dict):
    if expected_splits is None:
        logger.info("Unable to verify splits sizes.")
        return
    if len(set(expected_splits) - set(recorded_splits)) > 0:
        raise ExpectedMoreSplits(str(set(expected_splits) - set(recorded_splits)))
    if len(set(recorded_splits) - set(expected_splits)) > 0:
        raise UnexpectedSplits(str(set(recorded_splits) - set(expected_splits)))
    bad_splits = [
        {"expected": expected_splits[name], "recorded": recorded_splits[name]}
        for name in expected_splits
        if expected_splits[name].num_examples != recorded_splits[name].num_examples
    ]
    if len(bad_splits) > 0:
        raise NonMatchingSplitsSizesError(str(bad_splits))
    logger.info("All the splits matched successfully.")
def get_size_checksum_dict(path, record_checksum=True):
    """simple docstring"""
    if record_checksum:
        m = sha256()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(1 << 20), b""):
                m.update(chunk)
        checksum = m.hexdigest()
    else:
        checksum = None
    return {"num_bytes": os.path.getsize(path), "checksum": checksum}
def is_small_dataset(dataset_size):
    """simple docstring"""
    if dataset_size and config.IN_MEMORY_MAX_SIZE:
        return dataset_size < config.IN_MEMORY_MAX_SIZE
    else:
        return False
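# --- Hedged usage sketch of the helpers above (URLs and checksum values are
# invented for illustration; real callers pass the recorded download info).
if __name__ == "__main__":
    _expected = {"https://example.com/data.txt": "abc123"}
    verify_checksums(_expected, {"https://example.com/data.txt": "abc123"})  # passes silently
    try:
        verify_checksums(_expected, {"https://example.com/data.txt": "def456"}, verification_name="dataset source files")
    except NonMatchingChecksumError as err:
        print(err)  # reports the mismatching URL(s)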
| 57
| 0
|
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
TEXT = '''
Hugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.
In March 2021, Hugging Face raised $40 million in a Series B funding round.[3]
On April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]
'''
class TextQuestionAnsweringToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-question-answering")
        self.tool.setup()
        self.remote_tool = load_tool("text-question-answering", remote=True)

    def test_exact_match_arg(self):
        result = self.tool(TEXT, "What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool(TEXT, "What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_kwarg(self):
        result = self.tool(text=TEXT, question="What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text=TEXT, question="What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")
| 94
|
from __future__ import annotations
def depth_first_search(graph: dict, start: str) -> set[str]:
    """simple docstring"""
    explored, stack = set(start), [start]
    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored
G = {
'''A''': ['''B''', '''C''', '''D'''],
'''B''': ['''A''', '''D''', '''E'''],
'''C''': ['''A''', '''F'''],
'''D''': ['''B''', '''D'''],
'''E''': ['''B''', '''F'''],
'''F''': ['''C''', '''E''', '''G'''],
'''G''': ['''F'''],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
print(depth_first_search(G, '''A'''))
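    # Hedged sanity check: every vertex of the sample graph above is reachable
    # from "A", so the explored set equals the full vertex set (a set is
    # returned, so visit order is not recorded).
    assert depth_first_search(G, "A") == {"A", "B", "C", "D", "E", "F", "G"}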
| 94
| 1
|
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
"""The `inpainting.py` script is outdated. Please use directly `from diffusers import"""
""" StableDiffusionInpaintPipeline` instead."""
)
| 371
|
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge:
    """simple docstring"""

    destination_vertex: int
    weight: int


class AdjacencyList:
    """simple docstring"""

    def __init__(self, size: int):
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex: int) -> Iterator[Edge]:
        return iter(self._graph[vertex])

    @property
    def size(self):
        return self._size

    def add_edge(self, from_vertex: int, to_vertex: int, weight: int):
        if weight not in (0, 1):
            raise ValueError("Edge weight must be either 0 or 1.")
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError("Vertex indexes must be in [0; size).")
        self._graph[from_vertex].append(Edge(to_vertex, weight))

    def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int | None:
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0
        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue
            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance, int)
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)
        if distances[finish_vertex] is None:
            raise ValueError("No path from start_vertex to finish_vertex.")
        return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
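    # Hedged usage sketch of AdjacencyList (the 4-vertex graph is invented):
    # the zero-weight detour 0 -> 2 -> 1 beats the direct edge 0 -> 1, so the
    # shortest distance to vertex 3 is 0 + 0 + 1 = 1.
    g = AdjacencyList(4)
    g.add_edge(0, 1, 1)
    g.add_edge(0, 2, 0)
    g.add_edge(2, 1, 0)
    g.add_edge(1, 3, 1)
    assert g.get_shortest_path(0, 3) == 1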
| 127
| 0
|
'''simple docstring'''
import colorsys
from PIL import Image # type: ignore
def get_distance(x: float, y: float, max_step: int) -> float:
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)


def get_black_and_white_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)


def get_color_coded_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))


def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
) -> Image.Image:
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()
    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height
            distance = get_distance(figure_x, figure_y, max_step)
            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)
    return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
    img = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
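    # Hedged sanity checks of get_distance (points chosen for illustration):
    # the origin never escapes, so the normalized distance is 1.0 (black);
    # a point far outside the set escapes at step 0, giving 0.0.
    assert get_distance(0, 0, 50) == 1.0
    assert get_distance(5, 5, 50) == 0.0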
| 298
|
'''simple docstring'''
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
_lowerCAmelCase = logging.get_logger(__name__)
# General docstring
_lowerCAmelCase = '''RegNetConfig'''
# Base docstring
_lowerCAmelCase = '''facebook/regnet-y-040'''
_lowerCAmelCase = [1, 1088, 7, 7]
# Image classification docstring
_lowerCAmelCase = '''facebook/regnet-y-040'''
_lowerCAmelCase = '''tabby, tabby cat'''
_lowerCAmelCase = [
'''facebook/regnet-y-040''',
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class A ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__(self , _UpperCAmelCase , _UpperCAmelCase = 3 , _UpperCAmelCase = 1 , _UpperCAmelCase = 1 , _UpperCAmelCase = "relu" , **_UpperCAmelCase , ) -> Optional[int]:
super().__init__(**_UpperCAmelCase )
# The padding and conv has been verified in
# https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
__UpperCamelCase : List[Any] = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 )
__UpperCamelCase : Tuple = tf.keras.layers.ConvaD(
filters=_UpperCAmelCase , kernel_size=_UpperCAmelCase , strides=_UpperCAmelCase , padding="VALID" , groups=_UpperCAmelCase , use_bias=_UpperCAmelCase , name="convolution" , )
__UpperCamelCase : int = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name="normalization" )
__UpperCamelCase : List[str] = ACTaFN[activation] if activation is not None else tf.identity
def a_ (self , _UpperCAmelCase ) -> Dict:
__UpperCamelCase : str = self.convolution(self.padding(_UpperCAmelCase ) )
__UpperCamelCase : Dict = self.normalization(_UpperCAmelCase )
__UpperCamelCase : Dict = self.activation(_UpperCAmelCase )
return hidden_state
class A ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__(self , _UpperCAmelCase , **_UpperCAmelCase ) -> Optional[Any]:
super().__init__(**_UpperCAmelCase )
__UpperCamelCase : Any = config.num_channels
__UpperCamelCase : str = TFRegNetConvLayer(
out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name="embedder" , )
def a_ (self , _UpperCAmelCase ) -> Tuple:
__UpperCamelCase : Dict = shape_list(_UpperCAmelCase )[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
"Make sure that the channel dimension of the pixel values match with the one set in the configuration." )
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
__UpperCamelCase : Any = tf.transpose(_UpperCAmelCase , perm=(0, 2, 3, 1) )
__UpperCamelCase : List[Any] = self.embedder(_UpperCAmelCase )
return hidden_state
class A ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__(self , _UpperCAmelCase , _UpperCAmelCase = 2 , **_UpperCAmelCase ) -> Any:
super().__init__(**_UpperCAmelCase )
__UpperCamelCase : Any = tf.keras.layers.ConvaD(
filters=_UpperCAmelCase , kernel_size=1 , strides=_UpperCAmelCase , use_bias=_UpperCAmelCase , name="convolution" )
__UpperCamelCase : Tuple = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name="normalization" )
def a_ (self , _UpperCAmelCase , _UpperCAmelCase = False ) -> tf.Tensor:
return self.normalization(self.convolution(_UpperCAmelCase ) , training=_UpperCAmelCase )
class A ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__(self , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ) -> Any:
super().__init__(**_UpperCAmelCase )
__UpperCamelCase : List[str] = tf.keras.layers.GlobalAveragePoolingaD(keepdims=_UpperCAmelCase , name="pooler" )
__UpperCamelCase : Optional[Any] = [
tf.keras.layers.ConvaD(filters=_UpperCAmelCase , kernel_size=1 , activation="relu" , name="attention.0" ),
tf.keras.layers.ConvaD(filters=_UpperCAmelCase , kernel_size=1 , activation="sigmoid" , name="attention.2" ),
]
def a_ (self , _UpperCAmelCase ) -> Tuple:
# [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
__UpperCamelCase : List[str] = self.pooler(_UpperCAmelCase )
for layer_module in self.attention:
__UpperCamelCase : str = layer_module(_UpperCAmelCase )
__UpperCamelCase : List[Any] = hidden_state * pooled
return hidden_state
class A ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__(self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = 1 , **_UpperCAmelCase ) -> int:
super().__init__(**_UpperCAmelCase )
__UpperCamelCase : List[Any] = in_channels != out_channels or stride != 1
__UpperCamelCase : List[str] = max(1 , out_channels // config.groups_width )
__UpperCamelCase : List[Any] = (
TFRegNetShortCut(_UpperCAmelCase , stride=_UpperCAmelCase , name="shortcut" )
if should_apply_shortcut
else tf.keras.layers.Activation("linear" , name="shortcut" )
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
__UpperCamelCase : Optional[Any] = [
TFRegNetConvLayer(_UpperCAmelCase , kernel_size=1 , activation=config.hidden_act , name="layer.0" ),
TFRegNetConvLayer(
_UpperCAmelCase , stride=_UpperCAmelCase , groups=_UpperCAmelCase , activation=config.hidden_act , name="layer.1" ),
TFRegNetConvLayer(_UpperCAmelCase , kernel_size=1 , activation=_UpperCAmelCase , name="layer.2" ),
]
__UpperCamelCase : Dict = ACTaFN[config.hidden_act]
def a_ (self , _UpperCAmelCase ) -> Union[str, Any]:
__UpperCamelCase : List[Any] = hidden_state
for layer_module in self.layers:
__UpperCamelCase : Dict = layer_module(_UpperCAmelCase )
__UpperCamelCase : List[Any] = self.shortcut(_UpperCAmelCase )
hidden_state += residual
__UpperCamelCase : Tuple = self.activation(_UpperCAmelCase )
return hidden_state
class A ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__(self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = 1 , **_UpperCAmelCase ) -> Any:
super().__init__(**_UpperCAmelCase )
__UpperCamelCase : str = in_channels != out_channels or stride != 1
__UpperCamelCase : Optional[int] = max(1 , out_channels // config.groups_width )
__UpperCamelCase : Union[str, Any] = (
TFRegNetShortCut(_UpperCAmelCase , stride=_UpperCAmelCase , name="shortcut" )
if should_apply_shortcut
else tf.keras.layers.Activation("linear" , name="shortcut" )
)
__UpperCamelCase : Union[str, Any] = [
TFRegNetConvLayer(_UpperCAmelCase , kernel_size=1 , activation=config.hidden_act , name="layer.0" ),
TFRegNetConvLayer(
_UpperCAmelCase , stride=_UpperCAmelCase , groups=_UpperCAmelCase , activation=config.hidden_act , name="layer.1" ),
TFRegNetSELayer(_UpperCAmelCase , reduced_channels=int(round(in_channels / 4 ) ) , name="layer.2" ),
TFRegNetConvLayer(_UpperCAmelCase , kernel_size=1 , activation=_UpperCAmelCase , name="layer.3" ),
]
__UpperCamelCase : Union[str, Any] = ACTaFN[config.hidden_act]
def a_ (self , _UpperCAmelCase ) -> int:
__UpperCamelCase : str = hidden_state
for layer_module in self.layers:
__UpperCamelCase : Any = layer_module(_UpperCAmelCase )
__UpperCamelCase : Optional[Any] = self.shortcut(_UpperCAmelCase )
hidden_state += residual
__UpperCamelCase : Union[str, Any] = self.activation(_UpperCAmelCase )
return hidden_state
class A ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__(self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = 2 , _UpperCAmelCase = 2 , **_UpperCAmelCase ) -> int:
super().__init__(**_UpperCAmelCase )
__UpperCamelCase : List[str] = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer
__UpperCamelCase : Tuple = [
# downsampling is done in the first layer with stride of 2
layer(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , stride=_UpperCAmelCase , name="layers.0" ),
*[layer(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , name=f"layers.{i+1}" ) for i in range(depth - 1 )],
]
def a_ (self , _UpperCAmelCase ) -> Any:
for layer_module in self.layers:
__UpperCamelCase : Dict = layer_module(_UpperCAmelCase )
return hidden_state
class A ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__(self , _UpperCAmelCase , **_UpperCAmelCase ) -> str:
super().__init__(**_UpperCAmelCase )
__UpperCamelCase : Dict = []
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
_UpperCAmelCase , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name="stages.0" , ) )
__UpperCamelCase : Union[str, Any] = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for i, ((in_channels, out_channels), depth) in enumerate(zip(_UpperCAmelCase , config.depths[1:] ) ):
self.stages.append(TFRegNetStage(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , depth=_UpperCAmelCase , name=f"stages.{i+1}" ) )
def a_ (self , _UpperCAmelCase , _UpperCAmelCase = False , _UpperCAmelCase = True ) -> TFBaseModelOutputWithNoAttention:
__UpperCamelCase : List[Any] = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
__UpperCamelCase : Any = hidden_states + (hidden_state,)
__UpperCamelCase : Any = stage_module(_UpperCAmelCase )
if output_hidden_states:
__UpperCamelCase : List[Any] = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return TFBaseModelOutputWithNoAttention(last_hidden_state=_UpperCAmelCase , hidden_states=_UpperCAmelCase )
@keras_serializable
class A ( tf.keras.layers.Layer ):
'''simple docstring'''
A = RegNetConfig
def __init__(self , _UpperCAmelCase , **_UpperCAmelCase ) -> List[Any]:
super().__init__(**_UpperCAmelCase )
__UpperCamelCase : Optional[int] = config
__UpperCamelCase : List[Any] = TFRegNetEmbeddings(_UpperCAmelCase , name="embedder" )
__UpperCamelCase : Union[str, Any] = TFRegNetEncoder(_UpperCAmelCase , name="encoder" )
__UpperCamelCase : Optional[Any] = tf.keras.layers.GlobalAveragePoolingaD(keepdims=_UpperCAmelCase , name="pooler" )
@unpack_inputs
def a_ (self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = False , ) -> TFBaseModelOutputWithPoolingAndNoAttention:
__UpperCamelCase : Optional[int] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__UpperCamelCase : Dict = return_dict if return_dict is not None else self.config.use_return_dict
__UpperCamelCase : Union[str, Any] = self.embedder(_UpperCAmelCase , training=_UpperCAmelCase )
__UpperCamelCase : str = self.encoder(
_UpperCAmelCase , output_hidden_states=_UpperCAmelCase , return_dict=_UpperCAmelCase , training=_UpperCAmelCase )
__UpperCamelCase : List[str] = encoder_outputs[0]
__UpperCamelCase : Tuple = self.pooler(_UpperCAmelCase )
        # Change to NCHW output format to have uniformity in the modules
__UpperCamelCase : List[str] = tf.transpose(_UpperCAmelCase , perm=(0, 3, 1, 2) )
__UpperCamelCase : List[Any] = tf.transpose(_UpperCAmelCase , perm=(0, 3, 1, 2) )
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
__UpperCamelCase : List[str] = tuple([tf.transpose(_UpperCAmelCase , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=_UpperCAmelCase , pooler_output=_UpperCAmelCase , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
class A ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
A = RegNetConfig
A = "regnet"
A = "pixel_values"
@property
def a_ (self ) -> List[Any]:
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 2_2_4, 2_2_4) , dtype=tf.floataa )}
_lowerCAmelCase = R'''
Parameters:
This model is a Tensorflow
[tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and
behavior.
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
'''
_lowerCAmelCase = R'''
Args:
pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
"The bare RegNet model outputting raw features without any specific head on top." , SCREAMING_SNAKE_CASE__ , )
class A ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
def __init__(self , _UpperCAmelCase , *_UpperCAmelCase , **_UpperCAmelCase ) -> Tuple:
super().__init__(_UpperCAmelCase , *_UpperCAmelCase , **_UpperCAmelCase )
__UpperCamelCase : Optional[Any] = TFRegNetMainLayer(_UpperCAmelCase , name="regnet" )
@unpack_inputs
@add_start_docstrings_to_model_forward(_UpperCAmelCase )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=_UpperCAmelCase , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def a_ (self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase=False , ) -> Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]:
__UpperCamelCase : List[str] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__UpperCamelCase : Optional[int] = return_dict if return_dict is not None else self.config.use_return_dict
__UpperCamelCase : Tuple = self.regnet(
pixel_values=_UpperCAmelCase , output_hidden_states=_UpperCAmelCase , return_dict=_UpperCAmelCase , training=_UpperCAmelCase , )
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )
@add_start_docstrings(
"\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " , SCREAMING_SNAKE_CASE__ , )
class A ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
def __init__(self , _UpperCAmelCase , *_UpperCAmelCase , **_UpperCAmelCase ) -> int:
super().__init__(_UpperCAmelCase , *_UpperCAmelCase , **_UpperCAmelCase )
__UpperCamelCase : Optional[Any] = config.num_labels
__UpperCamelCase : Any = TFRegNetMainLayer(_UpperCAmelCase , name="regnet" )
# classification head
__UpperCamelCase : List[str] = [
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.num_labels , name="classifier.1" ) if config.num_labels > 0 else tf.identity,
]
@unpack_inputs
@add_start_docstrings_to_model_forward(_UpperCAmelCase )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=_UpperCAmelCase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def a_ (self , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase=False , ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
__UpperCamelCase : Dict = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__UpperCamelCase : str = return_dict if return_dict is not None else self.config.use_return_dict
__UpperCamelCase : Dict = self.regnet(
_UpperCAmelCase , output_hidden_states=_UpperCAmelCase , return_dict=_UpperCAmelCase , training=_UpperCAmelCase )
__UpperCamelCase : Union[str, Any] = outputs.pooler_output if return_dict else outputs[1]
__UpperCamelCase : List[str] = self.classifier[0](_UpperCAmelCase )
__UpperCamelCase : Optional[int] = self.classifier[1](_UpperCAmelCase )
__UpperCamelCase : str = None if labels is None else self.hf_compute_loss(labels=_UpperCAmelCase , logits=_UpperCAmelCase )
if not return_dict:
__UpperCamelCase : Union[str, Any] = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=_UpperCAmelCase , logits=_UpperCAmelCase , hidden_states=outputs.hidden_states )
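# --- Hedged illustration (not part of the original file) ---
# The main layer above accepts NCHW pixel values but runs the conv stacks in
# TF's channels-last NHWC layout, transposing at the boundaries (see the
# comments in the embedder and main layer). A standalone round trip of that
# layout change; the shapes below are illustrative only.
if __name__ == "__main__":
    _x = tf.random.normal((2, 3, 224, 224))         # NCHW, as the HF API expects
    _nhwc = tf.transpose(_x, perm=(0, 2, 3, 1))     # (2, 224, 224, 3) for TF convs
    _nchw = tf.transpose(_nhwc, perm=(0, 3, 1, 2))  # back to (2, 3, 224, 224)
    assert _nchw.shape == _x.shape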
| 298
| 1
|
"""simple docstring"""
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def a__ ( __lowercase , __lowercase ) -> Dict:
_A = old_name
if "patch_embed" in old_name:
_A , _A , _A = old_name.split("." )
if layer == "0":
_A = old_name.replace("0" , "convolution1" )
elif layer == "1":
_A = old_name.replace("1" , "batchnorm_before" )
elif layer == "3":
_A = old_name.replace("3" , "convolution2" )
else:
_A = old_name.replace("4" , "batchnorm_after" )
if "network" in old_name and re.search(R"\d\.\d" , __lowercase ):
_A = R"\b\d{2}\b"
if bool(re.search(__lowercase , __lowercase ) ):
_A = re.search(R"\d\.\d\d." , __lowercase ).group()
else:
_A = re.search(R"\d\.\d." , __lowercase ).group()
if int(match[0] ) < 6:
_A = old_name.replace(__lowercase , "" )
_A = trimmed_name.replace("network" , match[0] + ".meta4D_layers.blocks." + match[2:-1] )
_A = "intermediate_stages." + trimmed_name
else:
_A = old_name.replace(__lowercase , "" )
if int(match[2] ) < num_meta4D_last_stage:
_A = trimmed_name.replace("network" , "meta4D_layers.blocks." + match[2] )
else:
_A = str(int(match[2] ) - num_meta4D_last_stage )
_A = trimmed_name.replace("network" , "meta3D_layers.blocks." + layer_index )
if "norm1" in old_name:
_A = trimmed_name.replace("norm1" , "layernorm1" )
elif "norm2" in old_name:
_A = trimmed_name.replace("norm2" , "layernorm2" )
elif "fc1" in old_name:
_A = trimmed_name.replace("fc1" , "linear_in" )
elif "fc2" in old_name:
_A = trimmed_name.replace("fc2" , "linear_out" )
_A = "last_stage." + trimmed_name
elif "network" in old_name and re.search(R".\d." , __lowercase ):
_A = old_name.replace("network" , "intermediate_stages" )
if "fc" in new_name:
_A = new_name.replace("fc" , "convolution" )
elif ("norm1" in new_name) and ("layernorm1" not in new_name):
_A = new_name.replace("norm1" , "batchnorm_before" )
elif ("norm2" in new_name) and ("layernorm2" not in new_name):
_A = new_name.replace("norm2" , "batchnorm_after" )
if "proj" in new_name:
_A = new_name.replace("proj" , "projection" )
if "dist_head" in new_name:
_A = new_name.replace("dist_head" , "distillation_classifier" )
elif "head" in new_name:
_A = new_name.replace("head" , "classifier" )
elif "patch_embed" in new_name:
_A = "efficientformer." + new_name
elif new_name == "norm.weight" or new_name == "norm.bias":
_A = new_name.replace("norm" , "layernorm" )
_A = "efficientformer." + new_name
else:
_A = "efficientformer.encoder." + new_name
return new_name
def a__ ( __lowercase , __lowercase ) -> List[str]:
for key in checkpoint.copy().keys():
_A = checkpoint.pop(__lowercase )
_A = val
return checkpoint
def a__ ( ) -> Dict:
_A = "http://images.cocodataset.org/val2017/000000039769.jpg"
_A = Image.open(requests.get(__lowercase , stream=__lowercase ).raw )
return image
def a__ ( __lowercase , __lowercase , __lowercase , __lowercase ) -> str:
_A = torch.load(__lowercase , map_location="cpu" )["model"]
_A = EfficientFormerConfig.from_json_file(__lowercase )
_A = EfficientFormerForImageClassificationWithTeacher(__lowercase )
_A = "_".join(checkpoint_path.split("/" )[-1].split("." )[0].split("_" )[:-1] )
_A = config.depths[-1] - config.num_metaad_blocks + 1
_A = convert_torch_checkpoint(__lowercase , __lowercase )
model.load_state_dict(__lowercase )
model.eval()
_A = {
"bilinear": PILImageResampling.BILINEAR,
"bicubic": PILImageResampling.BICUBIC,
"nearest": PILImageResampling.NEAREST,
}
# prepare image
_A = prepare_img()
_A = 256
_A = 224
_A = EfficientFormerImageProcessor(
size={"shortest_edge": image_size} , crop_size={"height": crop_size, "width": crop_size} , resample=pillow_resamplings["bicubic"] , )
_A = processor(images=__lowercase , return_tensors="pt" ).pixel_values
# original processing pipeline
_A = Compose(
[
Resize(__lowercase , interpolation=pillow_resamplings["bicubic"] ),
CenterCrop(__lowercase ),
ToTensor(),
Normalize(__lowercase , __lowercase ),
] )
_A = image_transforms(__lowercase ).unsqueeze(0 )
assert torch.allclose(__lowercase , __lowercase )
_A = model(__lowercase )
_A = outputs.logits
_A = (1, 1000)
if "l1" in model_name:
_A = torch.Tensor(
[-0.1_312, 0.4_353, -1.0_499, -0.5_124, 0.4_183, -0.6_793, -1.3_777, -0.0_893, -0.7_358, -2.4_328] )
assert torch.allclose(logits[0, :10] , __lowercase , atol=1E-3 )
assert logits.shape == expected_shape
elif "l3" in model_name:
_A = torch.Tensor(
[-1.3_150, -1.5_456, -1.2_556, -0.8_496, -0.7_127, -0.7_897, -0.9_728, -0.3_052, 0.3_751, -0.3_127] )
assert torch.allclose(logits[0, :10] , __lowercase , atol=1E-3 )
assert logits.shape == expected_shape
elif "l7" in model_name:
_A = torch.Tensor(
[-1.0_283, -1.4_131, -0.5_644, -1.3_115, -0.5_785, -1.2_049, -0.7_528, 0.1_992, -0.3_822, -0.0_878] )
assert logits.shape == expected_shape
else:
raise ValueError(
f"""Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7""" )
# Save Checkpoints
Path(__lowercase ).mkdir(exist_ok=__lowercase )
model.save_pretrained(__lowercase )
print(f"""Checkpoint successfuly converted. Model saved at {pytorch_dump_path}""" )
processor.save_pretrained(__lowercase )
print(f"""Processor successfuly saved at {pytorch_dump_path}""" )
if push_to_hub:
print("Pushing model to the hub..." )
model.push_to_hub(
repo_id=f"""Bearnardd/{pytorch_dump_path}""" , commit_message="Add model" , use_temp_dir=__lowercase , )
processor.push_to_hub(
repo_id=f"""Bearnardd/{pytorch_dump_path}""" , commit_message="Add image processor" , use_temp_dir=__lowercase , )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--pytorch_model_path",
default=None,
type=str,
required=True,
help="Path to EfficientFormer pytorch checkpoint.",
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The json file for EfficientFormer model config.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
parser.add_argument(
"--no-push_to_hub",
dest="push_to_hub",
action="store_false",
help="Do not push model and image processor to the hub",
)
parser.set_defaults(push_to_hub=True)
a_ = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
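# --- Hedged illustration (not part of the original script): the renaming logic
# above keys off small regex probes of the original parameter names. The names
# below are invented; they only show what each probe distinguishes.
assert re.search(r"\d\.\d", "network.1.2.fc1.weight") is not None   # numbered network blocks
assert re.search(r"\d\.\d", "patch_embed.proj.weight") is None      # textual names don't match
assert re.search(r"\b\d{2}\b", "network.12.3.mlp.fc1.weight")       # two-digit stage indices
assert not re.search(r"\b\d{2}\b", "network.1.2.mlp.fc1.weight")    # single-digit stages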
| 163
|
"""simple docstring"""
import collections.abc
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig
a_ = logging.get_logger(__name__)
# General docstring
a_ = "PoolFormerConfig"
# Base docstring
a_ = "sail/poolformer_s12"
a_ = [1, 5_12, 7, 7]
# Image classification docstring
a_ = "sail/poolformer_s12"
a_ = "tabby, tabby cat"
a_ = [
"sail/poolformer_s12",
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]
def drop_path(input, drop_prob: float = 0.0, training: bool = False):
    if drop_prob == 0.0 or not training:
        return input
    keep_prob = 1 - drop_prob
    shape = (input.shape[0],) + (1,) * (input.ndim - 1)  # work with diff dim tensors, not just 2D ConvNets
    random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
    random_tensor.floor_()  # binarize
    output = input.div(keep_prob) * random_tensor
    return output
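# --- Hedged illustration (not part of the original file): `drop_path` zeroes
# whole samples with probability `drop_prob` and rescales survivors by
# 1 / keep_prob so the expected activation is unchanged. Shapes are invented.
if __name__ == "__main__":
    _demo = torch.ones(4, 3, 8, 8)
    _kept = drop_path(_demo, drop_prob=0.5, training=True)
    # Each sample is either all zeros or uniformly scaled by 1 / 0.5 == 2.
    assert set(_kept.reshape(4, -1).amax(dim=1).tolist()) <= {0.0, 2.0}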
class snake_case ( nn.Module):
def __init__( self : Any , a__ : Optional[float] = None ) -> None:
'''simple docstring'''
super().__init__()
_A = drop_prob
def a_ ( self : Optional[Any] , a__ : torch.Tensor ) -> torch.Tensor:
'''simple docstring'''
return drop_path(a__ , self.drop_prob , self.training )
def a_ ( self : List[str] ) -> str:
'''simple docstring'''
return "p={}".format(self.drop_prob )
class snake_case ( nn.Module):
def __init__( self : Union[str, Any] , a__ : List[Any] , a__ : Any , a__ : List[Any] , a__ : Optional[int] , a__ : Dict , a__ : str=None ) -> Optional[Any]:
'''simple docstring'''
super().__init__()
_A = patch_size if isinstance(a__ , collections.abc.Iterable ) else (patch_size, patch_size)
_A = stride if isinstance(a__ , collections.abc.Iterable ) else (stride, stride)
_A = padding if isinstance(a__ , collections.abc.Iterable ) else (padding, padding)
_A = nn.Convad(a__ , a__ , kernel_size=a__ , stride=a__ , padding=a__ )
_A = norm_layer(a__ ) if norm_layer else nn.Identity()
def a_ ( self : Dict , a__ : Any ) -> List[str]:
'''simple docstring'''
_A = self.projection(a__ )
_A = self.norm(a__ )
return embeddings
class snake_case ( nn.GroupNorm):
def __init__( self : Dict , a__ : Optional[int] , **a__ : Dict ) -> Optional[Any]:
'''simple docstring'''
super().__init__(1 , a__ , **a__ )
class snake_case ( nn.Module):
def __init__( self : int , a__ : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
super().__init__()
_A = nn.AvgPoolad(a__ , stride=1 , padding=pool_size // 2 , count_include_pad=a__ )
def a_ ( self : List[str] , a__ : int ) -> str:
'''simple docstring'''
return self.pool(a__ ) - hidden_states
class snake_case ( nn.Module):
def __init__( self : Tuple , a__ : Optional[int] , a__ : Optional[Any] , a__ : List[str] , a__ : Optional[int] ) -> Any:
'''simple docstring'''
super().__init__()
_A = nn.Convad(a__ , a__ , 1 )
_A = nn.Convad(a__ , a__ , 1 )
_A = PoolFormerDropPath(a__ )
if isinstance(config.hidden_act , a__ ):
_A = ACTaFN[config.hidden_act]
else:
_A = config.hidden_act
def a_ ( self : List[Any] , a__ : int ) -> Dict:
'''simple docstring'''
_A = self.conva(a__ )
_A = self.act_fn(a__ )
_A = self.drop(a__ )
_A = self.conva(a__ )
_A = self.drop(a__ )
return hidden_states
class snake_case ( nn.Module):
def __init__( self : Union[str, Any] , a__ : str , a__ : List[str] , a__ : List[Any] , a__ : List[str] , a__ : Optional[Any] , a__ : Tuple ) -> Dict:
'''simple docstring'''
super().__init__()
_A = PoolFormerPooling(a__ )
_A = PoolFormerOutput(a__ , a__ , a__ , a__ )
_A = PoolFormerGroupNorm(a__ )
_A = PoolFormerGroupNorm(a__ )
# Useful for training neural nets
_A = PoolFormerDropPath(a__ ) if drop_path > 0.0 else nn.Identity()
_A = config.use_layer_scale
if config.use_layer_scale:
_A = nn.Parameter(
config.layer_scale_init_value * torch.ones((a__) ) , requires_grad=a__ )
_A = nn.Parameter(
config.layer_scale_init_value * torch.ones((a__) ) , requires_grad=a__ )
def a_ ( self : Union[str, Any] , a__ : Optional[int] ) -> Tuple:
'''simple docstring'''
if self.use_layer_scale:
_A = self.pooling(self.before_norm(a__ ) )
_A = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * pooling_output
# First residual connection
_A = hidden_states + self.drop_path(a__ )
_A = ()
_A = self.output(self.after_norm(a__ ) )
_A = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * layer_output
# Second residual connection
_A = hidden_states + self.drop_path(a__ )
_A = (output,) + outputs
return outputs
else:
_A = self.drop_path(self.pooling(self.before_norm(a__ ) ) )
# First residual connection
_A = pooling_output + hidden_states
_A = ()
# Second residual connection inside the PoolFormerOutput block
_A = self.drop_path(self.output(self.after_norm(a__ ) ) )
_A = hidden_states + layer_output
_A = (output,) + outputs
return outputs
class snake_case ( nn.Module):
def __init__( self : str , a__ : int ) -> Any:
'''simple docstring'''
super().__init__()
_A = config
# stochastic depth decay rule
_A = [x.item() for x in torch.linspace(0 , config.drop_path_rate , sum(config.depths ) )]
# patch embeddings
_A = []
for i in range(config.num_encoder_blocks ):
embeddings.append(
PoolFormerEmbeddings(
patch_size=config.patch_sizes[i] , stride=config.strides[i] , padding=config.padding[i] , num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1] , hidden_size=config.hidden_sizes[i] , ) )
_A = nn.ModuleList(a__ )
# Transformer blocks
_A = []
_A = 0
for i in range(config.num_encoder_blocks ):
# each block consists of layers
_A = []
if i != 0:
cur += config.depths[i - 1]
for j in range(config.depths[i] ):
layers.append(
PoolFormerLayer(
a__ , num_channels=config.hidden_sizes[i] , pool_size=config.pool_size , hidden_size=config.hidden_sizes[i] , intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio ) , drop_path=dpr[cur + j] , ) )
blocks.append(nn.ModuleList(a__ ) )
_A = nn.ModuleList(a__ )
def a_ ( self : Tuple , a__ : Union[str, Any] , a__ : Tuple=False , a__ : List[str]=True ) -> List[Any]:
'''simple docstring'''
_A = () if output_hidden_states else None
_A = pixel_values
for idx, layers in enumerate(zip(self.patch_embeddings , self.block ) ):
_A , _A = layers
# Get patch embeddings from hidden_states
_A = embedding_layer(a__ )
# Send the embeddings through the blocks
for _, blk in enumerate(a__ ):
_A = blk(a__ )
_A = layer_outputs[0]
if output_hidden_states:
_A = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=a__ , hidden_states=a__ )
class snake_case ( _UpperCamelCase):
__UpperCamelCase = PoolFormerConfig
__UpperCamelCase = 'poolformer'
__UpperCamelCase = 'pixel_values'
__UpperCamelCase = True
def a_ ( self : Tuple , a__ : Dict ) -> Any:
'''simple docstring'''
if isinstance(a__ , (nn.Linear, nn.Convad) ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(a__ , nn.LayerNorm ):
module.bias.data.zero_()
module.weight.data.fill_(1.0 )
def a_ ( self : int , a__ : Dict , a__ : int=False ) -> str:
'''simple docstring'''
if isinstance(a__ , a__ ):
_A = value
a_ = r"\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
a_ = r"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`PoolFormerImageProcessor.__call__`] for details.\n"
@add_start_docstrings(
'The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.' , _UpperCamelCase , )
class snake_case ( _UpperCamelCase):
def __init__( self : int , a__ : Dict ) -> str:
'''simple docstring'''
super().__init__(a__ )
_A = config
_A = PoolFormerEncoder(a__ )
# Initialize weights and apply final processing
self.post_init()
def a_ ( self : Union[str, Any] ) -> str:
'''simple docstring'''
return self.embeddings.patch_embeddings
@add_start_docstrings_to_model_forward(a__ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=a__ , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def a_ ( self : Tuple , a__ : Optional[torch.FloatTensor] = None , a__ : Optional[bool] = None , a__ : Optional[bool] = None , ) -> Union[Tuple, BaseModelOutputWithNoAttention]:
'''simple docstring'''
_A = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_A = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError("You have to specify pixel_values" )
_A = self.encoder(
a__ , output_hidden_states=a__ , return_dict=a__ , )
_A = encoder_outputs[0]
if not return_dict:
return (sequence_output, None) + encoder_outputs[1:]
return BaseModelOutputWithNoAttention(
last_hidden_state=a__ , hidden_states=encoder_outputs.hidden_states , )
class snake_case ( nn.Module):
def __init__( self : List[str] , a__ : Dict ) -> Optional[Any]:
'''simple docstring'''
super().__init__()
_A = nn.Linear(config.hidden_size , config.hidden_size )
def a_ ( self : int , a__ : Tuple ) -> str:
'''simple docstring'''
_A = self.dense(a__ )
return output
@add_start_docstrings(
'\n PoolFormer Model transformer with an image classification head on top\n ' , _UpperCamelCase , )
class snake_case ( _UpperCamelCase):
def __init__( self : Tuple , a__ : str ) -> Optional[int]:
'''simple docstring'''
super().__init__(a__ )
_A = config.num_labels
_A = PoolFormerModel(a__ )
# Final norm
_A = PoolFormerGroupNorm(config.hidden_sizes[-1] )
# Classifier head
_A = (
nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity()
)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(a__ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=a__ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def a_ ( self : int , a__ : Optional[torch.FloatTensor] = None , a__ : Optional[torch.LongTensor] = None , a__ : Optional[bool] = None , a__ : Optional[bool] = None , ) -> Union[Tuple, ImageClassifierOutputWithNoAttention]:
'''simple docstring'''
_A = return_dict if return_dict is not None else self.config.use_return_dict
_A = self.poolformer(
a__ , output_hidden_states=a__ , return_dict=a__ , )
_A = outputs[0]
_A = self.classifier(self.norm(a__ ).mean([-2, -1] ) )
_A = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
_A = "regression"
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
_A = "single_label_classification"
else:
_A = "multi_label_classification"
if self.config.problem_type == "regression":
_A = MSELoss()
if self.num_labels == 1:
_A = loss_fct(logits.squeeze() , labels.squeeze() )
else:
_A = loss_fct(a__ , a__ )
elif self.config.problem_type == "single_label_classification":
_A = CrossEntropyLoss()
_A = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
_A = BCEWithLogitsLoss()
_A = loss_fct(a__ , a__ )
if not return_dict:
_A = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=a__ , logits=a__ , hidden_states=outputs.hidden_states )
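# --- Hedged restatement (not part of the original file) of how the
# classification head above infers `problem_type` from the labels:
# num_labels == 1 -> regression (MSELoss); integer labels -> single-label
# cross-entropy; anything else -> multi-label BCEWithLogitsLoss.
def _infer_problem_type(num_labels: int, labels_dtype: torch.dtype) -> str:
    if num_labels == 1:
        return "regression"
    if labels_dtype in (torch.long, torch.int):
        return "single_label_classification"
    return "multi_label_classification"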
| 163
| 1
|
'''simple docstring'''
def print_pascal_triangle(num_rows: int) -> None:
    triangle = generate_pascal_triangle(num_rows)
    for row_idx in range(num_rows):
        # Print left spaces
        for _ in range(num_rows - row_idx - 1):
            print(end=" ")
        # Print row values
        for col_idx in range(row_idx + 1):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx], end=" ")
            else:
                print(triangle[row_idx][col_idx], end="")
        print()


def generate_pascal_triangle(num_rows: int) -> list[list[int]]:
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0")
    triangle: list[list[int]] = []
    for current_row_idx in range(num_rows):
        current_row = populate_current_row(triangle, current_row_idx)
        triangle.append(current_row)
    return triangle


def populate_current_row(triangle: list[list[int]], current_row_idx: int) -> list[int]:
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1, current_row_idx):
        calculate_current_element(
            triangle, current_row, current_row_idx, current_col_idx)
    return current_row


def calculate_current_element(
    triangle: list[list[int]],
    current_row: list[int],
    current_row_idx: int,
    current_col_idx: int,
) -> None:
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt


def generate_pascal_triangle_optimized(num_rows: int) -> list[list[int]]:
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0")
    result: list[list[int]] = [[1]]
    for row_index in range(1, num_rows):
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length, 2))
        row_first_half = [
            temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1)
        ]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row = row_first_half + row_second_half
        result.append(row)
    return result


def benchmark() -> None:
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(f"{call:38} -- {timing:.4f} seconds")

    for value in range(15):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func, value)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
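    # Hedged equivalence check of the two generators above (rows follow from
    # the standard Pascal recurrence; num_rows=5 is arbitrary):
    assert generate_pascal_triangle(5) == generate_pascal_triangle_optimized(5) == [
        [1], [1, 1], [1, 2, 1], [1, 3, 3, 1], [1, 4, 6, 4, 1]
    ]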
| 321
|
'''simple docstring'''
import argparse
from collections import defaultdict
import yaml
PATH_TO_TOC = 'docs/source/en/_toctree.yml'
def clean_doc_toc(doc_list):
    counts = defaultdict(int)
    overview_doc = []
    new_doc_list = []
    for doc in doc_list:
        if "local" in doc:
            counts[doc["local"]] += 1
        if doc["title"].lower() == "overview":
            overview_doc.append({"local": doc["local"], "title": doc["title"]})
        else:
            new_doc_list.append(doc)
    doc_list = new_doc_list
    duplicates = [key for key, value in counts.items() if value > 1]
    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in doc_list if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others.")
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})
    # Add none duplicate-keys
    new_doc.extend([doc for doc in doc_list if "local" not in counts or counts[doc["local"]] == 1])
    new_doc = sorted(new_doc, key=lambda s: s["title"].lower())
    # "overview" gets special treatment and is always first
    if len(overview_doc) > 1:
        raise ValueError(f"{doc_list} has two 'overview' docs which is not allowed.")
    overview_doc.extend(new_doc)
    # Sort
    return overview_doc
def check_scheduler_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())
    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]
    # Then to the model doc
    scheduler_idx = 0
    while api_doc[scheduler_idx]["title"] != "Schedulers":
        scheduler_idx += 1
    scheduler_doc = api_doc[scheduler_idx]["sections"]
    new_scheduler_doc = clean_doc_toc(scheduler_doc)
    diff = False
    if new_scheduler_doc != scheduler_doc:
        diff = True
        if overwrite:
            api_doc[scheduler_idx]["sections"] = new_scheduler_doc
    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this.")
def check_pipeline_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())
    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]
    # Then to the model doc
    pipeline_idx = 0
    while api_doc[pipeline_idx]["title"] != "Pipelines":
        pipeline_idx += 1
    diff = False
    pipeline_docs = api_doc[pipeline_idx]["sections"]
    new_pipeline_docs = []
    # sort sub pipeline docs
    for pipeline_doc in pipeline_docs:
        if "section" in pipeline_doc:
            sub_pipeline_doc = pipeline_doc["section"]
            new_sub_pipeline_doc = clean_doc_toc(sub_pipeline_doc)
            if overwrite:
                pipeline_doc["section"] = new_sub_pipeline_doc
        new_pipeline_docs.append(pipeline_doc)
    # sort overall pipeline doc
    new_pipeline_docs = clean_doc_toc(new_pipeline_docs)
    if new_pipeline_docs != pipeline_docs:
        diff = True
        if overwrite:
            api_doc[pipeline_idx]["sections"] = new_pipeline_docs
    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
    args = parser.parse_args()
check_scheduler_doc(args.fix_and_overwrite)
check_pipeline_doc(args.fix_and_overwrite)
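    # Hedged usage sketch of clean_doc_toc (entries are invented): duplicates
    # sharing one title collapse to a single entry, "Overview" is pinned first,
    # and the rest is sorted alphabetically by title.
    _example = [
        {"local": "zeta", "title": "Zeta"},
        {"local": "overview", "title": "Overview"},
        {"local": "alpha", "title": "Alpha"},
        {"local": "alpha", "title": "Alpha"},
    ]
    assert clean_doc_toc(_example) == [
        {"local": "overview", "title": "Overview"},
        {"local": "alpha", "title": "Alpha"},
        {"local": "zeta", "title": "Zeta"},
    ]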
| 321
| 1
|
"""simple docstring"""
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
__SCREAMING_SNAKE_CASE : Optional[int] = logging.get_logger(__name__)
@add_end_docstrings(__lowerCamelCase )
class lowerCamelCase_( __lowerCamelCase ):
'''simple docstring'''
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ):
super().__init__(*__lowercase , **__lowercase )
requires_backends(self , '''vision''' )
self.check_model_type(
TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == '''tf''' else MODEL_FOR_VISION_2_SEQ_MAPPING )
def snake_case__ ( self , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=None ):
_lowerCamelCase = {}
_lowerCamelCase = {}
if prompt is not None:
_lowerCamelCase = prompt
if generate_kwargs is not None:
_lowerCamelCase = generate_kwargs
if max_new_tokens is not None:
if "generate_kwargs" not in forward_kwargs:
_lowerCamelCase = {}
if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
raise ValueError(
'''\'max_new_tokens\' is defined twice, once in \'generate_kwargs\' and once as a direct parameter,'''
''' please use only one''' )
_lowerCamelCase = max_new_tokens
return preprocess_params, forward_kwargs, {}
def __call__( self , lowerCamelCase__ , **lowerCamelCase__ ):
return super().__call__(__lowercase , **__lowercase )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__=None ):
_lowerCamelCase = load_image(__lowercase )
if prompt is not None:
if not isinstance(__lowercase , __lowercase ):
raise ValueError(
F"""Received an invalid text input, got - {type(__lowercase )} - but expected a single string. """
'''Note also that one single text can be provided for conditional image to text generation.''' )
_lowerCamelCase = self.model.config.model_type
if model_type == "git":
_lowerCamelCase = self.image_processor(images=__lowercase , return_tensors=self.framework )
_lowerCamelCase = self.tokenizer(text=__lowercase , add_special_tokens=__lowercase ).input_ids
_lowerCamelCase = [self.tokenizer.cls_token_id] + input_ids
_lowerCamelCase = torch.tensor(__lowercase ).unsqueeze(0 )
model_inputs.update({'''input_ids''': input_ids} )
elif model_type == "pix2struct":
_lowerCamelCase = self.image_processor(images=__lowercase , header_text=__lowercase , return_tensors=self.framework )
elif model_type != "vision-encoder-decoder":
# vision-encoder-decoder does not support conditional generation
_lowerCamelCase = self.image_processor(images=__lowercase , return_tensors=self.framework )
_lowerCamelCase = self.tokenizer(__lowercase , return_tensors=self.framework )
model_inputs.update(__lowercase )
else:
raise ValueError(F"""Model type {model_type} does not support conditional text generation""" )
else:
_lowerCamelCase = self.image_processor(images=__lowercase , return_tensors=self.framework )
if self.model.config.model_type == "git" and prompt is None:
_lowerCamelCase = None
return model_inputs
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__=None ):
        # Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch mode, the
        # pipeline will group them into a list of `None`, which fails `_forward`. Avoid this by checking it first.
if (
"input_ids" in model_inputs
and isinstance(model_inputs['''input_ids'''] , __lowercase )
and all(x is None for x in model_inputs['''input_ids'''] )
):
_lowerCamelCase = None
if generate_kwargs is None:
_lowerCamelCase = {}
# FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
# parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
# the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
# in the `_prepare_model_inputs` method.
_lowerCamelCase = model_inputs.pop(self.model.main_input_name )
_lowerCamelCase = self.model.generate(__lowercase , **__lowercase , **__lowercase )
return model_outputs
def snake_case__ ( self , lowerCamelCase__ ):
_lowerCamelCase = []
for output_ids in model_outputs:
_lowerCamelCase = {
'''generated_text''': self.tokenizer.decode(
__lowercase , skip_special_tokens=__lowercase , )
}
records.append(__lowercase )
return records
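# --- Hedged usage sketch (not part of the original file). The public entry
# point is `transformers.pipeline("image-to-text")`, which dispatches to the
# pipeline class above; the input may be a PIL image, a local path, or a URL,
# and models supporting conditional generation also accept `prompt=...`.
# Running this needs network access to download a checkpoint.
if __name__ == "__main__":
    from transformers import pipeline

    captioner = pipeline("image-to-text")  # optionally: model="<checkpoint>"
    print(captioner("path/or/url/to/image.jpg"))  # -> [{"generated_text": "..."}]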
| 350
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


class ConvNextImageProcessor(BaseImageProcessor):
    # Class name reconstructed: the 384 shortest-edge / crop_pct = 224/256 defaults match the ConvNeXT scheme.
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize=True, size=None, crop_pct=None, resample=PILImageResampling.BILINEAR, do_rescale=True, rescale_factor=1 / 255, do_normalize=True, image_mean=None, image_std=None, **kwargs):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 384}
        size = get_size_dict(size, default_to_square=False)
        self.do_resize = do_resize
        self.size = size
        # Default value set here for backwards compatibility where the value in config is None
        self.crop_pct = crop_pct if crop_pct is not None else 224 / 256
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image: np.ndarray, size: Dict[str, int], crop_pct: float, resample=PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"Size dictionary must contain 'shortest_edge' key. Got {size.keys()}")
        shortest_edge = size["shortest_edge"]

        if shortest_edge < 384:
            # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
            resize_shortest_edge = int(shortest_edge / crop_pct)
            resize_size = get_resize_output_image_size(image, size=resize_shortest_edge, default_to_square=False)
            image = resize(image=image, size=resize_size, resample=resample, data_format=data_format, **kwargs)
            # then crop to (shortest_edge, shortest_edge)
            return center_crop(image=image, size=(shortest_edge, shortest_edge), data_format=data_format, **kwargs)
        else:
            # warping (no cropping) when evaluated at 384 or larger
            return resize(
                image, size=(shortest_edge, shortest_edge), resample=resample, data_format=data_format, **kwargs
            )

    def rescale(self, image: np.ndarray, scale: float, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean, std, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(self, images: ImageInput, do_resize=None, size=None, crop_pct=None, resample=None, do_rescale=None, rescale_factor=None, do_normalize=None, image_mean=None, image_std=None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
            raise ValueError("crop_pct must be specified if size < 384.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
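A quick smoke test for the processor above. `ConvNextImageProcessor` is the reconstructed name used in this file, and the input is a random array, so only the output shape is meaningful.

import numpy as np

processor = ConvNextImageProcessor(size={"shortest_edge": 224})
dummy_image = np.random.randint(0, 256, (320, 480, 3), dtype=np.uint8)
batch = processor.preprocess(dummy_image, return_tensors="np")
print(batch["pixel_values"].shape)  # (1, 3, 224, 224): resize to 256, center-crop to 224, channels first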
"""simple docstring"""
import math
import qiskit
def quantum_full_adder(input_1: int = 1, input_2: int = 1, carry_in: int = 1) -> qiskit.result.counts.Counts:
    # parameter names restored; the obfuscated source collapsed all three into one
    if (
        isinstance(input_1, str)
        or isinstance(input_2, str)
        or isinstance(carry_in, str)
    ):
        raise TypeError("inputs must be integers.")

    if (input_1 < 0) or (input_2 < 0) or (carry_in < 0):
        raise ValueError("inputs must be positive.")

    if (
        (math.floor(input_1) != input_1)
        or (math.floor(input_2) != input_2)
        or (math.floor(carry_in) != carry_in)
    ):
        raise ValueError("inputs must be exact integers.")

    if (input_1 > 2) or (input_2 > 2) or (carry_in > 2):
        raise ValueError("inputs must be less or equal to 2.")

    # build registers
    qr = qiskit.QuantumRegister(4, "qr")
    cr = qiskit.ClassicalRegister(2, "cr")
    # list the entries
    entry = [input_1, input_2, carry_in]

    quantum_circuit = qiskit.QuantumCircuit(qr, cr)

    for i in range(0, 3):
        if entry[i] == 2:
            quantum_circuit.h(i)  # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i)  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i)  # for 0 entries

    # build the circuit
    quantum_circuit.ccx(0, 1, 3)  # ccx = toffoli gate
    quantum_circuit.cx(0, 1)
    quantum_circuit.ccx(1, 2, 3)
    quantum_circuit.cx(1, 2)
    quantum_circuit.cx(0, 1)
    quantum_circuit.measure([2, 3], cr)  # measure the last two qbits

    backend = qiskit.Aer.get_backend("aer_simulator")
    job = qiskit.execute(quantum_circuit, backend, shots=1000)

    return job.result().get_counts(quantum_circuit)
if __name__ == "__main__":
print(f"Total sum count for state is: {quantum_full_adder(1, 1, 1)}")
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def pack_examples(tok, src_examples, tgt_examples, max_tokens=1024):
    """Greedily merge consecutive (source, target) pairs until the tokenized length would exceed max_tokens."""
    finished_src, finished_tgt = [], []

    sorted_examples = list(zip(src_examples, tgt_examples))
    new_src, new_tgt = sorted_examples[0]

    def is_too_big(strang):
        return tok(strang, return_tensors="pt").input_ids.shape[1] > max_tokens

    for src, tgt in tqdm(sorted_examples[1:]):
        cand_src = new_src + " " + src
        cand_tgt = new_tgt + " " + tgt
        if is_too_big(cand_src) or is_too_big(cand_tgt):  # cant fit, finalize example
            finished_src.append(new_src)
            finished_tgt.append(new_tgt)
            new_src, new_tgt = src, tgt
        else:  # can fit, keep adding
            new_src, new_tgt = cand_src, cand_tgt

    # cleanup
    if new_src:
        assert new_tgt
        finished_src.append(new_src)
        finished_tgt.append(new_tgt)
    return finished_src, finished_tgt


def pack_data_dir(tok, data_dir: Path, max_tokens, save_path):
    save_path = Path(save_path)
    save_path.mkdir(exist_ok=True)
    for split in ["train"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        src_docs = [x.rstrip() for x in Path(src_path).open().readlines()]
        tgt_docs = [x.rstrip() for x in Path(tgt_path).open().readlines()]
        packed_src, packed_tgt = pack_examples(tok, src_docs, tgt_docs, max_tokens)
        print(f"packed {split} split from {len(src_docs)} examples -> {len(packed_src)}.")
        Path(save_path / f"{split}.source").open("w").write("\n".join(packed_src))
        Path(save_path / f"{split}.target").open("w").write("\n".join(packed_tgt))
    for split in ["val", "test"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        shutil.copyfile(src_path, save_path / f"{split}.source")
        shutil.copyfile(tgt_path, save_path / f"{split}.target")


def packer_cli():
    parser = argparse.ArgumentParser()
    parser.add_argument("--tok_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("--max_seq_len", type=int, default=128)
    parser.add_argument("--data_dir", type=str)
    parser.add_argument("--save_path", type=str)
    args = parser.parse_args()
    tokenizer = AutoTokenizer.from_pretrained(args.tok_name)
    return pack_data_dir(tokenizer, Path(args.data_dir), args.max_seq_len, args.save_path)
if __name__ == "__main__":
packer_cli()
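An illustrative invocation of the CLI above; the script filename and data paths are placeholders.

# python pack_dataset.py --tok_name facebook/bart-large-cnn --max_seq_len 1024 \
#     --data_dir ./cnn_dm --save_path ./cnn_dm_packed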
import os
from typing import Dict, List, Tuple, TypeVar, Union
__A = TypeVar("T")
__A = Union[List[T], Tuple[T, ...]]
__A = Union[T, List[T], Dict[str, T]]
__A = Union[str, bytes, os.PathLike]
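A small sketch of how aliases like these are typically used in signatures; the function below is illustrative only.

def flatten(batch: NestedDataStructureLike[int]) -> List[int]:
    """Flatten a value, list/tuple, or dict of nested ints into a flat list."""
    if isinstance(batch, dict):
        return [x for v in batch.values() for x in flatten(v)]
    if isinstance(batch, (list, tuple)):
        return [x for v in batch for x in flatten(v)]
    return [batch]

assert flatten({"a": [1, 2], "b": 3}) == [1, 2, 3]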
ENERGY_CONVERSION = {
    "joule": 1.0,
    "kilojoule": 1_000,
    "megajoule": 1_000_000,
    "gigajoule": 1_000_000_000,
    "wattsecond": 1.0,
    "watthour": 3_600,
    "kilowatthour": 3_600_000,
    "newtonmeter": 1.0,
    "calorie_nutr": 4_186.8,
    "kilocalorie_nutr": 4_186_800.00,
    "electronvolt": 1.602_176_634e-19,
    "britishthermalunit_it": 1_055.055_85,
    "footpound": 1.355_818,
}


def energy_conversion(from_type: str, to_type: str, value: float) -> float:
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
            f"Valid values are: {', '.join(ENERGY_CONVERSION)}"
        )
        raise ValueError(msg)
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
if __name__ == "__main__":
import doctest
doctest.testmod()
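Two spot checks that follow directly from the conversion table above:

assert energy_conversion("joule", "kilojoule", 1_000) == 1.0
assert energy_conversion("kilowatthour", "joule", 1) == 3_600_000.0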
"""simple docstring"""
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def _is_chinese_char(cp):
    if (
        (cp >= 0x4E00 and cp <= 0x9FFF)
        or (cp >= 0x3400 and cp <= 0x4DBF)  #
        or (cp >= 0x20000 and cp <= 0x2A6DF)  #
        or (cp >= 0x2A700 and cp <= 0x2B73F)  #
        or (cp >= 0x2B740 and cp <= 0x2B81F)  #
        or (cp >= 0x2B820 and cp <= 0x2CEAF)  #
        or (cp >= 0xF900 and cp <= 0xFAFF)
        or (cp >= 0x2F800 and cp <= 0x2FA1F)  #
    ):  #
        return True
    return False


def is_chinese(word):
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1


def get_chinese_word(tokens):
    word_set = set()
    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list


def add_sub_symbol(bert_tokens, chinese_word_set):
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])

    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            match_len = min(end - start, max_word_len)
            for i in range(match_len, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word


def prepare_ref(lines, ltp_tokenizer, bert_tokenizer):
    ltp_res = []
    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.pipeline(lines[i : i + 100], tasks=["cws"]).cws
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save positions of chinese subwords that start with ##, which means they are part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)

    assert len(ref_ids) == len(bert_res)

    return ref_ids


def main(args):
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)

    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)

    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="prepare_chinese_ref")
parser.add_argument(
'''--file_name''',
required=False,
type=str,
default='''./resources/chinese-demo.txt''',
help='''file need process, same as training data in lm''',
)
parser.add_argument(
'''--ltp''',
required=False,
type=str,
default='''./resources/ltp''',
help='''resources for LTP tokenizer, usually a path''',
)
parser.add_argument(
'''--bert''',
required=False,
type=str,
default='''./resources/robert''',
help='''resources for Bert tokenizer''',
)
parser.add_argument(
'''--save_path''',
required=False,
type=str,
default='''./resources/ref.txt''',
help='''path to save res''',
)
    args = parser.parse_args()
main(args)
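An illustrative run of the script above, reusing its argparse defaults (the script filename is a placeholder):

# python run_chinese_ref.py --file_name ./resources/chinese-demo.txt \
#     --ltp ./resources/ltp --bert ./resources/robert --save_path ./resources/ref.txt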
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    UniSpeechConfig,
    UniSpeechForCTC,
    UniSpeechForPreTraining,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2PhonemeCTCTokenizer,
    Wav2Vec2Processor,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "ctc_proj",
"mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
"ctc_proj",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type, is_finetuned):
    for attribute in key.split("."):
        if is_finetuned:
            if attribute in ["quantizer", "project_q", "project_hid"]:
                # those layers are only relevant for pretraining and should be dropped
                return

            if attribute == "ctc_proj":
                # we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
                attribute = "lm_head"

        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.unispeech.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type, is_finetuned)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_unispeech_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True):
    if config_path is not None:
        config = UniSpeechConfig.from_pretrained(config_path)
    else:
        config = UniSpeechConfig()

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load_from_json(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 42
            vocab_dict["<s>"] = 43
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2PhonemeCTCTokenizer(
                vocab_path, unk_token=target_dict.unk_word, pad_token=target_dict.pad_word, bos_token=target_dict.bos_word, eos_token=target_dict.eos_word, word_delimiter_token="|", do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1, sampling_rate=16_000, padding_value=0, do_normalize=True, return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_unispeech = UniSpeechForCTC(config)
    else:
        hf_unispeech = UniSpeechForPreTraining(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1]), "w2v_path": checkpoint_path}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    model = model[0].eval()

    recursively_load_weights(model, hf_unispeech, is_finetuned)

    hf_unispeech.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
    args = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
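A sketch of how the converter above is typically driven; the script name and all paths are placeholders.

# python convert_unispeech_checkpoint.py --checkpoint_path ./unispeech.pt \
#     --dict_path ./dict.ltr.txt --pytorch_dump_folder_path ./unispeech-hf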
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_lilt": ["LILT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LiltConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_lilt"] = [
"LILT_PRETRAINED_MODEL_ARCHIVE_LIST",
"LiltForQuestionAnswering",
"LiltForSequenceClassification",
"LiltForTokenClassification",
"LiltModel",
"LiltPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lilt import (
LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
LiltPreTrainedModel,
)
else:
import sys
UpperCAmelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
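The file above wires up deferred imports: the heavy modeling submodule is only loaded when one of its attributes is first accessed. A minimal standalone sketch of the same pattern with importlib (the class and names here are illustrative, not the transformers implementation):

import importlib
import types


class LazyModule(types.ModuleType):
    """Defer importing a submodule until one of its attributes is requested."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported attribute back to the submodule that defines it
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module(f".{self._attr_to_module[attr]}", self.__name__)
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so __getattr__ is not hit again
        return value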
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt",
"junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt",
"junnyu/roformer_chinese_char_small": (
"https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt"
),
"junnyu/roformer_chinese_char_base": (
"https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt"
),
"junnyu/roformer_small_discriminator": (
"https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt"
),
"junnyu/roformer_small_generator": (
"https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt"
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"junnyu/roformer_chinese_small": 1536,
"junnyu/roformer_chinese_base": 1536,
"junnyu/roformer_chinese_char_small": 512,
"junnyu/roformer_chinese_char_base": 512,
"junnyu/roformer_small_discriminator": 128,
"junnyu/roformer_small_generator": 128,
}
PRETRAINED_INIT_CONFIGURATION = {
"junnyu/roformer_chinese_small": {"do_lower_case": True},
"junnyu/roformer_chinese_base": {"do_lower_case": True},
"junnyu/roformer_chinese_char_small": {"do_lower_case": True},
"junnyu/roformer_chinese_char_base": {"do_lower_case": True},
"junnyu/roformer_small_discriminator": {"do_lower_case": True},
"junnyu/roformer_small_generator": {"do_lower_case": True},
}
class RoFormerTokenizerFast(PreTrainedTokenizerFast):
    # class and method names reconstructed from the slow RoFormerTokenizer referenced below
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormerTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            pre_tok_state.get("lowercase", do_lower_case) != do_lower_case
            or pre_tok_state.get("strip_accents", strip_accents) != strip_accents
        ):
            pre_tok_class = getattr(normalizers, pre_tok_state.pop("type"))
            pre_tok_state["lowercase"] = do_lower_case
            pre_tok_state["strip_accents"] = strip_accents
            self.backend_tokenizer.normalizer = pre_tok_class(**pre_tok_state)

        self.do_lower_case = do_lower_case

    def __getstate__(self):
        state = self.__dict__.copy()
        state["_tokenizer"].pre_tokenizer = BertPreTokenizer()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        vocab = self.__dict__["_tokenizer"].get_vocab()
        self.__dict__["_tokenizer"].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab))

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def save_pretrained(self, save_directory, legacy_format=None, filename_prefix=None, push_to_hub=False, **kwargs):
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        return super().save_pretrained(save_directory, legacy_format, filename_prefix, push_to_hub, **kwargs)
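A brief usage sketch for the tokenizer above (requires `jieba` for the custom pre-tokenizer; the checkpoint name is taken from the vocab map earlier in this file, and the exact token output is illustrative):

tokenizer = RoFormerTokenizerFast.from_pretrained("junnyu/roformer_chinese_base")
print(tokenizer.tokenize("今天天气非常好。"))  # jieba-style word pieces rather than per-character splits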
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''hustvl/yolos-small''': '''https://huggingface.co/hustvl/yolos-small/resolve/main/config.json''',
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class YolosConfig(PretrainedConfig):
    # class name reconstructed from the "yolos" model_type and the archive map above
    model_type = "yolos"

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3_072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, image_size=[512, 864], patch_size=16, num_channels=3, qkv_bias=True, num_detection_tokens=100, use_mid_position_embeddings=True, auxiliary_loss=False, class_cost=1, bbox_cost=5, giou_cost=2, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.1, **kwargs):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient


class YolosOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
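A small sketch exercising the reconstructed classes above (assuming, as in transformers, that an OnnxConfig takes the model config as its first argument):

config = YolosConfig(num_detection_tokens=50)
onnx_config = YolosOnnxConfig(config)
print(config.model_type)         # "yolos"
print(dict(onnx_config.inputs))  # {'pixel_values': {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}}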
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
_CITATION = '''\
@misc{wu2016googles,
title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
and Jeffrey Dean},
year={2016},
eprint={1609.08144},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
'''
_DESCRIPTION = '''\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the \'GLEU score\'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score\'s range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
'''
_KWARGS_DESCRIPTION = '''\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.
Args:
predictions (list of str): list of translations to score.
Each translation should be tokenized into a list of tokens.
references (list of list of str): list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.
Returns:
\'google_bleu\': google_bleu score
Examples:
Example 1:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results["google_bleu"], 2))
0.44
Example 2:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results["google_bleu"], 2))
0.61
Example 3:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
>>> print(round(results["google_bleu"], 2))
0.53
Example 4:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)
>>> print(round(results["google_bleu"], 2))
0.4
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class GoogleBleu(datasets.Metric):
    def _info(self) -> MetricInfo:
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
        )

    def _compute(self, predictions: List[List[str]], references: List[List[List[str]]], min_len: int = 1, max_len: int = 4) -> Dict[str, float]:
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len
            )
        }
'''simple docstring'''
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
logger = logging.getLogger(__name__)
class NER(TokenClassificationTask):
    def __init__(self, label_idx=-1):
        # in NER datasets, the last column is usually reserved for NER label
        self.label_idx = label_idx

    def read_examples_from_file(self, data_dir, mode: Union[Split, str]) -> List[InputExample]:
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f"{mode}.txt")
        guid_index = 1
        examples = []
        with open(file_path, encoding="utf-8") as f:
            words = []
            labels = []
            for line in f:
                if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                    if words:
                        examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
                        guid_index += 1
                        words = []
                        labels = []
                else:
                    splits = line.split(" ")
                    words.append(splits[0])
                    if len(splits) > 1:
                        labels.append(splits[self.label_idx].replace("\n", ""))
                    else:
                        # Examples could have no label for mode = "test"
                        labels.append("O")
            if words:
                examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
        return examples

    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List) -> None:
        example_id = 0
        for line in test_input_reader:
            if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                writer.write(line)
                if not preds_list[example_id]:
                    example_id += 1
            elif preds_list[example_id]:
                output_line = line.split()[0] + " " + preds_list[example_id].pop(0) + "\n"
                writer.write(output_line)
            else:
                logger.warning("Maximum sequence length exceeded: No prediction for '%s'.", line.split()[0])

    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
        else:
            return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]


class Chunk(NER):
    def __init__(self):
        # in CONLL2003 dataset chunk column is second-to-last
        super().__init__(label_idx=-2)

    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
        else:
            return [
                "O",
                "B-ADVP",
                "B-INTJ",
                "B-LST",
                "B-PRT",
                "B-NP",
                "B-SBAR",
                "B-VP",
                "B-ADJP",
                "B-CONJP",
                "B-PP",
                "I-ADVP",
                "I-INTJ",
                "I-LST",
                "I-PRT",
                "I-NP",
                "I-SBAR",
                "I-VP",
                "I-ADJP",
                "I-CONJP",
                "I-PP",
            ]


class POS(TokenClassificationTask):
    def read_examples_from_file(self, data_dir, mode: Union[Split, str]) -> List[InputExample]:
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f"{mode}.txt")
        guid_index = 1
        examples = []

        with open(file_path, encoding="utf-8") as f:
            for sentence in parse_incr(f):
                words = []
                labels = []
                for token in sentence:
                    words.append(token["form"])
                    labels.append(token["upos"])
                assert len(words) == len(labels)
                if words:
                    examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
                    guid_index += 1
        return examples

    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List) -> None:
        example_id = 0
        for sentence in parse_incr(test_input_reader):
            s_p = preds_list[example_id]
            out = ""
            for token in sentence:
                out += f'{token["form"]} ({token["upos"]}|{s_p.pop(0)}) '
            out += "\n"
            writer.write(out)
            example_id += 1

    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                return f.read().splitlines()
        else:
            return [
                "ADJ",
                "ADP",
                "ADV",
                "AUX",
                "CCONJ",
                "DET",
                "INTJ",
                "NOUN",
                "NUM",
                "PART",
                "PRON",
                "PROPN",
                "PUNCT",
                "SCONJ",
                "SYM",
                "VERB",
                "X",
            ]
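A minimal illustration of the CoNLL-style input that NER.read_examples_from_file expects; the data and the temp-file setup are hypothetical.

import tempfile

conll = "-DOCSTART- -X- O O\n\nEU NNP B-NP B-ORG\nrejects VBZ B-VP O\n\n"
with tempfile.TemporaryDirectory() as tmp:
    with open(os.path.join(tmp, "train.txt"), "w", encoding="utf-8") as f:
        f.write(conll)
    examples = NER().read_examples_from_file(tmp, "train")
    print(examples[0].words, examples[0].labels)  # ['EU', 'rejects'] ['B-ORG', 'O']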
'''simple docstring'''
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
AXIS_A = 6378137.0
AXIS_B = 6356752.314245
EQUATORIAL_RADIUS = 6378137
def lamberts_ellipsoidal_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    """Approximate the surface distance in metres between two lat/lon points using Lambert's formula."""
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    # Parametric latitudes
    # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
    b_lat1 = atan((1 - flattening) * tan(radians(lat1)))
    b_lat2 = atan((1 - flattening) * tan(radians(lat2)))

    # Compute central angle between two points
    # using haversine theta. sigma = haversine_distance / equatorial radius
    sigma = haversine_distance(lat1, lon1, lat2, lon2) / EQUATORIAL_RADIUS

    # Intermediate P and Q values
    p_value = (b_lat1 + b_lat2) / 2
    q_value = (b_lat2 - b_lat1) / 2

    # Intermediate X value
    # X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
    x_numerator = (sin(p_value) ** 2) * (cos(q_value) ** 2)
    x_demonimator = cos(sigma / 2) ** 2
    x_value = (sigma - sin(sigma)) * (x_numerator / x_demonimator)

    # Intermediate Y value
    # Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
    y_numerator = (cos(p_value) ** 2) * (sin(q_value) ** 2)
    y_denominator = sin(sigma / 2) ** 2
    y_value = (sigma + sin(sigma)) * (y_numerator / y_denominator)

    return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
if __name__ == "__main__":
import doctest
doctest.testmod()
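A quick smoke test for the function above; the coordinates are illustrative (approximately San Francisco and Yosemite), and only the order of magnitude of the printed distance is asserted by eye.

SAN_FRANCISCO = (37.774856, -122.424227)
YOSEMITE = (37.864742, -119.537521)
print(f"{lamberts_ellipsoidal_distance(*SAN_FRANCISCO, *YOSEMITE):.0f} m")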
'''simple docstring'''
def match_pattern(input_string: str, pattern: str) -> bool:
    """Bottom-up DP regex matching where '.' matches any one character and 'c*' matches zero or more 'c'."""
    len_string = len(input_string) + 1
    len_pattern = len(pattern) + 1

    # dp is a 2d matrix where dp[i][j] denotes whether prefix string of
    # length i of input_string matches with prefix string of length j of
    # given pattern.
    # "dp" stands for dynamic programming.
    dp = [[0 for i in range(len_pattern)] for j in range(len_string)]

    # since string of zero length match pattern of zero length
    dp[0][0] = 1

    # since pattern of zero length will never match with string of non-zero length
    for i in range(1, len_string):
        dp[i][0] = 0

    # since string of zero length will match with pattern where there
    # is at least one * alternatively
    for j in range(1, len_pattern):
        dp[0][j] = dp[0][j - 2] if pattern[j - 1] == "*" else 0

    # now using bottom-up approach to find for all remaining lengths
    for i in range(1, len_string):
        for j in range(1, len_pattern):
            if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
                dp[i][j] = dp[i - 1][j - 1]

            elif pattern[j - 1] == "*":
                if dp[i][j - 2] == 1:
                    dp[i][j] = 1
                elif pattern[j - 2] in (input_string[i - 1], "."):
                    dp[i][j] = dp[i - 1][j]
                else:
                    dp[i][j] = 0
            else:
                dp[i][j] = 0

    return bool(dp[-1][-1])
if __name__ == "__main__":
import doctest
doctest.testmod()
# inputing the strings
# input_string = input("input a string :")
# pattern = input("input a pattern :")
    input_string = 'aab'
    pattern = 'c*a*b'
# using function to check whether given string matches the given pattern
if match_pattern(input_string, pattern):
print(f'{input_string} matches the given pattern {pattern}')
else:
print(f'{input_string} does not match with the given pattern {pattern}')
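A few extra spot checks whose results follow from the DP above:

assert match_pattern("aab", "c*a*b") is True   # 'c*' matches empty, 'a*' matches 'aa'
assert match_pattern("aaa", "aa") is False     # plain pattern is one 'a' short
assert match_pattern("aaa", "a.a") is True     # '.' matches the middle character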
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import GPT2Tokenizer, TFOPTForCausalLM, TFOPTModel
def prepare_opt_inputs_dict(config, input_ids, attention_mask=None, head_mask=None):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class TFOPTModelTester:
    config_cls = OPTConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0, embed_dim=16, word_embed_proj_dim=16):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.embed_dim = embed_dim
        self.word_embed_proj_dim = word_embed_proj_dim
        self.is_encoder_decoder = False

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        config = self.config_cls(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, embed_dim=self.embed_dim, word_embed_proj_dim=self.word_embed_proj_dim, is_encoder_decoder=False, **self.config_updates,
        )
        inputs_dict = prepare_opt_inputs_dict(config, input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFOPTModel(config=config)
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice, output_from_past_slice, rtol=1e-3)
@require_tf
class TFOPTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFOPTForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFOPTModel, "text-generation": TFOPTForCausalLM} if is_tf_available() else {}
    )
    is_encoder_decoder = False
    test_pruning = False
    test_onnx = False
    onnx_min_opset = 10

    def setUp(self):
        self.model_tester = TFOPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OPTConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
    def test_resize_token_embeddings(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def _get_word_embedding_weight(model, embedding_layer):
            if hasattr(embedding_layer, "weight"):
                return embedding_layer.weight
            else:
                # Here we build the word embeddings weights if not exists.
                # And then we retry to get the attribute once built.
                model.build()
                if hasattr(embedding_layer, "weight"):
                    return embedding_layer.weight
                else:
                    return None

        for model_class in self.all_model_classes:
            for size in [config.vocab_size - 10, config.vocab_size + 10]:
                # build the embeddings
                model = model_class(config=config)
                old_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                old_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())

                # reshape the embeddings
                model.resize_token_embeddings(size)
                new_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                new_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())

                # check that the resized embeddings size matches the desired size.
                assert_size = size if size is not None else config.vocab_size

                self.assertEqual(new_input_embeddings.shape[0], assert_size)

                # check that weights remain the same after resizing
                models_equal = True
                for p1, p2 in zip(old_input_embeddings.value(), new_input_embeddings.value()):
                    if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                        models_equal = False
                self.assertTrue(models_equal)

                if old_output_embeddings is not None and new_output_embeddings is not None:
                    self.assertEqual(new_output_embeddings.shape[0], assert_size)

                    models_equal = True
                    for p1, p2 in zip(old_output_embeddings.value(), new_output_embeddings.value()):
                        if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                            models_equal = False
                    self.assertTrue(models_equal)
def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)
@require_tf
class TFOPTHeadTests(unittest.TestCase):
    vocab_size = 99

    def _get_config_and_data(self):
        eos_column_vector = tf.ones((4, 1), dtype=tf.int32) * 2
        input_ids = tf.concat([ids_tensor((4, 6), self.vocab_size - 3) + 3, eos_column_vector], axis=1)
        batch_size = input_ids.shape[0]
        config = OPTConfig(
            vocab_size=self.vocab_size, hidden_size=24, num_hidden_layers=2, num_attention_heads=2, ffn_dim=32, max_position_embeddings=48, eos_token_id=2, pad_token_id=1, bos_token_id=0,
        )
        return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class TFOPTModelIntegrationTests(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = TFOPTModel.from_pretrained("facebook/opt-350m")
        input_ids = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = tf.not_equal(input_ids, model.config.pad_token_id)
        with tf.GradientTape():
            output = model(input_ids=input_ids, attention_mask=attention_mask).last_hidden_state
        expected_shape = (1, 11, 512)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [[-0.2873, -1.9218, -0.3033], [-1.2710, -0.1338, -0.1902], [0.4095, 0.1214, -1.3121]]
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-3))

        xla_generate = tf.function(model, jit_compile=True)
        output = xla_generate(input_ids, attention_mask)[0]
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-2))
@require_tf
@slow
class TFOPTEmbeddingsTest(unittest.TestCase):
"""simple docstring"""
    def setUp(self):
        super().setUp()
        self.path_model = "facebook/opt-350m"
    def test_logits(self):
        model = TFOPTForCausalLM.from_pretrained(self.path_model)
        tokenizer = GPTaTokenizer.from_pretrained(self.path_model)
        prompts = [
            "Today is a beautiful day and I want to",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]
        # verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
        inputs = tokenizer(prompts, return_tensors="tf", padding=True, add_special_tokens=False)
        logits = tf.math.reduce_mean(model(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        logits_meta = tf.constant(
            [
                [1.3851, -13.8923, -10.5229, -10.7533, -0.2309, -10.2384, -0.5365, -9.0947, -5.1670],
                [-4.7073, -10.6276, -3.9415, -21.5242, -0.2822, -0.2822, -0.2822, -0.2822, -0.2822],
                [0.6247, -3.4229, -8.9179, -1.4297, -14.1650, 1.4146, -9.0218, -0.2703, -0.2703],
                [6.4783, -1.9913, -10.7926, -2.3336, 1.5092, -0.9974, -6.8213, 1.3477, 1.3477],
            ]
        )
        self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4))
        xla_generate = tf.function(model, jit_compile=True)
        logits = tf.math.reduce_mean(xla_generate(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4))
@require_tf
@slow
class TFOPTGenerationTest(unittest.TestCase):
"""simple docstring"""
    @property
    def prompts(self):
        return [
            "Today is a beautiful day and I want",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]
    def test_generation_pre_attn_layer_norm(self):
        model_id = "facebook/opt-125m"
        EXPECTED_OUTPUTS = [
            "Today is a beautiful day and I want to",
            "In the city of New York, the city",
            "Paris is the capital of France and the capital",
            "Computers and mobile phones have taken over the",
        ]
        predicted_outputs = []
        tokenizer = GPTaTokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)
        for prompt in self.prompts:
            input_ids = tokenizer(prompt, return_tensors="tf").input_ids
            generated_ids = model.generate(input_ids, max_length=10)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string
        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)
    def test_batch_generation(self):
        model_id = "facebook/opt-350m"
        tokenizer = GPTaTokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)
        tokenizer.padding_side = "left"
        # use different length sentences to test batching
        sentences = [
            "Hello, my dog is a little",
            "Today, I",
        ]
        inputs = tokenizer(sentences, return_tensors="tf", padding=True)
        input_ids = inputs["input_ids"]
        outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"])
        inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded)
        num_paddings = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
            tf.cast(inputs["attention_mask"][-1], tf.int64)
        )
        inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings)
        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)
        expected_output_sentence = [
            "Hello, my dog is a little bit of a dork.\nI'm a little bit",
            "Today, I was in the middle of a conversation with a friend about the",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
    def test_generation_post_attn_layer_norm(self):
        model_id = "facebook/opt-350m"
        EXPECTED_OUTPUTS = [
            "Today is a beautiful day and I want to",
            "In the city of San Francisco, the city",
            "Paris is the capital of France and the capital",
            "Computers and mobile phones have taken over the",
        ]
        predicted_outputs = []
        tokenizer = GPTaTokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)
        for prompt in self.prompts:
            input_ids = tokenizer(prompt, return_tensors="tf").input_ids
            generated_ids = model.generate(input_ids, max_length=10)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string
        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)
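# Illustrative sketch (an addition, not part of the original test suite): the batch-generation
# test above sets ``tokenizer.padding_side = "left"`` because decoder-only models such as OPT
# continue from the final prompt token; with right padding, generation would resume from pad
# tokens instead. The helper below is hypothetical and only restates that pattern.
def _demo_left_padded_generation():
    tokenizer = GPTaTokenizer.from_pretrained("facebook/opt-350m")
    model = TFOPTForCausalLM.from_pretrained("facebook/opt-350m")
    tokenizer.padding_side = "left"  # keep pad tokens away from the continuation point
    batch = tokenizer(["Hello, my dog is a little", "Today, I"], return_tensors="tf", padding=True)
    generated = model.generate(input_ids=batch["input_ids"], attention_mask=batch["attention_mask"])
    return tokenizer.batch_decode(generated, skip_special_tokens=True)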
| 107
| 0
|
"""simple docstring"""
import argparse
import torch
from transformers import GPTaLMHeadModel, RobertaForMaskedLM
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description=(
            "Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned"
            " Distillation"
        )
    )
    parser.add_argument("--model_type", default="roberta", choices=["roberta", "gpt2"])
    parser.add_argument("--model_name", default="roberta-large", type=str)
    parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_roberta_048131723.pth", type=str)
    parser.add_argument("--vocab_transform", action="store_true")
    args = parser.parse_args()
if args.model_type == "roberta":
__A : str = RobertaForMaskedLM.from_pretrained(args.model_name)
__A : List[str] = "roberta"
elif args.model_type == "gpt2":
__A : Dict = GPTaLMHeadModel.from_pretrained(args.model_name)
__A : Optional[int] = "transformer"
__A : List[Any] = model.state_dict()
__A : Union[str, Any] = {}
# Embeddings #
if args.model_type == "gpt2":
for param_name in ["wte.weight", "wpe.weight"]:
__A : Optional[int] = state_dict[f'''{prefix}.{param_name}''']
else:
for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
__A : Any = f'''{prefix}.embeddings.{w}.weight'''
__A : Dict = state_dict[param_name]
for w in ["weight", "bias"]:
__A : Union[str, Any] = f'''{prefix}.embeddings.LayerNorm.{w}'''
__A : str = state_dict[param_name]
# Transformer Blocks #
__A : Optional[Any] = 0
for teacher_idx in [0, 2, 4, 7, 9, 11]:
if args.model_type == "gpt2":
for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
for w in ["weight", "bias"]:
__A : Union[str, Any] = state_dict[
f'''{prefix}.h.{teacher_idx}.{layer}.{w}'''
]
__A : str = state_dict[f'''{prefix}.h.{teacher_idx}.attn.bias''']
else:
for layer in [
"attention.self.query",
"attention.self.key",
"attention.self.value",
"attention.output.dense",
"attention.output.LayerNorm",
"intermediate.dense",
"output.dense",
"output.LayerNorm",
]:
for w in ["weight", "bias"]:
__A : Optional[int] = state_dict[
f'''{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}'''
]
std_idx += 1
# Language Modeling Head ###s
if args.model_type == "roberta":
for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
__A : Optional[Any] = state_dict[f'''{layer}''']
if args.vocab_transform:
for w in ["weight", "bias"]:
__A : Tuple = state_dict[f'''lm_head.dense.{w}''']
__A : Tuple = state_dict[f'''lm_head.layer_norm.{w}''']
elif args.model_type == "gpt2":
for w in ["weight", "bias"]:
__A : Any = state_dict[f'''{prefix}.ln_f.{w}''']
__A : Optional[int] = state_dict["lm_head.weight"]
print(f'''N layers selected for distillation: {std_idx}''')
print(f'''Number of params transferred for distillation: {len(compressed_sd.keys())}''')
print(f'''Save transferred checkpoint to {args.dump_checkpoint}.''')
torch.save(compressed_sd, args.dump_checkpoint)
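# Follow-up sketch (an assumption, not part of this script): the dumped ``compressed_sd`` is meant
# to seed a student whose module names mirror the teacher's, e.g. for the RoBERTa branch:
#
#     from transformers import RobertaConfig
#
#     student_config = RobertaConfig.from_pretrained(args.model_name, num_hidden_layers=6)
#     student = RobertaForMaskedLM(student_config)
#     student.load_state_dict(torch.load(args.dump_checkpoint, map_location="cpu"), strict=False)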
| 351
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class TFMobileBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
    all_model_classes = (
(
TFMobileBertModel,
TFMobileBertForMaskedLM,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertForMultipleChoice,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
"""feature-extraction""": TFMobileBertModel,
"""fill-mask""": TFMobileBertForMaskedLM,
"""question-answering""": TFMobileBertForQuestionAnswering,
"""text-classification""": TFMobileBertForSequenceClassification,
"""token-classification""": TFMobileBertForTokenClassification,
"""zero-shot""": TFMobileBertForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["next_sentence_label"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
        return inputs_dict
class TFMobileBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        embedding_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.embedding_size = embedding_size
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_mobilebert_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFMobileBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        inputs = [input_ids, input_mask]
        result = model(inputs)
        result = model(input_ids)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)
        )
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_mobilebert_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFMobileBertForMaskedLM(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_mobilebert_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFMobileBertForNextSentencePrediction(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_mobilebert_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFMobileBertForPreTraining(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(
            result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size)
        )
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_mobilebert_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFMobileBertForSequenceClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_mobilebert_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFMobileBertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_mobilebert_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFMobileBertForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_mobilebert_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFMobileBertForQuestionAnswering(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
    def setUp(self):
        self.model_tester = TFMobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mobilebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        # for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["google/mobilebert-uncased"]:
            model = TFMobileBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFMobileBertModelIntegrationTest(unittest.TestCase):
"""simple docstring"""
    @slow
    def test_inference_masked_lm(self):
        model = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 30522]
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [
                [
                    [-4.5919547, -9.248295, -9.645256],
                    [-6.7306175, -6.440284, -6.6052837],
                    [-7.2743506, -6.7847915, -6.024673],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
| 326
| 0
|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class StableDiffusionKDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_diffusion_1(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        sd_pipe.set_scheduler("sample_euler")
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_2(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        sd_pipe.set_scheduler("sample_euler")
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-1
    def test_stable_diffusion_karras_sigmas(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        sd_pipe.set_scheduler("sample_dpmpp_2m")
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=7.5, num_inference_steps=15, output_type="np", use_karras_sigmas=True, )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.11381689, 0.12112921, 0.1389457, 0.12549606, 0.1244964, 0.10831517, 0.11562866, 0.10867816, 0.10499048])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
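# For orientation, a minimal non-test sketch of the pattern exercised above. The ids passed to
# ``set_scheduler`` ("sample_euler", "sample_dpmpp_2m") are k-diffusion sampler names, and
# ``use_karras_sigmas=True`` swaps in the Karras noise schedule; the helper name is our addition.
def _demo_k_diffusion_sampler():
    sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
    sd_pipe = sd_pipe.to(torch_device)
    sd_pipe.set_scheduler("sample_dpmpp_2m")
    generator = torch.manual_seed(0)
    output = sd_pipe(
        ["A painting of a squirrel eating a burger"],
        generator=generator,
        num_inference_steps=15,
        use_karras_sigmas=True,
        output_type="np",
    )
    return output.images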
| 153
|
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class DanceDiffusionPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 100,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        audio_length_in_s: Optional[float] = None,
        return_dict: bool = True,
    ) -> Union[AudioPipelineOutput, Tuple]:
        if audio_length_in_s is None:
            audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate
        sample_size = audio_length_in_s * self.unet.config.sample_rate

        down_scale_factor = 2 ** len(self.unet.up_blocks)
        if sample_size < 3 * down_scale_factor:
            raise ValueError(
                f"{audio_length_in_s} is too small. Make sure it's bigger or equal to"
                f" {3 * down_scale_factor / self.unet.config.sample_rate}."
            )

        original_sample_size = int(sample_size)
        if sample_size % down_scale_factor != 0:
            sample_size = (
                (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
            ) * down_scale_factor
            logger.info(
                f"{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"
                f" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"
                " process."
            )
        sample_size = int(sample_size)

        dtype = next(iter(self.unet.parameters())).dtype
        shape = (batch_size, self.unet.config.in_channels, sample_size)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        audio = randn_tensor(shape, generator=generator, device=self.device, dtype=dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, device=audio.device)
        self.scheduler.timesteps = self.scheduler.timesteps.to(dtype)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(audio, t).sample
            # 2. compute previous audio sample: x_t -> x_t-1
            audio = self.scheduler.step(model_output, t, audio).prev_sample

        audio = audio.clamp(-1, 1).float().cpu().numpy()
        audio = audio[:, :, :original_sample_size]

        if not return_dict:
            return (audio,)
        return AudioPipelineOutput(audios=audio)
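# A minimal usage sketch for the pipeline above (illustrative; the checkpoint id is an
# assumption, not something this file references):
#
#     pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
#     output = pipe(batch_size=1, num_inference_steps=100, audio_length_in_s=4.0)
#     audio = output.audios[0]  # numpy array of shape (channels, samples)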
| 153
| 1
|
'''simple docstring'''
def solution(n: int = 1000000) -> int:
    """Return the starting number below ``n`` that produces the longest Collatz chain."""
    largest_number = 1
    pre_counter = 1
    counters = {1: 1}
    for input1 in range(2, n):
        counter = 0
        number = input1
        while True:
            if number in counters:
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1
        if input1 not in counters:
            counters[input1] = counter
        if counter > pre_counter:
            largest_number = input1
            pre_counter = counter
    return largest_number
if __name__ == "__main__":
print(solution(int(input().strip())))
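# Worked example (added for illustration): the chain starting at 9 runs
# 9 -> 28 -> 14 -> 7 -> 22 -> 11 -> 34 -> 17 -> 52 -> 26 -> 13 -> 40 -> 20 -> 10 -> 5 -> 16 -> 8 -> 4 -> 2 -> 1,
# i.e. 20 terms, the longest of any starting number below 14, so:
#
#     assert solution(14) == 9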
| 135
|
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEPipeline
    params = ["prompt"]
    batch_params = ["prompt"]
    required_optional_params = [
'num_images_per_prompt',
'num_inference_steps',
'generator',
'latents',
'guidance_scale',
'frame_size',
'output_type',
'return_dict',
]
    test_gpu_offload = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8
    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer
    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        return CLIPTextModelWithProjection(config)
    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
"""num_attention_heads""": 2,
"""attention_head_dim""": 16,
"""embedding_dim""": self.time_input_dim,
"""num_embeddings""": 32,
"""embedding_proj_dim""": self.text_embedder_hidden_size,
"""time_embed_dim""": self.time_embed_dim,
"""num_layers""": 1,
"""clip_embed_dim""": self.time_input_dim * 2,
"""additional_embeddings""": 0,
"""time_embed_act_fn""": """gelu""",
"""norm_in_type""": """layer""",
"""encoder_hid_proj_type""": None,
"""added_emb_type""": None,
}
        model = PriorTransformer(**model_kwargs)
        return model
    @property
    def dummy_renderer(self):
        torch.manual_seed(0)
        model_kwargs = {
"""param_shapes""": (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
"""d_latent""": self.time_input_dim,
"""d_hidden""": self.renderer_dim,
"""n_output""": 12,
"""background""": (
0.1,
0.1,
0.1,
),
}
        model = ShapERenderer(**model_kwargs)
        return model
    def get_dummy_components(self):
        prior = self.dummy_prior
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        renderer = self.dummy_renderer
        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp", num_train_timesteps=1024, prediction_type="sample", use_karras_sigmas=True, clip_sample=True, clip_sample_range=1.0, )
        components = {
            "prior": prior,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "renderer": renderer,
            "scheduler": scheduler,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "num_inference_steps": 1,
            "frame_size": 32,
            "output_type": "np",
        }
        return inputs
    def test_shap_e(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (20, 32, 32, 3)
        expected_slice = np.array(
            [
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
            ]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_inference_batch_consistent(self):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        self._test_inference_batch_single_identical(
            batch_size=2, test_max_difference=test_max_difference, relax_max_difference=relax_max_difference, )
    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device)
        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]
        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]
        assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class ShapEPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_shap_e(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_np_out.npy"
        )
        pipe = ShapEPipeline.from_pretrained("openai/shap-e")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.Generator(device=torch_device).manual_seed(0)
        images = pipe(
            "a shark", generator=generator, guidance_scale=15.0, num_inference_steps=64, frame_size=64, output_type="np", ).images[0]
        assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(images, expected_image)
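# A minimal usage sketch for the pipeline exercised above (illustrative; ``export_to_gif`` is the
# diffusers utility commonly paired with Shap-E, and the frame handling mirrors the slow test):
#
#     from diffusers.utils import export_to_gif
#
#     pipe = ShapEPipeline.from_pretrained("openai/shap-e").to(torch_device)
#     frames = pipe("a shark", guidance_scale=15.0, num_inference_steps=64, frame_size=64).images[0]
#     export_to_gif(frames, "shark_3d.gif")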
| 135
| 1
|
'''simple docstring'''
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def to_atuple(x):
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
@require_tf
class TFVisionTextDualEncoderMixin:
    def get_vision_text_model(self, config, text_config):
        pass

    def prepare_config_and_inputs(self):
        pass

    def get_pretrained_model_and_inputs(self):
        pass

    def check_model_from_pretrained_configs(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(config)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], config.projection_dim))

    def check_vision_text_dual_encoder_model(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))

    def check_vision_text_dual_encoder_from_pretrained(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))

    def check_save_load(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        out_1 = output[0].numpy()
        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model = TFVisionTextDualEncoderModel.from_pretrained(tmpdirname)
            after_output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
            out_2 = after_output[0].numpy()
            max_diff = np.amax(np.abs(out_2 - out_1))
            self.assertLessEqual(max_diff, 1e-5)

    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)
        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )
        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)
        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = to_atuple(vision_model.config.image_size)
        patch_size = to_atuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))
        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)
        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )

    def assert_almost_equals(self, a, b, tol):
        diff = np.abs(a - b).max()
        self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).")

    def test_vision_text_dual_encoder_model(self):
        inputs = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_model(**inputs)

    def test_model_from_pretrained_configs(self):
        inputs = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**inputs)

    def test_vision_text_dual_encoder_from_pretrained(self):
        inputs = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**inputs)

    def test_save_load(self):
        inputs = self.prepare_config_and_inputs()
        self.check_save_load(**inputs)

    def test_vision_text_output_attention(self):
        inputs = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**inputs)

    @slow
    def test_real_model_save_load_from_pretrained(self):
        model_2, inputs = self.get_pretrained_model_and_inputs()
        outputs = model_2(**inputs)
        out_2 = outputs[0].numpy()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_2.save_pretrained(tmp_dirname)
            model_1 = TFVisionTextDualEncoderModel.from_pretrained(tmp_dirname)
            after_outputs = model_1(**inputs)
            out_1 = after_outputs[0].numpy()
            max_diff = np.amax(np.abs(out_1 - out_2))
            self.assertLessEqual(max_diff, 1e-5)
@require_tf
class TFViTBertModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-vit", "hf-internal-testing/tiny-random-bert"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFViTModel(vision_config, name="vision_model")
        text_model = TFBertModel(text_config, name="text_model")
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        vit_model_tester = TFViTModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values, _ = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }
@require_tf
class TFDeiTRobertaModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        # DeiT repo doesn't have TF weights, but we don't actually use the weights at all so let's
        # just reinitialize it.
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "Rocketknight1/tiny-random-deit-tf", "hf-internal-testing/tiny-random-roberta"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs

    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)
        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )
        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)
        # in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        image_size = to_atuple(vision_model.config.image_size)
        patch_size = to_atuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 2
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))
        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)
        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFDeiTModel(vision_config, name="vision_model")
        text_model = TFRobertaModel(text_config, name="text_model")
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        vit_model_tester = TFDeiTModelTester(self)
        bert_model_tester = TFRobertaModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values, _ = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }
@require_tf
class TFCLIPVisionBertModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "Rocketknight1/tiny-random-clip-tf", "hf-internal-testing/tiny-random-bert"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFCLIPVisionModel(vision_config, name="vision_model")
        text_model = TFBertModel(text_config, name="text_model")
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        clip_model_tester = TFCLIPVisionModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }
@require_vision
@require_tf
class TFVisionTextDualEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference(self):
        model = TFVisionTextDualEncoderModel.from_pretrained(
            "clip-italian/clip-italian", logit_scale_init_value=1.0, from_pt=True
        )
        processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = processor(
            text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="np"
        )
        outputs = model(**inputs)
        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape, (inputs.input_ids.shape[0], inputs.pixel_values.shape[0])
        )
        expected_logits = np.array([[1.2284727, 0.3104122]])
        self.assertTrue(np.allclose(outputs.logits_per_image.numpy(), expected_logits, atol=1e-3))
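# For orientation, a sketch (not a test) of turning the logits checked above into caption
# probabilities; names are copied from the integration test and the softmax step is our addition:
#
#     scores = model(**inputs).logits_per_image.numpy()  # shape (num_images, num_texts)
#     probs = np.exp(scores) / np.exp(scores).sum(axis=-1, keepdims=True)  # softmax over captions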
| 276
|
"""simple docstring"""
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
lowercase__ = """\
@article{wang2019superglue,
title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
journal={arXiv preprint arXiv:1905.00537},
year={2019}
}
"""
lowercase__ = """\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
"""
lowercase__ = """
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
predictions: list of predictions to score. Depending on the SuperGlUE subset:
- for 'record': list of question-answer dictionaries with the following keys:
- 'idx': index of the question as specified by the dataset
- 'prediction_text': the predicted answer text
- for 'multirc': list of question-answer dictionaries with the following keys:
- 'idx': index of the question-answer pair as specified by the dataset
- 'prediction': the predicted answer label
- otherwise: list of predicted labels
references: list of reference labels. Depending on the SuperGLUE subset:
- for 'record': list of question-answers dictionaries with the following keys:
- 'idx': index of the question as specified by the dataset
- 'answers': list of possible answers
- otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
- for 'record':
- 'exact_match': Exact match between answer and gold answer
- 'f1': F1 score
- for 'multirc':
- 'exact_match': Exact match between answer and gold answer
- 'f1_m': Per-question macro-F1 score
- 'f1_a': Average F1 score over all answers
    - for 'axb':
        - 'matthews_correlation': Matthews Correlation
- for 'cb':
- 'accuracy': Accuracy
- 'f1': F1 score
- for all others:
- 'accuracy': Accuracy
Examples:
>>> super_glue_metric = datasets.load_metric('super_glue', 'copa') # any of [\"copa\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"boolq\", \"axg\"]
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'cb')
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0, 'f1': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'record')
>>> predictions = [{'idx': {'passage': 0, 'query': 0}, 'prediction_text': 'answer'}]
>>> references = [{'idx': {'passage': 0, 'query': 0}, 'answers': ['answer', 'another_answer']}]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 1.0, 'f1': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'multirc')
>>> predictions = [{'idx': {'answer': 0, 'paragraph': 0, 'question': 0}, 'prediction': 0}, {'idx': {'answer': 1, 'paragraph': 2, 'question': 3}, 'prediction': 1}]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 1.0, 'f1_m': 1.0, 'f1_a': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'axb')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'matthews_correlation': 1.0}
"""
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_fa(preds, labels, fa_avg="binary"):
    acc = simple_accuracy(preds, labels)
    fa = float(fa_score(y_true=labels, y_pred=preds, average=fa_avg))
    return {
        "accuracy": acc,
        "f1": fa,
    }
def evaluate_multirc(ids_preds, labels):
    question_map = {}
    for id_pred, label in zip(ids_preds, labels):
        question_id = f"{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}"
        pred = id_pred["prediction"]
        if question_id in question_map:
            question_map[question_id].append((pred, label))
        else:
            question_map[question_id] = [(pred, label)]
    fas, ems = [], []
    for question, preds_labels in question_map.items():
        question_preds, question_labels = zip(*preds_labels)
        fa = fa_score(y_true=question_labels, y_pred=question_preds, average="macro")
        fas.append(fa)
        em = int(sum(pred == label for pred, label in preds_labels) == len(preds_labels))
        ems.append(em)
    fa_m = float(sum(fas) / len(fas))
    em = sum(ems) / len(ems)
    fa_a = float(fa_score(y_true=labels, y_pred=[id_pred["prediction"] for id_pred in ids_preds]))
    return {"exact_match": em, "f1_m": fa_m, "f1_a": fa_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class SuperGlue(datasets.Metric):
'''simple docstring'''
    def _info(self):
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
'You should supply a configuration name selected in '
'["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format='numpy' if not self.config_name == 'record' and not self.config_name == 'multirc' else None , )
    def _get_feature_types(self):
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value('int64' ),
"query": datasets.Value('int64' ),
},
"prediction_text": datasets.Value('string' ),
},
"references": {
"idx": {
"passage": datasets.Value('int64' ),
"query": datasets.Value('int64' ),
},
"answers": datasets.Sequence(datasets.Value('string' ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value('int64' ),
"paragraph": datasets.Value('int64' ),
"question": datasets.Value('int64' ),
},
"prediction": datasets.Value('int64' ),
},
"references": datasets.Value('int64' ),
}
else:
return {
"predictions": datasets.Value('int64' ),
"references": datasets.Value('int64' ),
}
    def _compute(self, predictions, references):
        if self.config_name == "axb":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "cb":
            return acc_and_f1(predictions, references, f1_avg="macro")
        elif self.config_name == "record":
            dataset = [
                {
                    "qas": [
                        {"id": ref["idx"]["query"], "answers": [{"text": ans} for ans in ref["answers"]]}
                        for ref in references
                    ]
                }
            ]
            predictions = {pred["idx"]["query"]: pred["prediction_text"] for pred in predictions}
            return evaluate_record(dataset, predictions)[0]
        elif self.config_name == "multirc":
            return evaluate_multirc(predictions, references)
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]'
            )
| 96
| 0
|
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class ViTHybridModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        backbone_featmap_shape=[1, 16, 4, 4],
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.backbone_featmap_shape = backbone_featmap_shape

        # in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        # the number of patches is based on the feature map of the backbone, which by default uses an output stride
        # of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
        num_patches = (self.image_size // 32) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        backbone_config = {
            "global_padding": "same",
            "layer_type": "bottleneck",
            "depths": [3, 4, 9],
            "out_features": ["stage1", "stage2", "stage3"],
            "embedding_dynamic_padding": True,
            "hidden_sizes": [4, 8, 16, 32],
            "num_groups": 2,
        }
        return ViTHybridConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            backbone_featmap_shape=self.backbone_featmap_shape,
            backbone_config=backbone_config,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTHybridModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTHybridForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTHybridModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": ViTHybridModel, "image-classification": ViTHybridForImageClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTHybridModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTHybridConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            # Skip the check for the backbone
            backbone_params = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
                    backbone_params = [f"{name}.{key}" for key in module.state_dict().keys()]
                    break

            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTHybridModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ViTHybridModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(
            torch_device
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.9090, -0.4993, -0.2389]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    @require_accelerate
    def test_accelerate_loading(self):
        image_processor = ViTHybridImageProcessor.from_pretrained("google/vit-hybrid-base-bit-384")
        model = ViTHybridForImageClassification.from_pretrained("google/vit-hybrid-base-bit-384", device_map="auto")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        outputs = model(**inputs)
        logits = outputs.logits
        # model predicts one of the 1000 ImageNet classes
        predicted_class_idx = logits.argmax(-1).item()
        self.assertEqual(model.config.id2label[predicted_class_idx], "tabby, tabby cat")
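# Standalone inference sketch mirroring the integration test above (checkpoint and
# image path are illustrative, not fixed by this file):
#
#     from PIL import Image
#     from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor
#
#     processor = ViTHybridImageProcessor.from_pretrained("google/vit-hybrid-base-bit-384")
#     model = ViTHybridForImageClassification.from_pretrained("google/vit-hybrid-base-bit-384")
#     inputs = processor(images=Image.open("cat.png"), return_tensors="pt")
#     logits = model(**inputs).logits
#     print(model.config.id2label[logits.argmax(-1).item()])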
| 292
|
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def notebook_launcher(function, args=(), num_processes=None, mixed_precision="no", use_port="29500"):
    in_colab = False
    in_kaggle = False
    if any(key.startswith("KAGGLE") for key in os.environ.keys()):
        in_kaggle = True
    elif "IPython" in sys.modules:
        in_colab = "google.colab" in str(sys.modules["IPython"].get_ipython())

    try:
        mixed_precision = PrecisionType(mixed_precision.lower())
    except ValueError:
        raise ValueError(
            f"Unknown mixed_precision mode: {mixed_precision.lower()}. Choose between {PrecisionType.list()}."
        )

    if (in_colab or in_kaggle) and (os.environ.get("TPU_NAME", None) is not None):
        # TPU launch
        import torch_xla.distributed.xla_multiprocessing as xmp

        if len(AcceleratorState._shared_state) > 0:
            raise ValueError(
                "To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside "
                "your training function. Restart your notebook and make sure no cells initializes an "
                "`Accelerator`."
            )
        if num_processes is None:
            num_processes = 8

        launcher = PrepareForLaunch(function, distributed_type="TPU")
        print(f"Launching a training on {num_processes} TPU cores.")
        xmp.spawn(launcher, args=args, nprocs=num_processes, start_method="fork")
    elif in_colab:
        # No need for a distributed launch otherwise as it's either CPU or one GPU.
        if torch.cuda.is_available():
            print("Launching training on one GPU.")
        else:
            print("Launching training on one CPU.")
        function(*args)
    else:
        if num_processes is None:
            raise ValueError(
                "You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call."
            )
        if num_processes > 1:
            # Multi-GPU launch
            from torch.multiprocessing import start_processes
            from torch.multiprocessing.spawn import ProcessRaisedException

            if len(AcceleratorState._shared_state) > 0:
                raise ValueError(
                    "To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized "
                    "inside your training function. Restart your notebook and make sure no cells initializes an "
                    "`Accelerator`."
                )
            if torch.cuda.is_initialized():
                raise ValueError(
                    "To launch a multi-GPU training from your notebook, you need to avoid running any instruction "
                    "using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA "
                    "function."
                )
            # torch.distributed will expect a few environment variable to be here. We set the ones common to each
            # process here (the other ones will be set by the launcher).
            with patch_environment(
                world_size=num_processes, master_addr="127.0.0.1", master_port=use_port, mixed_precision=mixed_precision
            ):
                launcher = PrepareForLaunch(function, distributed_type="MULTI_GPU")
                print(f"Launching training on {num_processes} GPUs.")
                try:
                    start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
                except ProcessRaisedException as e:
                    if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
                        raise RuntimeError(
                            "CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. "
                            "This likely stems from an outside import causing issues once the `notebook_launcher()` is called. "
                            "Please review your imports and test them when running the `notebook_launcher()` to identify "
                            "which one is problematic."
                        ) from e
        else:
            # No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
            if is_mps_available():
                os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
                print("Launching training on MPS.")
            elif torch.cuda.is_available():
                print("Launching training on one GPU.")
            else:
                print("Launching training on CPU.")
            function(*args)
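# Minimal usage sketch (the training function is hypothetical; it should create its
# own `Accelerator` inside, as the checks above enforce):
#
#     def training_loop(learning_rate):
#         ...  # build dataloaders/model and train with accelerate
#
#     notebook_launcher(training_loop, args=(1e-4,), num_processes=2)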
def debug_launcher(function, args=(), num_processes=2):
    from torch.multiprocessing import start_processes

    with tempfile.NamedTemporaryFile() as tmp_file:
        # torch.distributed will expect a few environment variable to be here. We set the ones common to each
        # process here (the other ones will be set by the launcher).
        with patch_environment(
            world_size=num_processes,
            master_addr="127.0.0.1",
            master_port="29500",
            accelerate_mixed_precision="no",
            accelerate_debug_rdv_file=tmp_file.name,
            accelerate_use_cpu="yes",
        ):
            launcher = PrepareForLaunch(function, debug=True)
            start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
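# debug_launcher runs the function in `num_processes` CPU-only subprocesses, which is
# handy for exercising distributed code paths in tests without GPUs, e.g.:
#
#     debug_launcher(my_distributed_test, num_processes=2)  # `my_distributed_test` is illustrative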
| 292
| 1
|
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
    AutoModel,
    AutoModelForCausalLM,
    AutoModelForSeq2SeqLM,
    AutoModelForSequenceClassification,
    AutoTokenizer,
    BitsAndBytesConfig,
    pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def get_some_linear_layer(model):
    if model.config.model_type == "gpt2":
        return model.transformer.h[0].mlp.c_fc
    return model.transformer.h[0].mlp.dense_4h_to_h
if is_torch_available():
    import torch
    import torch.nn as nn

    class LoRALayer(nn.Module):
        """Wraps a linear layer with a LoRA-like adapter - used for testing purposes only."""

        def __init__(self, module: nn.Module, rank: int):
            super().__init__()
            self.module = module
            self.adapter = nn.Sequential(
                nn.Linear(module.in_features, rank, bias=False),
                nn.Linear(rank, module.out_features, bias=False),
            )
            small_std = (2.0 / (5 * min(module.in_features, module.out_features))) ** 0.5
            nn.init.normal_(self.adapter[0].weight, std=small_std)
            nn.init.zeros_(self.adapter[1].weight)
            self.adapter.to(module.weight.device)

        def forward(self, input, *args, **kwargs):
            return self.module(input, *args, **kwargs) + self.adapter(input)
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Base4bitTest(unittest.TestCase):
    # We keep the constants inside the init function and model loading inside setUp function

    # We need to test on relatively large models (aka >1b parameters otherwise the quantization may not work as expected)
    # Therefore here we use only bloom-1b3 to test our module
    model_name = "bigscience/bloom-1b7"

    # Constant values
    EXPECTED_RELATIVE_DIFFERENCE = 2.109659552692574

    input_text = "Hello my name is"
    EXPECTED_OUTPUTS = set()
    EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I")
    EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n")
    EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University")
    MAX_NEW_TOKENS = 10

    def setUp(self):
        # Models and tokenizer
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
class Bnb4BitTest(Base4bitTest):
    def setUp(self):
        super().setUp()

        # Models and tokenizer
        self.model_fp16 = AutoModelForCausalLM.from_pretrained(
            self.model_name, torch_dtype=torch.float16, device_map="auto"
        )
        self.model_4bit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")

    def tearDown(self):
        del self.model_fp16
        del self.model_4bit

        gc.collect()
        torch.cuda.empty_cache()
    def test_quantization_config_json_serialization(self):
        config = self.model_4bit.config
        self.assertTrue(hasattr(config, "quantization_config"))

        _ = config.to_dict()
        _ = config.to_diff_dict()
        _ = config.to_json_string()

    def test_memory_footprint(self):
        from bitsandbytes.nn import Params4bit

        mem_fp16 = self.model_fp16.get_memory_footprint()
        mem_4bit = self.model_4bit.get_memory_footprint()

        self.assertAlmostEqual(mem_fp16 / mem_4bit, self.EXPECTED_RELATIVE_DIFFERENCE)
        linear = get_some_linear_layer(self.model_4bit)
        self.assertTrue(linear.weight.__class__ == Params4bit)

    def test_linear_are_4bit(self):
        from transformers import T5PreTrainedModel

        self.model_fp16.get_memory_footprint()
        self.model_4bit.get_memory_footprint()

        for name, module in self.model_4bit.named_modules():
            if isinstance(module, torch.nn.Linear):
                if name not in ["lm_head"] + T5PreTrainedModel._keep_in_fp32_modules:
                    # 4-bit parameters are packed in uint8 variables
                    self.assertTrue(module.weight.dtype == torch.uint8)

    def test_generate_quality(self):
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        output_sequences = self.model_4bit.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)
        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)

    def test_generate_quality_config(self):
        config = BitsAndBytesConfig()
        config.load_in_4bit = True

        model_4bit_from_config = AutoModelForCausalLM.from_pretrained(
            self.model_name, quantization_config=config, device_map="auto"
        )
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        output_sequences = model_4bit_from_config.generate(
            input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10
        )
        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)

    def test_raise_on_save_pretrained(self):
        with self.assertRaises(NotImplementedError), tempfile.TemporaryDirectory() as tmpdirname:
            self.model_4bit.save_pretrained(tmpdirname)

    def test_raise_if_config_and_load_in_4bit(self):
        config = BitsAndBytesConfig()

        with self.assertRaises(ValueError):
            _ = AutoModelForCausalLM.from_pretrained(
                self.model_name,
                quantization_config=config,
                load_in_4bit=True,
                device_map="auto",
                bnb_4bit_quant_type="nf4",
            )

    def test_device_and_dtype_assignment(self):
        with self.assertRaises(ValueError):
            # Tries with `str`
            self.model_4bit.to("cpu")

        with self.assertRaises(ValueError):
            # Tries with a `dtype`
            self.model_4bit.to(torch.float16)

        with self.assertRaises(ValueError):
            # Tries with a `device`
            self.model_4bit.to(torch.device("cuda:0"))

        with self.assertRaises(ValueError):
            # Tries with a `device`
            self.model_4bit.float()

        with self.assertRaises(ValueError):
            # Tries with a `device`
            self.model_4bit.half()

        # Test if we did not break anything
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        self.model_fp16 = self.model_fp16.to(torch.float32)
        _ = self.model_fp16.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)

        # Check this does not throw an error
        _ = self.model_fp16.to("cpu")

        # Check this does not throw an error
        _ = self.model_fp16.half()

        # Check this does not throw an error
        _ = self.model_fp16.float()

    def test_fp32_4bit_conversion(self):
        model = AutoModelForSeq2SeqLM.from_pretrained("t5-small", load_in_4bit=True, device_map="auto")
        self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.float32)
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Bnb4BitT5Test(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls.model_name = "t5-small"
        cls.dense_act_model_name = "google/flan-t5-small"  # flan-t5 uses dense-act instead of dense-relu-dense
        cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name)
        cls.input_text = "Translate in German: Hello, my dog is cute"

    def tearDown(self):
        gc.collect()
        torch.cuda.empty_cache()

    def test_inference_without_keep_in_fp32(self):
        from transformers import T5ForConditionalGeneration

        modules = T5ForConditionalGeneration._keep_in_fp32_modules
        T5ForConditionalGeneration._keep_in_fp32_modules = None

        # test with `t5-small`
        model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)

        # test with `flan-t5-small`
        model = T5ForConditionalGeneration.from_pretrained(
            self.dense_act_model_name, load_in_4bit=True, device_map="auto"
        )
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)
        T5ForConditionalGeneration._keep_in_fp32_modules = modules

    def test_inference_with_keep_in_fp32(self):
        import bitsandbytes as bnb

        from transformers import T5ForConditionalGeneration

        # test with `t5-small`
        model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")

        # there was a bug with decoders - this test checks that it is fixed
        self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q, bnb.nn.Linear4bit))

        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)

        # test with `flan-t5-small`
        model = T5ForConditionalGeneration.from_pretrained(
            self.dense_act_model_name, load_in_4bit=True, device_map="auto"
        )
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)
class Classes4BitModelTest(Base4bitTest):
    def setUp(self):
        super().setUp()
        # model_name
        self.model_name = "bigscience/bloom-560m"
        self.seq_to_seq_name = "t5-small"

        # Different types of model
        self.base_model = AutoModel.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        # Sequence classification model
        self.sequence_model = AutoModelForSequenceClassification.from_pretrained(
            self.model_name, load_in_4bit=True, device_map="auto"
        )
        # CausalLM model
        self.model_4bit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        # Seq2seq model
        self.seq_to_seq_model = AutoModelForSeq2SeqLM.from_pretrained(
            self.seq_to_seq_name, load_in_4bit=True, device_map="auto"
        )

    def tearDown(self):
        del self.base_model
        del self.sequence_model
        del self.model_4bit
        del self.seq_to_seq_model

        gc.collect()
        torch.cuda.empty_cache()

    def test_correct_head_class(self):
        from bitsandbytes.nn import Params4bit

        self.assertTrue(self.base_model.h[-1].mlp.dense_4h_to_h.weight.__class__ == Params4bit)

        # Other heads should be nn.Parameter
        self.assertTrue(self.model_4bit.lm_head.weight.__class__ == torch.nn.Parameter)
        self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter)
        self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter)
class Pipeline4BitTest(Base4bitTest):
    def setUp(self):
        super().setUp()

    def tearDown(self):
        del self.pipe

        gc.collect()
        torch.cuda.empty_cache()

    def test_pipeline(self):
        self.pipe = pipeline(
            "text-generation",
            model=self.model_name,
            model_kwargs={"device_map": "auto", "load_in_4bit": True, "torch_dtype": torch.float16},
            max_new_tokens=self.MAX_NEW_TOKENS,
        )

        # Real second forward pass
        pipeline_output = self.pipe(self.input_text)
        self.assertIn(pipeline_output[0]["generated_text"], self.EXPECTED_OUTPUTS)
@require_torch_multi_gpu
class Bnb4BitTestMultiGpu(Base4bitTest):
    def setUp(self):
        super().setUp()

    def test_multi_gpu_loading(self):
        model_parallel = AutoModelForCausalLM.from_pretrained(
            self.model_name, load_in_4bit=True, device_map="balanced"
        )

        # Check correct device map
        self.assertEqual(set(model_parallel.hf_device_map.values()), {0, 1})

        # Check that inference pass works on the model
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")

        # Second real batch
        output_parallel = model_parallel.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)
        self.assertIn(self.tokenizer.decode(output_parallel[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)
class Bnb4BitTestTraining(Base4bitTest):
    def setUp(self):
        self.model_name = "facebook/opt-350m"
        super().setUp()

    def test_training(self):
        if version.parse(importlib.metadata.version("bitsandbytes")) < version.parse("0.37.0"):
            return

        # Step 1: freeze all parameters
        model = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True)

        self.assertEqual(set(model.hf_device_map.values()), {torch.cuda.current_device()})

        for param in model.parameters():
            param.requires_grad = False  # freeze the model - train adapters later
            if param.ndim == 1:
                # cast the small parameters (e.g. layernorm) to fp32 for stability
                param.data = param.data.to(torch.float32)

        # Step 2: add adapters
        for _, module in model.named_modules():
            if "OPTAttention" in repr(type(module)):
                module.q_proj = LoRALayer(module.q_proj, rank=16)
                module.k_proj = LoRALayer(module.k_proj, rank=16)
                module.v_proj = LoRALayer(module.v_proj, rank=16)

        # Step 3: dummy batch
        batch = self.tokenizer("Test batch ", return_tensors="pt").to(0)

        # Step 4: Check if the gradient is not None
        with torch.cuda.amp.autocast():
            out = model.forward(**batch)
            out.logits.norm().backward()

        for module in model.modules():
            if isinstance(module, LoRALayer):
                self.assertTrue(module.adapter[1].weight.grad is not None)
                self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0)
            elif isinstance(module, nn.Embedding):
                self.assertTrue(module.weight.grad is None)


class Bnb4BitGPT2Test(Bnb4BitTest):
    model_name = "gpt2-xl"
    EXPECTED_RELATIVE_DIFFERENCE = 3.3191854854152187
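# Minimal 4-bit loading sketch outside the test harness (assumes a CUDA GPU with
# `bitsandbytes` and `accelerate` installed; the checkpoint name is illustrative):
#
#     from transformers import AutoModelForCausalLM
#
#     model = AutoModelForCausalLM.from_pretrained(
#         "facebook/opt-350m", load_in_4bit=True, device_map="auto"
#     )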
| 49
|
"""simple docstring"""
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class AffineTransformed(TransformedDistribution):
    def __init__(self, base_distribution: Distribution, loc=None, scale=None, event_dim=0):
        self.scale = 1.0 if scale is None else scale
        self.loc = 0.0 if loc is None else loc

        super().__init__(base_distribution, [AffineTransform(loc=self.loc, scale=self.scale, event_dim=event_dim)])

    @property
    def mean(self):
        """Returns the mean of the distribution."""
        return self.base_dist.mean * self.scale + self.loc

    @property
    def variance(self):
        """Returns the variance of the distribution."""
        return self.base_dist.variance * self.scale**2

    @property
    def stddev(self):
        """Returns the standard deviation of the distribution."""
        return self.variance.sqrt()
class ParameterProjection(nn.Module):
    def __init__(
        self, in_features: int, args_dim: Dict[str, int], domain_map: Callable[..., Tuple[torch.Tensor]], **kwargs
    ):
        super().__init__(**kwargs)
        self.args_dim = args_dim
        self.proj = nn.ModuleList([nn.Linear(in_features, dim) for dim in args_dim.values()])
        self.domain_map = domain_map

    def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor]:
        params_unbounded = [proj(x) for proj in self.proj]
        return self.domain_map(*params_unbounded)
class LambdaLayer(nn.Module):
    def __init__(self, function):
        super().__init__()
        self.function = function

    def forward(self, x, *args):
        return self.function(x, *args)
class DistributionOutput:
    distribution_class: type
    in_features: int
    args_dim: Dict[str, int]

    def __init__(self, dim: int = 1) -> None:
        self.dim = dim
        self.args_dim = {k: dim * self.args_dim[k] for k in self.args_dim}

    def _base_distribution(self, distr_args):
        if self.dim == 1:
            return self.distribution_class(*distr_args)
        else:
            return Independent(self.distribution_class(*distr_args), 1)

    def distribution(
        self,
        distr_args,
        loc: Optional[torch.Tensor] = None,
        scale: Optional[torch.Tensor] = None,
    ) -> Distribution:
        distr = self._base_distribution(distr_args)
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(distr, loc=loc, scale=scale, event_dim=self.event_dim)

    @property
    def event_shape(self) -> Tuple:
        """Shape of each individual event contemplated by the distributions that this object constructs."""
        return () if self.dim == 1 else (self.dim,)

    @property
    def event_dim(self) -> int:
        """Number of event dimensions, i.e., length of the `event_shape` tuple."""
        return len(self.event_shape)

    @property
    def value_in_support(self) -> float:
        """A valid numeric value for computing the log-loss of the corresponding distribution, used when padding."""
        return 0.0

    def get_parameter_projection(self, in_features: int) -> nn.Module:
        """Return the parameter projection layer that maps the input to the appropriate parameters."""
        return ParameterProjection(
            in_features=in_features,
            args_dim=self.args_dim,
            domain_map=LambdaLayer(self.domain_map),
        )

    def domain_map(self, *args: torch.Tensor):
        """Converts arguments to the right shape and domain."""
        raise NotImplementedError()

    @staticmethod
    def squareplus(x: torch.Tensor) -> torch.Tensor:
        """Helper to map inputs to the positive orthant by applying the square-plus operation."""
        return (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0
class StudentTOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
    distribution_class: type = StudentT

    @classmethod
    def domain_map(cls, df: torch.Tensor, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        df = 2.0 + cls.squareplus(df)
        return df.squeeze(-1), loc.squeeze(-1), scale.squeeze(-1)
class NormalOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"loc": 1, "scale": 1}
    distribution_class: type = Normal

    @classmethod
    def domain_map(cls, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        return loc.squeeze(-1), scale.squeeze(-1)
class NegativeBinomialOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"total_count": 1, "logits": 1}
    distribution_class: type = NegativeBinomial

    @classmethod
    def domain_map(cls, total_count: torch.Tensor, logits: torch.Tensor):
        total_count = cls.squareplus(total_count)
        return total_count.squeeze(-1), logits.squeeze(-1)

    def _base_distribution(self, distr_args) -> Distribution:
        total_count, logits = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=total_count, logits=logits)
        else:
            return Independent(self.distribution_class(total_count=total_count, logits=logits), 1)

    # Overwrites the parent class method: instead of scaling through an affine
    # transformation (which would break integer support), we scale the parameters.
    def distribution(
        self, distr_args, loc: Optional[torch.Tensor] = None, scale: Optional[torch.Tensor] = None
    ) -> Distribution:
        total_count, logits = distr_args

        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()

        return self._base_distribution((total_count, logits))
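# Quick usage sketch (shapes are illustrative):
#
#     dist_output = StudentTOutput(dim=1)
#     proj = dist_output.get_parameter_projection(in_features=32)
#     df, loc, scale = proj(torch.randn(8, 32))
#     distr = dist_output.distribution((df, loc, scale))
#     sample = distr.sample()  # shape: (8,)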
| 292
| 0
|
from __future__ import annotations
import unittest
from transformers import DebertaV2Config, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import (
        TFDebertaV2ForMaskedLM,
        TFDebertaV2ForQuestionAnswering,
        TFDebertaV2ForSequenceClassification,
        TFDebertaV2ForTokenClassification,
        TFDebertaV2Model,
    )
class TFDebertaV2ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        relative_attention=False,
        position_biased_input=True,
        pos_att_type="None",
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = DebertaV2Config(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            relative_attention=self.relative_attention,
            position_biased_input=self.position_biased_input,
            initializer_range=self.initializer_range,
            return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDebertaV2Model(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDebertaV2ForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDebertaV2ForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }

        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDebertaV2ForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDebertaV2ForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }

        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFDebertaV2ModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDebertaV2Model,
            TFDebertaV2ForMaskedLM,
            TFDebertaV2ForQuestionAnswering,
            TFDebertaV2ForSequenceClassification,
            TFDebertaV2ForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDebertaV2Model,
            "fill-mask": TFDebertaV2ForMaskedLM,
            "question-answering": TFDebertaV2ForQuestionAnswering,
            "text-classification": TFDebertaV2ForSequenceClassification,
            "token-classification": TFDebertaV2ForTokenClassification,
            "zero-shot": TFDebertaV2ForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDebertaV2ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaV2Config, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFDebertaV2Model.from_pretrained("kamalkraj/deberta-v2-xlarge")
        self.assertIsNotNone(model)
@require_tf
class TFDeBERTaV2ModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = TFDebertaV2Model.from_pretrained("kamalkraj/deberta-v2-xlarge")
        input_ids = tf.constant([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]

        expected_slice = tf.constant(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]]
        )
        tf.debugging.assert_near(output[:, 1:4, 1:4], expected_slice, atol=1e-4)
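# The @slow tests above are skipped by default; they follow the usual transformers
# convention of being enabled via an environment variable (test path illustrative):
#
#     RUN_SLOW=1 python -m pytest tests/models/deberta_v2/test_modeling_tf_deberta_v2.py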
| 306
|
def abbr(a: str, b: str) -> bool:
    """
    Check whether string `a` can be turned into string `b` by capitalizing some of
    its lowercase letters and deleting the remaining lowercase letters.

    >>> abbr("daBcd", "ABC")
    True
    >>> abbr("dBcd", "ABC")
    False
    """
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]
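# The table has (n + 1) x (m + 1) cells and each cell is visited once, so the check
# runs in O(n * m) time and space for n = len(a), m = len(b).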
if __name__ == "__main__":
import doctest
doctest.testmod()
| 306
| 1
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/unispeech-large-1500h-cv": (
        "https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json"
    ),
    # See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}


class UniSpeechConfig(PretrainedConfig):
    model_type = "unispeech"
    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        num_ctc_classes=80,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        replace_prob=0.5,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # pretraining loss
        self.replace_prob = replace_prob

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
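# Quick sketch: the default config and its derived downsampling ratio.
#
#     config = UniSpeechConfig()
#     config.inputs_to_logits_ratio  # 5 * 2**6 == 320 input samples per output frame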
| 307
|
import doctest
from collections import deque
import numpy as np
class CircularConvolution:
    """
    This class stores the first and second signal and performs the circular convolution.
    """

    def __init__(self) -> None:
        """
        First signal and second signal are stored as 1-D arrays.
        """
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]

    def circular_convolution(self) -> list[float]:
        """
        Performs the circular convolution of the first and second signals using the
        matrix method.

        >>> convolution = CircularConvolution()
        >>> convolution.circular_convolution()
        [10, 10, 6, 14]
        """
        length_first_signal = len(self.first_signal)
        length_second_signal = len(self.second_signal)

        max_length = max(length_first_signal, length_second_signal)

        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for i in range(max_length)]

        # fills the smaller signal with zeros to make both signals of same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)

        # each row of the matrix is a cyclic rotation of the second signal
        for i in range(max_length):
            rotated_signal = deque(self.second_signal)
            rotated_signal.rotate(i)
            for j, item in enumerate(rotated_signal):
                matrix[i][j] += item

        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix), np.transpose(self.first_signal))

        # rounding-off to two decimal places
        return [round(i, 2) for i in final_signal]
if __name__ == "__main__":
doctest.testmod()
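# Cross-check sketch: for equal-length signals, circular convolution equals
# IDFT(DFT(x1) * DFT(x2)), e.g. with numpy:
#
#     x1, x2 = [2, 1, 2, -1], [1, 2, 3, 4]
#     np.real(np.fft.ifft(np.fft.fft(x1) * np.fft.fft(x2))).round(2)  # -> [10., 10., 6., 14.]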
| 307
| 1
|
"""simple docstring"""
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
_lock = threading.Lock()
_default_handler: Optional[logging.Handler] = None

log_levels = {
    "debug": logging.DEBUG,
    "info": logging.INFO,
    "warning": logging.WARNING,
    "error": logging.ERROR,
    "critical": logging.CRITICAL,
}

_default_log_level = logging.WARNING

_tqdm_active = True


def _get_default_logging_level():
    """
    If TRANSFORMERS_VERBOSITY env var is set to one of the valid choices return that as the new default level. If it
    is not - fall back to `_default_log_level`.
    """
    env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, "
                f"has to be one of: { ', '.join(log_levels.keys()) }"
            )
    return _default_log_level
def _get_library_name() -> str:
    return __name__.split(".")[0]


def _get_library_root_logger() -> logging.Logger:
    return logging.getLogger(_get_library_name())


def _configure_library_root_logger() -> None:
    global _default_handler

    with _lock:
        if _default_handler:
            # This library has already configured the library root logger.
            return
        _default_handler = logging.StreamHandler()  # Set sys.stderr as stream.
        _default_handler.flush = sys.stderr.flush

        # Apply our default configuration to the library root logger.
        library_root_logger = _get_library_root_logger()
        library_root_logger.addHandler(_default_handler)
        library_root_logger.setLevel(_get_default_logging_level())
        library_root_logger.propagate = False


def _reset_library_root_logger() -> None:
    global _default_handler

    with _lock:
        if not _default_handler:
            return

        library_root_logger = _get_library_root_logger()
        library_root_logger.removeHandler(_default_handler)
        library_root_logger.setLevel(logging.NOTSET)
        _default_handler = None


def get_log_levels_dict():
    return log_levels


def get_logger(name: Optional[str] = None) -> logging.Logger:
    """
    Return a logger with the specified name. Not supposed to be directly accessed unless you are writing a custom
    transformers module.
    """
    if name is None:
        name = _get_library_name()

    _configure_library_root_logger()
    return logging.getLogger(name)
def get_verbosity() -> int:
    """Return the current level for the library's root logger as an int."""
    _configure_library_root_logger()
    return _get_library_root_logger().getEffectiveLevel()


def set_verbosity(verbosity: int) -> None:
    """Set the verbosity level for the library's root logger."""
    _configure_library_root_logger()
    _get_library_root_logger().setLevel(verbosity)


def set_verbosity_info():
    """Set the verbosity to the `INFO` level."""
    return set_verbosity(INFO)


def set_verbosity_warning():
    """Set the verbosity to the `WARNING` level."""
    return set_verbosity(WARNING)


def set_verbosity_debug():
    """Set the verbosity to the `DEBUG` level."""
    return set_verbosity(DEBUG)


def set_verbosity_error():
    """Set the verbosity to the `ERROR` level."""
    return set_verbosity(ERROR)
def disable_default_handler() -> None:
    """Disable the default handler of the library's root logger."""
    _configure_library_root_logger()

    assert _default_handler is not None
    _get_library_root_logger().removeHandler(_default_handler)


def enable_default_handler() -> None:
    """Enable the default handler of the library's root logger."""
    _configure_library_root_logger()

    assert _default_handler is not None
    _get_library_root_logger().addHandler(_default_handler)


def add_handler(handler: logging.Handler) -> None:
    """Adds a handler to the library's root logger."""
    _configure_library_root_logger()

    assert handler is not None
    _get_library_root_logger().addHandler(handler)


def remove_handler(handler: logging.Handler) -> None:
    """Removes given handler from the library's root logger."""
    _configure_library_root_logger()

    assert handler is not None and handler not in _get_library_root_logger().handlers
    _get_library_root_logger().removeHandler(handler)
def disable_propagation() -> None:
    """Disable propagation of the library log outputs. Note that log propagation is disabled by default."""
    _configure_library_root_logger()
    _get_library_root_logger().propagate = False


def enable_propagation() -> None:
    """
    Enable propagation of the library log outputs. Please disable the library's default handler to prevent double
    logging if the root logger has been configured.
    """
    _configure_library_root_logger()
    _get_library_root_logger().propagate = True


def enable_explicit_format() -> None:
    """Enable explicit formatting for every logging handler of the library's root logger."""
    handlers = _get_library_root_logger().handlers

    for handler in handlers:
        formatter = logging.Formatter("[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s")
        handler.setFormatter(formatter)


def reset_format() -> None:
    """Resets the formatting for the library's root logger handlers."""
    handlers = _get_library_root_logger().handlers

    for handler in handlers:
        handler.setFormatter(None)


def warning_advice(self, *args, **kwargs):
    """
    Identical to `logger.warning()`, but if env var TRANSFORMERS_NO_ADVISORY_WARNINGS=1 is set, the warning is not
    printed.
    """
    no_advisory_warnings = os.getenv("TRANSFORMERS_NO_ADVISORY_WARNINGS", False)
    if no_advisory_warnings:
        return
    self.warning(*args, **kwargs)


logging.Logger.warning_advice = warning_advice


@functools.lru_cache(None)
def warning_once(self, *args, **kwargs):
    """Identical to `logger.warning()`, but emits the warning with the same message only once."""
    self.warning(*args, **kwargs)


logging.Logger.warning_once = warning_once
class EmptyTqdm:
    """Dummy tqdm which doesn't do anything."""

    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None

    def __iter__(self):
        return iter(self._iterator)

    def __getattr__(self, _):
        """Return an empty function."""

        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return

        return empty_fn

    def __enter__(self):
        return self

    def __exit__(self, type_, value, traceback):
        return


class _tqdm_cls:
    def __call__(self, *args, **kwargs):
        if _tqdm_active:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)

    def set_lock(self, *args, **kwargs):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)

    def get_lock(self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()


tqdm = _tqdm_cls()
def is_progress_bar_enabled() -> bool:
    """Return a boolean indicating whether tqdm progress bars are enabled."""
    global _tqdm_active
    return bool(_tqdm_active)


def enable_progress_bars() -> None:
    """Enable tqdm progress bars."""
    global _tqdm_active
    _tqdm_active = True
    hf_hub_utils.enable_progress_bars()


def disable_progress_bars() -> None:
    """Disable tqdm progress bars."""
    global _tqdm_active
    _tqdm_active = False
    hf_hub_utils.disable_progress_bars()
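# Usage sketch:
#
#     from transformers.utils import logging
#
#     logging.set_verbosity_info()
#     logger = logging.get_logger(__name__)
#     logger.info("visible at INFO level")
#     logger.warning_once("emitted a single time per message")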
| 366
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_swinv2": ["SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Swinv2Config"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_swinv2"] = [
        "SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Swinv2ForImageClassification",
        "Swinv2ForMaskedImageModeling",
        "Swinv2Model",
        "Swinv2PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_swinv2 import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, Swinv2Config

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swinv2 import (
            SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Swinv2ForImageClassification,
            Swinv2ForMaskedImageModeling,
            Swinv2Model,
            Swinv2PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
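# With the `_LazyModule` indirection above, an import such as
# `from transformers.models.swinv2 import Swinv2Model` defers the heavy
# torch-dependent submodule import until the attribute is first accessed.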
| 108
| 0
|
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
logger = logging.get_logger(__name__)


class VisionEncoderDecoderConfig(PretrainedConfig):
    model_type = "vision-encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        if "encoder" not in kwargs or "decoder" not in kwargs:
            raise ValueError(
                f"A configuration of type {self.model_type} cannot be instantiated because "
                f"not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}"
            )

        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        logger.info("Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True

        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class VisionEncoderDecoderEncoderOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict({"last_hidden_state": {0: "batch", 1: "encoder_sequence"}})


class VisionEncoderDecoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict()
        common_inputs["input_ids"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["encoder_hidden_states"] = {0: "batch", 1: "encoder_sequence"}
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizerBase",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        import torch

        common_inputs = OrderedDict()

        dummy_input = super().generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        batch, encoder_sequence = dummy_input["input_ids"].shape
        encoder_hidden_states_shape = (batch, encoder_sequence, self._config.encoder_hidden_size)
        common_inputs["input_ids"] = dummy_input.pop("input_ids")
        common_inputs["attention_mask"] = dummy_input.pop("attention_mask")
        common_inputs["encoder_hidden_states"] = torch.zeros(encoder_hidden_states_shape)

        return common_inputs


class VisionEncoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> None:
        pass

    def get_encoder_config(self, encoder_config: PretrainedConfig) -> OnnxConfig:
        return VisionEncoderDecoderEncoderOnnxConfig(encoder_config)

    def get_decoder_config(
        self, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, feature: str = "default"
    ) -> OnnxConfig:
        decoder_config.encoder_hidden_size = encoder_config.hidden_size
        return VisionEncoderDecoderDecoderOnnxConfig(decoder_config, feature)
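# Illustrative usage (added): a minimal sketch of composing the config from an
# encoder and a decoder config, assuming `transformers` is installed:
#
#     from transformers import BertConfig, ViTConfig, VisionEncoderDecoderConfig
#
#     config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(ViTConfig(), BertConfig())
#     assert config.decoder.is_decoder and config.decoder.add_cross_attention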
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xglm"] = ["XGLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xglm_fast"] = ["XGLMTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xglm"] = [
'''XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XGLMForCausalLM''',
'''XGLMModel''',
'''XGLMPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_xglm"] = [
'''FlaxXGLMForCausalLM''',
'''FlaxXGLMModel''',
'''FlaxXGLMPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xglm"] = [
'''TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXGLMForCausalLM''',
'''TFXGLMModel''',
'''TFXGLMPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
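# Illustrative usage (added): if torch is installed, the guarded registrations
# above make the model symbols importable, and they resolve lazily:
#
#     from transformers import XGLMConfig, XGLMModel
#
#     model = XGLMModel(XGLMConfig())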
from __future__ import annotations

import doctest
from collections import deque

import numpy as np


class CircularConvolution:
    """
    Circular (cyclic) convolution of two signals, computed with the matrix
    method: each row of the matrix is a rotation of the second signal.
    """

    def __init__(self) -> None:
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]

    def circular_convolution(self) -> list[float]:
        length_first_signal = len(self.first_signal)
        length_second_signal = len(self.second_signal)
        max_length = max(length_first_signal, length_second_signal)

        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for i in range(max_length)]

        # fills the smaller signal with zeros to make both signals of same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)

        # each row of the matrix is the second signal rotated by one more position
        for i in range(max_length):
            rotated_signal = deque(self.second_signal)
            rotated_signal.rotate(i)
            for j, item in enumerate(rotated_signal):
                matrix[i][j] += item

        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix), np.transpose(self.first_signal))

        # rounding-off to two decimal places
        return [round(i, 2) for i in final_signal]
if __name__ == "__main__":
doctest.testmod()
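# Illustrative cross-check (added): by the convolution theorem, the same result
# is obtained via FFT -> pointwise product -> inverse FFT.
if __name__ == "__main__":
    signals = CircularConvolution()
    fft_result = np.real(np.fft.ifft(np.fft.fft(signals.first_signal) * np.fft.fft(signals.second_signal)))
    # Both methods yield [10.0, 10.0, 6.0, 14.0] for the default signals.
    assert [round(x, 2) for x in fft_result] == CircularConvolution().circular_convolution()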
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
logger = logging.getLogger(__name__)


def parse_args():
    """Parses command-line arguments for preparing the TFRecord shards."""
    parser = argparse.ArgumentParser(
        description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset."
    )
    parser.add_argument(
        "--dataset_name",
        type=str,
        default="wikitext",
        help="Name of the training. Explore datasets at: hf.co/datasets.",
    )
    parser.add_argument(
        "--dataset_config", type=str, default="wikitext-103-raw-v1", help="Configuration name of the dataset."
    )
    parser.add_argument(
        "--tokenizer_name_or_path",
        type=str,
        default="sayakpaul/unigram-tokenizer-wikitext",
        help="Tokenizer identifier. Can be a local filepath or a Hub identifier.",
    )
    parser.add_argument("--shard_size", type=int, default=1000, help="Number of entries to go in a single shard.")
    parser.add_argument("--split", type=str, default="train", choices=["train", "test", "validation"])
    parser.add_argument("--limit", default=None, type=int, help="Limit the number of shards (used for debugging).")
    parser.add_argument(
        "--max_length",
        type=int,
        default=512,
        help="Maximum sequence length. For training on TPUs, it helps to have a maximum"
        " sequence length that is a multiple of 8.",
    )
    parser.add_argument(
        "--output_dir",
        default="tf-tpu",
        type=str,
        help="Output directory where the TFRecord shards will be saved. If the"
        " path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"
        " shards will be directly saved to a Google Cloud Storage bucket.",
    )
    args = parser.parse_args()
    return args
def tokenize_function(tokenizer):
    def fn(examples):
        return tokenizer(examples["text"])

    return fn
def get_serialized_examples(tokenized_data):
    records = []
    for i in range(len(tokenized_data["input_ids"])):
        features = {
            "input_ids": tf.train.Feature(int64_list=tf.train.Int64List(value=tokenized_data["input_ids"][i])),
            "attention_mask": tf.train.Feature(
                int64_list=tf.train.Int64List(value=tokenized_data["attention_mask"][i])
            ),
        }
        features = tf.train.Features(feature=features)
        example = tf.train.Example(features=features)
        record_bytes = example.SerializeToString()
        records.append(record_bytes)
    return records
def main(args):
    dataset = datasets.load_dataset(args.dataset_name, args.dataset_config, split=args.split)

    if args.limit is not None:
        max_samples = min(len(dataset), args.limit)
        dataset = dataset.select(range(max_samples))
        print(f"Limiting the dataset to {args.limit} entries.")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path)

    # Handle output directory creation.
    # For serializing into a Google Cloud Storage Bucket, one needs to first
    # create a bucket.
    if "gs" not in args.output_dir:
        if not os.path.exists(args.output_dir):
            os.makedirs(args.output_dir)
        split_dir = os.path.join(args.output_dir, args.split)
        if not os.path.exists(split_dir):
            os.makedirs(split_dir)
    else:
        split_dir = os.path.join(args.output_dir, args.split)

    # Tokenize the whole dataset at once.
    tokenize_fn = tokenize_function(tokenizer)
    dataset_tokenized = dataset.map(tokenize_fn, batched=True, num_proc=4, remove_columns=["text"])

    # We need to concatenate all our texts together, and then split the result
    # into chunks of a fixed size, which we will call block_size. To do this, we
    # will use the map method again, with the option batched=True. When we use batched=True,
    # the function we pass to map() will be passed multiple inputs at once, allowing us
    # to group them into more or fewer examples than we had in the input.
    # This allows us to create our new fixed-length samples. The advantage of this
    # method is that we don't lose a whole lot of content from the dataset compared to the
    # case where we simply tokenize with a pre-defined max_length.
    def group_texts(examples):
        # Concatenate all texts.
        concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys())[0]])
        # We drop the small remainder, though you could add padding instead if the model supports it
        # In this, as in all things, we advise you to follow your heart 🫀
        total_length = (total_length // args.max_length) * args.max_length
        # Split by chunks of max_len.
        result = {
            k: [t[i : i + args.max_length] for i in range(0, total_length, args.max_length)]
            for k, t in concatenated_examples.items()
        }
        return result

    grouped_dataset = dataset_tokenized.map(group_texts, batched=True, batch_size=1000, num_proc=4)

    shard_count = 0
    total_records = 0
    for shard in range(0, len(grouped_dataset), args.shard_size):
        dataset_snapshot = grouped_dataset[shard : shard + args.shard_size]
        records_containing = len(dataset_snapshot["input_ids"])
        filename = os.path.join(split_dir, f"dataset-{shard_count}-{records_containing}.tfrecord")
        serialized_examples = get_serialized_examples(dataset_snapshot)

        with tf.io.TFRecordWriter(filename) as out_file:
            for i in range(len(serialized_examples)):
                example = serialized_examples[i]
                out_file.write(example)
            print("Wrote file {} containing {} records".format(filename, records_containing))

        shard_count += 1
        total_records += records_containing

    with open(f"split-{args.split}-records-count.txt", "w") as f:
        print(f"Total {args.split} records: {total_records}", file=f)
if __name__ == "__main__":
SCREAMING_SNAKE_CASE :int = parse_args()
main(args)
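# Illustrative read-back (added): a minimal sketch for inspecting one shard with
# tf.data, assuming `path` points at a shard produced by this script:
#
#     feature_description = {
#         "input_ids": tf.io.VarLenFeature(tf.int64),
#         "attention_mask": tf.io.VarLenFeature(tf.int64),
#     }
#     for raw_record in tf.data.TFRecordDataset([path]).take(1):
#         parsed = tf.io.parse_single_example(raw_record, feature_description)
#         print(tf.sparse.to_dense(parsed["input_ids"]))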
"""simple docstring"""
def equated_monthly_installments(principal: float, rate_per_annum: float, years_to_repay: int) -> float:
    """
    Formula for amortization amount per month:
    A = p * r * (1 + r)^n / ((1 + r)^n - 1)
    where p is the principal, r is the rate of interest per month
    and n is the number of payments.
    """
    if principal <= 0:
        raise Exception('Principal borrowed must be > 0')
    if rate_per_annum < 0:
        raise Exception('Rate of interest must be >= 0')
    if years_to_repay <= 0 or not isinstance(years_to_repay, int):
        raise Exception('Years to repay must be an integer > 0')

    # Yearly rate is divided by 12 to get monthly rate
    rate_per_month = rate_per_annum / 12

    # Years to repay is multiplied by 12 to get number of payments as payment is monthly
    number_of_payments = years_to_repay * 12

    return (
        principal
        * rate_per_month
        * (1 + rate_per_month) ** number_of_payments
        / ((1 + rate_per_month) ** number_of_payments - 1)
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
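# Worked example (added): borrowing 25,000 at 12% yearly interest over 3 years
# gives a monthly rate of 0.01 and 36 payments, i.e. an EMI of about 830.36.
if __name__ == "__main__":
    print(f"{equated_monthly_installments(25000, 0.12, 3):.2f}")  # ~830.36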
'''simple docstring'''
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
in_colab = False
try:
    in_colab = _is_package_available("google.colab")
except ModuleNotFoundError:
    pass
@input.register
class BulletMenu:
    """
    A CLI menu to select a choice from a list of choices using the keyboard.
    """

    def __init__(self, prompt: str = None, choices: list = []):
        self.position = 0
        self.choices = choices
        self.prompt = prompt
        if sys.platform == "win32":
            self.arrow_char = "*"
        else:
            self.arrow_char = "➔ "

    def write_choice(self, index, end: str = ""):
        if sys.platform != "win32":
            writeColor(self.choices[index], 32, end)
        else:
            forceWrite(self.choices[index], end)

    def print_choice(self, index: int):
        """Prints the choice at the given index."""
        if index == self.position:
            forceWrite(f" {self.arrow_char} ")
            self.write_choice(index)
        else:
            forceWrite(f"    {self.choices[index]}")
        reset_cursor()

    def move_direction(self, direction: Direction, num_spaces: int = 1):
        """Should not be directly called, used to move a direction of either up or down."""
        old_position = self.position
        if direction == Direction.DOWN:
            if self.position + 1 >= len(self.choices):
                return
            self.position += num_spaces
        else:
            if self.position - 1 < 0:
                return
            self.position -= num_spaces
        clear_line()
        self.print_choice(old_position)
        move_cursor(num_spaces, direction.name)
        self.print_choice(self.position)

    @input.mark(KEYMAP["up"])
    def move_up(self):
        self.move_direction(Direction.UP)

    @input.mark(KEYMAP["down"])
    def move_down(self):
        self.move_direction(Direction.DOWN)

    @input.mark(KEYMAP["newline"])
    def select(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        return self.position

    @input.mark(KEYMAP["interrupt"])
    def interrupt(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        raise KeyboardInterrupt

    @input.mark_multiple(*[KEYMAP[str(number)] for number in range(10)])
    def select_row(self):
        index = int(chr(self.current_selection))
        movement = index - self.position
        if index == self.position:
            return
        if index < len(self.choices):
            if self.position > index:
                self.move_direction(Direction.UP, -movement)
            elif self.position < index:
                self.move_direction(Direction.DOWN, movement)
            else:
                return
        else:
            return

    def run(self, default_choice: int = 0):
        """Start the menu and return the selected choice."""
        if self.prompt:
            linebreak()
            forceWrite(self.prompt, "\n")
            if in_colab:
                forceWrite("Please input a choice index (starting from 0), and press enter", "\n")
            else:
                forceWrite("Please select a choice using the arrow or number keys, and selecting with enter", "\n")

        self.position = default_choice
        for i in range(len(self.choices)):
            self.print_choice(i)
            forceWrite("\n")
        move_cursor(len(self.choices) - self.position, "UP")

        with cursor.hide():
            while True:
                if in_colab:
                    try:
                        choice = int(builtins.input())
                    except ValueError:
                        choice = default_choice
                else:
                    choice = self.handle_input()
                if choice is not None:
                    reset_cursor()
                    for _ in range(len(self.choices) + 1):
                        move_cursor(1, "UP")
                        clear_line()
                    self.write_choice(choice, "\n")
                    return choice
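# Illustrative usage (added): a minimal sketch of driving the menu, assuming an
# interactive terminal (the class reads raw key presses via the `input` helpers):
#
#     menu = BulletMenu("Which mixed precision?", ["no", "fp16", "bf16"])
#     choice_index = menu.run(default_choice=0)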
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
authorized_types = ["text", "image", "audio"]


def create_inputs(input_types: List[str]):
    inputs = []

    for input_type in input_types:
        if input_type == "text":
            inputs.append("Text input")
        elif input_type == "image":
            inputs.append(
                Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png").resize((512, 512))
            )
        elif input_type == "audio":
            inputs.append(torch.ones(3000))
        elif isinstance(input_type, list):
            inputs.append(create_inputs(input_type))
        else:
            raise ValueError(f"Invalid type requested: {input_type}")

    return inputs


def output_types(outputs: List):
    output_types = []

    for output in outputs:
        if isinstance(output, (str, AgentText)):
            output_types.append("text")
        elif isinstance(output, (Image.Image, AgentImage)):
            output_types.append("image")
        elif isinstance(output, (torch.Tensor, AgentAudio)):
            output_types.append("audio")
        else:
            raise ValueError(f"Invalid output: {output}")

    return output_types
@is_tool_test
class ToolTesterMixin:
    """Mixin adding common sanity tests for `Tool` subclasses."""

    def test_inputs_outputs(self):
        self.assertTrue(hasattr(self.tool, "inputs"))
        self.assertTrue(hasattr(self.tool, "outputs"))

        inputs = self.tool.inputs
        for _input in inputs:
            if isinstance(_input, list):
                for __input in _input:
                    self.assertTrue(__input in authorized_types)
            else:
                self.assertTrue(_input in authorized_types)

        outputs = self.tool.outputs
        for _output in outputs:
            self.assertTrue(_output in authorized_types)

    def test_call(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        # There is a single output
        if len(self.tool.outputs) == 1:
            outputs = [outputs]

        self.assertListEqual(output_types(outputs), self.tool.outputs)

    def test_common_attributes(self):
        self.assertTrue(hasattr(self.tool, "description"))
        self.assertTrue(hasattr(self.tool, "default_checkpoint"))
        self.assertTrue(self.tool.description.startswith("This is a tool that"))

    def test_agent_types_outputs(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))

        for output, output_type in zip(outputs, self.tool.outputs):
            agent_type = AGENT_TYPE_MAPPING[output_type]
            self.assertTrue(isinstance(output, agent_type))

    def test_agent_types_inputs(self):
        inputs = create_inputs(self.tool.inputs)

        _inputs = []
        for _input, input_type in zip(inputs, self.tool.inputs):
            if isinstance(input_type, list):
                _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input) for _input_type in input_type])
            else:
                _inputs.append(AGENT_TYPE_MAPPING[input_type](_input))

        # Should not raise an error
        outputs = self.tool(*_inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))
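# Illustrative check (added): the two helpers above round-trip on the declared
# modalities, e.g. for a hypothetical tool declaring ["text", "audio"]:
#
#     dummy = create_inputs(["text", "audio"])   # -> ["Text input", torch.ones(3000)]
#     assert output_types(dummy) == ["text", "audio"]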
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class RandomIterableDataset(IterableDataset):
    # Iterable dataset with a random (but seed-reproducible) length.
    def __init__(self, p_stop=0.01, max_length=1000):
        self.p_stop = p_stop
        self.max_length = max_length

    def __iter__(self):
        count = 0
        stop = False
        while not stop and count < self.max_length:
            yield count
            count += 1
            stop = random.random() < self.p_stop


class DataLoaderTester(unittest.TestCase):
    def check_batch_sampler_shards(self, batch_sampler, expected, split_batches=False, even_batches=True):
        batch_sampler_shards = [
            BatchSamplerShard(batch_sampler, 2, i, split_batches=split_batches, even_batches=even_batches)
            for i in range(2)
        ]
        batch_sampler_lists = [list(batch_sampler_shard) for batch_sampler_shard in batch_sampler_shards]
        if not split_batches:
            self.assertListEqual([len(shard) for shard in batch_sampler_shards], [len(e) for e in expected])
        self.assertListEqual(batch_sampler_lists, expected)
    def test_batch_sampler_shards(self):
        # Check the shards when the dataset is a round multiple of total batch size.
        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is a round multiple of batch size but not total batch size.
        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=False)
        expected = [[[0, 1, 0]], [[1, 0, 1]]]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected)
    def test_batch_sampler_shards_with_splits(self):
# Check the shards when the dataset is a round multiple of batch size.
SCREAMING_SNAKE_CASE__ = BatchSampler(range(24 ) , batch_size=4 , drop_last=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(__lowerCamelCase , __lowerCamelCase , split_batches=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = BatchSampler(range(24 ) , batch_size=4 , drop_last=__lowerCamelCase )
# Expected shouldn't change
self.check_batch_sampler_shards(__lowerCamelCase , __lowerCamelCase , split_batches=__lowerCamelCase )
# Check the shards when the dataset is not a round multiple of batch size.
SCREAMING_SNAKE_CASE__ = BatchSampler(range(22 ) , batch_size=4 , drop_last=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
]
self.check_batch_sampler_shards(__lowerCamelCase , __lowerCamelCase , split_batches=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = BatchSampler(range(22 ) , batch_size=4 , drop_last=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(__lowerCamelCase , __lowerCamelCase , split_batches=__lowerCamelCase )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
SCREAMING_SNAKE_CASE__ = BatchSampler(range(21 ) , batch_size=4 , drop_last=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
]
self.check_batch_sampler_shards(__lowerCamelCase , __lowerCamelCase , split_batches=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = BatchSampler(range(21 ) , batch_size=4 , drop_last=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(__lowerCamelCase , __lowerCamelCase , split_batches=__lowerCamelCase )
# Check the shards when the dataset is very small.
SCREAMING_SNAKE_CASE__ = BatchSampler(range(2 ) , batch_size=4 , drop_last=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = [[[0, 1]], [[0, 1]]]
self.check_batch_sampler_shards(__lowerCamelCase , __lowerCamelCase , split_batches=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = BatchSampler(range(2 ) , batch_size=4 , drop_last=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = [[], []]
self.check_batch_sampler_shards(__lowerCamelCase , __lowerCamelCase , split_batches=__lowerCamelCase )
    def test_batch_sampler_shards_uneven(self):
# Check the shards when the dataset is a round multiple of total batch size.
SCREAMING_SNAKE_CASE__ = BatchSampler(range(24 ) , batch_size=3 , drop_last=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(__lowerCamelCase , __lowerCamelCase , even_batches=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = BatchSampler(range(24 ) , batch_size=3 , drop_last=__lowerCamelCase )
# Expected shouldn't change
self.check_batch_sampler_shards(__lowerCamelCase , __lowerCamelCase , even_batches=__lowerCamelCase )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
SCREAMING_SNAKE_CASE__ = BatchSampler(range(21 ) , batch_size=3 , drop_last=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__lowerCamelCase , __lowerCamelCase , even_batches=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = BatchSampler(range(21 ) , batch_size=3 , drop_last=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__lowerCamelCase , __lowerCamelCase , even_batches=__lowerCamelCase )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
SCREAMING_SNAKE_CASE__ = BatchSampler(range(22 ) , batch_size=3 , drop_last=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
]
self.check_batch_sampler_shards(__lowerCamelCase , __lowerCamelCase , even_batches=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = BatchSampler(range(22 ) , batch_size=3 , drop_last=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__lowerCamelCase , __lowerCamelCase , even_batches=__lowerCamelCase )
# Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
# num_processes batch.
SCREAMING_SNAKE_CASE__ = BatchSampler(range(20 ) , batch_size=3 , drop_last=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__lowerCamelCase , __lowerCamelCase , even_batches=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = BatchSampler(range(20 ) , batch_size=3 , drop_last=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__lowerCamelCase , __lowerCamelCase , even_batches=__lowerCamelCase )
# Check the shards when the dataset is very small.
SCREAMING_SNAKE_CASE__ = BatchSampler(range(2 ) , batch_size=3 , drop_last=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = [[[0, 1]], []]
self.check_batch_sampler_shards(__lowerCamelCase , __lowerCamelCase , even_batches=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = BatchSampler(range(2 ) , batch_size=3 , drop_last=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = [[], []]
self.check_batch_sampler_shards(__lowerCamelCase , __lowerCamelCase , even_batches=__lowerCamelCase )
    def test_batch_sampler_shards_uneven_with_splits(self):
# Check the shards when the dataset is a round multiple of batch size.
SCREAMING_SNAKE_CASE__ = BatchSampler(range(24 ) , batch_size=4 , drop_last=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(__lowerCamelCase , __lowerCamelCase , split_batches=__lowerCamelCase , even_batches=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = BatchSampler(range(24 ) , batch_size=4 , drop_last=__lowerCamelCase )
# Expected shouldn't change
self.check_batch_sampler_shards(__lowerCamelCase , __lowerCamelCase , split_batches=__lowerCamelCase , even_batches=__lowerCamelCase )
# Check the shards when the dataset is not a round multiple of batch size.
SCREAMING_SNAKE_CASE__ = BatchSampler(range(22 ) , batch_size=4 , drop_last=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(__lowerCamelCase , __lowerCamelCase , split_batches=__lowerCamelCase , even_batches=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = BatchSampler(range(22 ) , batch_size=4 , drop_last=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(__lowerCamelCase , __lowerCamelCase , split_batches=__lowerCamelCase , even_batches=__lowerCamelCase )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
SCREAMING_SNAKE_CASE__ = BatchSampler(range(21 ) , batch_size=4 , drop_last=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(__lowerCamelCase , __lowerCamelCase , split_batches=__lowerCamelCase , even_batches=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = BatchSampler(range(21 ) , batch_size=4 , drop_last=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(__lowerCamelCase , __lowerCamelCase , split_batches=__lowerCamelCase , even_batches=__lowerCamelCase )
# Check the shards when the dataset is very small.
SCREAMING_SNAKE_CASE__ = BatchSampler(range(2 ) , batch_size=4 , drop_last=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = [[[0, 1]], []]
self.check_batch_sampler_shards(__lowerCamelCase , __lowerCamelCase , split_batches=__lowerCamelCase , even_batches=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = BatchSampler(range(2 ) , batch_size=4 , drop_last=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = [[], []]
self.check_batch_sampler_shards(__lowerCamelCase , __lowerCamelCase , split_batches=__lowerCamelCase , even_batches=__lowerCamelCase )
    def test_batch_sampler_with_varying_batch_size(self):
        batch_sampler = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
        batch_sampler_shards = [BatchSamplerShard(batch_sampler, 2, i, even_batches=False) for i in range(2)]

        self.assertEqual(len(batch_sampler_shards[0]), 3)
        self.assertEqual(len(batch_sampler_shards[1]), 2)

        self.assertListEqual(list(batch_sampler_shards[0]), [[0, 1, 2], [5, 6, 7, 8], [12, 13]])
        self.assertListEqual(list(batch_sampler_shards[1]), [[3, 4], [9, 10, 11]])
    def check_iterable_dataset_shards(
        self, dataset, seed, batch_size, drop_last=False, num_processes=2, split_batches=False
    ):
        random.seed(seed)
        reference = list(dataset)

        iterable_dataset_shards = [
            IterableDatasetShard(
                dataset,
                batch_size=batch_size,
                drop_last=drop_last,
                num_processes=num_processes,
                process_index=i,
                split_batches=split_batches,
            )
            for i in range(num_processes)
        ]
        iterable_dataset_lists = []
        for iterable_dataset_shard in iterable_dataset_shards:
            # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
            random.seed(seed)
            iterable_dataset_lists.append(list(iterable_dataset_shard))

        shard_batch_size = batch_size // num_processes if split_batches else batch_size
        # All iterable dataset shard should have the same length, a round multiple of shard_batch_size
        first_list = iterable_dataset_lists[0]
        for l in iterable_dataset_lists[1:]:
            self.assertEqual(len(l), len(first_list))
            self.assertTrue(len(l) % shard_batch_size == 0)

        observed = []
        for idx in range(0, len(first_list), shard_batch_size):
            for l in iterable_dataset_lists:
                observed += l[idx : idx + shard_batch_size]

        if not drop_last:
            while len(reference) < len(observed):
                reference += reference
        self.assertListEqual(observed, reference[: len(observed)])
    def test_iterable_dataset_shard(self):
        seed = 42
        dataset = RandomIterableDataset()

        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)

        # Edge case with a very small dataset
        dataset = RandomIterableDataset(max_length=2)

        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)
    def test_skip_batch_sampler(self):
        batch_sampler = BatchSampler(range(16), batch_size=4, drop_last=False)
        new_batch_sampler = SkipBatchSampler(batch_sampler, 2)
        self.assertListEqual(list(new_batch_sampler), [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_data_loader(self):
        dataloader = SkipDataLoader(list(range(16)), batch_size=4, skip_batches=2)
        self.assertListEqual([t.tolist() for t in dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_first_batches(self):
        dataloader = DataLoader(list(range(16)), batch_size=4)
        new_dataloader = skip_first_batches(dataloader, num_batches=2)
        self.assertListEqual([t.tolist() for t in new_dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_end_of_dataloader(self):
        dataloader = DataLoaderShard(list(range(16)), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

    def test_end_of_dataloader_dispatcher(self):
        Accelerator()
        dataloader = DataLoaderDispatcher(range(16), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
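# Illustrative check (added): a minimal sketch of the batch-by-batch sharding
# that the tests above exercise, for two processes and a round-multiple dataset.
if __name__ == "__main__":
    sampler = BatchSampler(range(12), batch_size=2, drop_last=False)
    shards = [BatchSamplerShard(sampler, 2, i) for i in range(2)]
    assert list(shards[0]) == [[0, 1], [4, 5], [8, 9]]
    assert list(shards[1]) == [[2, 3], [6, 7], [10, 11]]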
from importlib import import_module
from .logging import get_logger
UpperCamelCase_ = get_logger(__name__)
class _PatchedModuleObj:
    """Set all the modules components as attributes of the _PatchedModuleObj object."""

    def __init__(self, module, attrs=None):
        attrs = attrs or []
        if module is not None:
            for key in module.__dict__:
                if key in attrs or not key.startswith("__"):
                    setattr(self, key, getattr(module, key))
        self._original_module = module._original_module if isinstance(module, _PatchedModuleObj) else module
class patch_submodule:
    """
    Patch a submodule attribute of an object, by keeping all other submodules intact at all levels.
    """

    _active_patches = []

    def __init__(self, obj, target: str, new, attrs=None):
        self.obj = obj
        self.target = target
        self.new = new
        self.key = target.split(".")[0]
        self.original = {}
        self.attrs = attrs or []
    def __enter__(self):
        *submodules, target_attr = self.target.split(".")

        # Patch modules:
        # it's used to patch attributes of submodules like "os.path.join";
        # in this case we need to patch "os" and "os.path"
        for i in range(len(submodules)):
            try:
                submodule = import_module(".".join(submodules[: i + 1]))
            except ModuleNotFoundError:
                continue
            # We iterate over all the globals in self.obj in case we find "os" or "os.path"
            for attr in self.obj.__dir__():
                obj_attr = getattr(self.obj, attr)
                # We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
                # This allows to patch renamed modules like "from os import path as ospath".
                if obj_attr is submodule or (
                    isinstance(obj_attr, _PatchedModuleObj) and obj_attr._original_module is submodule
                ):
                    self.original[attr] = obj_attr
                    # patch at top level
                    setattr(self.obj, attr, _PatchedModuleObj(obj_attr, attrs=self.attrs))
                    patched = getattr(self.obj, attr)
                    # construct lower levels patches
                    for key in submodules[i + 1 :]:
                        setattr(patched, key, _PatchedModuleObj(getattr(patched, key, None), attrs=self.attrs))
                        patched = getattr(patched, key)
                    # finally set the target attribute
                    setattr(patched, target_attr, self.new)

        # Patch attribute itself:
        # it's used for builtins like "open",
        # and also to patch "os.path.join" we may also need to patch "join"
        # itself if it was imported as "from os.path import join".
        if submodules:  # if it's an attribute of a submodule like "os.path.join"
            try:
                attr_value = getattr(import_module(".".join(submodules)), target_attr)
            except (AttributeError, ModuleNotFoundError):
                return
            # We iterate over all the globals in self.obj in case we find "os.path.join"
            for attr in self.obj.__dir__():
                # We don't check for the name of the global, but rather if its value *is* "os.path.join".
                # This allows to patch renamed attributes like "from os.path import join as pjoin".
                if getattr(self.obj, attr) is attr_value:
                    self.original[attr] = getattr(self.obj, attr)
                    setattr(self.obj, attr, self.new)
        elif target_attr in globals()["__builtins__"]:  # if it's a builtin like "open"
            self.original[target_attr] = globals()["__builtins__"][target_attr]
            setattr(self.obj, target_attr, self.new)
        else:
            raise RuntimeError(f"Tried to patch attribute {target_attr} instead of a submodule.")
    def __exit__(self, *exc_info):
        for attr in list(self.original):
            setattr(self.obj, attr, self.original.pop(attr))

    def start(self):
        """Activate a patch."""
        self.__enter__()
        self._active_patches.append(self)

    def stop(self):
        """Stop an active patch."""
        try:
            self._active_patches.remove(self)
        except ValueError:
            # If the patch hasn't been started this will fail
            return None

        return self.__exit__()
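# Illustrative usage (added): a hypothetical sketch of patching `os.path.join`
# as seen from a module `mod` that did `import os`:
#
#     def mock_join(*paths):
#         return "/".join(paths)
#
#     with patch_submodule(mod, "os.path.join", mock_join):
#         assert mod.os.path.join("a", "b") == "a/b"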
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
    GPT2TokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class TokenizerUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")

        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
            # This check we did call the fake head request
            mock_head.assert_called()
    @require_tokenizers
    def test_cached_files_are_used_when_internet_is_down_missing_files(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = GPT2TokenizerFast.from_pretrained("gpt2")

        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = GPT2TokenizerFast.from_pretrained("gpt2")
            # This check we did call the fake head request
            mock_head.assert_called()
    def test_legacy_load_from_one_file(self):
        # This test is for deprecated behavior and can be removed in v5
        try:
            tmp_file = tempfile.mktemp()
            with open(tmp_file, "wb") as f:
                http_get("https://huggingface.co/albert-base-v1/resolve/main/spiece.model", f)

            _ = AlbertTokenizer.from_pretrained(tmp_file)
        finally:
            os.remove(tmp_file)

        # Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
        # the current folder and have the right name.
        if os.path.isfile("tokenizer.json"):
            # We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
            return
        try:
            with open("tokenizer.json", "wb") as f:
                http_get("https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json", f)
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
            # The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000
            self.assertEqual(tokenizer.vocab_size, 1000)
            # Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
        finally:
            os.remove("tokenizer.json")
    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = AlbertTokenizer.from_pretrained("https://huggingface.co/albert-base-v1/resolve/main/spiece.model")
@is_staging_test
class TokenizerPushToHubTester(unittest.TestCase):
    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]

    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-tokenizer")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-tokenizer-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-tokenizer")
        except HTTPError:
            pass
    def test_push_to_hub(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = BertTokenizer(vocab_file)

        tokenizer.push_to_hub("test-tokenizer", use_auth_token=self._token)
        new_tokenizer = BertTokenizer.from_pretrained(f"{USER}/test-tokenizer")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

        # Reset repo
        delete_repo(token=self._token, repo_id="test-tokenizer")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir, repo_id="test-tokenizer", push_to_hub=True, use_auth_token=self._token)

        new_tokenizer = BertTokenizer.from_pretrained(f"{USER}/test-tokenizer")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)
    def test_push_to_hub_in_organization(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = BertTokenizer(vocab_file)

        tokenizer.push_to_hub("valid_org/test-tokenizer-org", use_auth_token=self._token)
        new_tokenizer = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-tokenizer-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(
                tmp_dir, repo_id="valid_org/test-tokenizer-org", push_to_hub=True, use_auth_token=self._token
            )

        new_tokenizer = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)
    @require_tokenizers
    def test_push_to_hub_dynamic_tokenizer(self):
        CustomTokenizer.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = CustomTokenizer(vocab_file)

        # No fast custom tokenizer
        tokenizer.push_to_hub("test-dynamic-tokenizer", use_auth_token=self._token)

        tokenizer = AutoTokenizer.from_pretrained(f"{USER}/test-dynamic-tokenizer", trust_remote_code=True)
        # Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizer")

        # Fast and slow custom tokenizer
        CustomTokenizerFast.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))

            bert_tokenizer = BertTokenizerFast.from_pretrained(tmp_dir)
            bert_tokenizer.save_pretrained(tmp_dir)
            tokenizer = CustomTokenizerFast.from_pretrained(tmp_dir)

        tokenizer.push_to_hub("test-dynamic-tokenizer", use_auth_token=self._token)

        tokenizer = AutoTokenizer.from_pretrained(f"{USER}/test-dynamic-tokenizer", trust_remote_code=True)
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizerFast")
        tokenizer = AutoTokenizer.from_pretrained(
            f"{USER}/test-dynamic-tokenizer", use_fast=False, trust_remote_code=True
        )
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizer")
class TrieTest(unittest.TestCase):
    def test_trie(self):
        trie = Trie()
        trie.add("Hello 友達")
        self.assertEqual(trie.data, {"H": {"e": {"l": {"l": {"o": {" ": {"友": {"達": {"": 1}}}}}}}}})
        trie.add("Hello")
        self.assertEqual(trie.data, {"H": {"e": {"l": {"l": {"o": {"": 1, " ": {"友": {"達": {"": 1}}}}}}}}})

    def test_trie_split(self):
        trie = Trie()
        self.assertEqual(trie.split("[CLS] This is a extra_id_100"), ["[CLS] This is a extra_id_100"])
        trie.add("[CLS]")
        trie.add("extra_id_1")
        trie.add("extra_id_100")
        self.assertEqual(trie.split("[CLS] This is a extra_id_100"), ["[CLS]", " This is a ", "extra_id_100"])

    def test_trie_single(self):
        trie = Trie()
        trie.add("A")
        self.assertEqual(trie.split("ABC"), ["A", "BC"])
        self.assertEqual(trie.split("BCA"), ["BC", "A"])

    def test_trie_final(self):
        trie = Trie()
        trie.add("TOKEN]")
        trie.add("[SPECIAL_TOKEN]")
        self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]"), ["This is something ", "[SPECIAL_TOKEN]"])

    def test_trie_subtokens(self):
        trie = Trie()
        trie.add("A")
        trie.add("P")
        trie.add("[SPECIAL_TOKEN]")
        self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]"), ["This is something ", "[SPECIAL_TOKEN]"])

    def test_trie_suffix_tokens(self):
        trie = Trie()
        trie.add("AB")
        trie.add("B")
        trie.add("C")
        self.assertEqual(trie.split("ABC"), ["AB", "C"])

    def test_trie_skip(self):
        trie = Trie()
        trie.add("ABC")
        trie.add("B")
        trie.add("CD")
        self.assertEqual(trie.split("ABCD"), ["ABC", "D"])

    def test_cut_text_hardening(self):
        # Even if the offsets are wrong, we necessarily output correct string
        # parts.
        trie = Trie()
        parts = trie.cut_text("ABC", [0, 0, 2, 1, 2, 3])
        self.assertEqual(parts, ["AB", "C"])
from math import factorial, radians
def sin(angle_in_degrees: float, accuracy: int = 18, rounded_values_count: int = 10) -> float:
    """
    Implement sin using its Maclaurin (Taylor) series expansion:
    sin(x) = x - x^3/3! + x^5/5! - x^7/7! + ...
    """
    # Simplify the angle to be between 360 and -360 degrees.
    angle_in_degrees = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)

    # Converting from degrees to radians
    angle_in_radians = radians(angle_in_degrees)

    result = angle_in_radians
    a = 3
    b = -1

    for _ in range(accuracy):
        result += (b * (angle_in_radians**a)) / factorial(a)

        b = -b  # One positive term and the next will be negative and so on...
        a += 2  # Increased by 2 for every term.

    return round(result, rounded_values_count)
if __name__ == "__main__":
__import__("doctest").testmod()
"""simple docstring"""
from __future__ import annotations
from collections.abc import Generator
def sieve() -> Generator[int, None, None]:
    """
    Incremental Sieve of Eratosthenes: yields primes one at a time, keeping a
    map from composite numbers to one of their prime factors.
    """
    factor_map: dict[int, int] = {}
    prime = 2
    while True:
        factor = factor_map.pop(prime, None)
        if factor:
            x = factor + prime
            while x in factor_map:
                x += factor
            factor_map[x] = factor
        else:
            factor_map[prime * prime] = prime
            yield prime
        prime += 1


def solution(limit: float = 1e10) -> int:
    """
    Returns the least odd index n for which the remainder 2 * p_n * n first
    exceeds `limit` (see the note below).
    """
    primes = sieve()
    n = 1
    while True:
        prime = next(primes)
        if (2 * prime * n) > limit:
            return n
        # Ignore the next prime as the reminder will be 2.
        next(primes)
        n += 2
if __name__ == "__main__":
print(solution())
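# Why the remainder is 2 * n * p for odd n, as a sketch: expand both powers
# binomially modulo p^2.
#
#   (p - 1)^n + (p + 1)^n = sum_k C(n, k) * p^k * ((-1)^(n-k) + 1)
#
# For odd n the k = 0 terms cancel, the k = 1 terms add up to 2*n*p, and all
# k >= 2 terms vanish mod p^2, leaving a remainder of exactly 2*n*p.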
| 313
| 0
|
"""simple docstring"""
import inspect
import os
import torch
from transformers import AutoModel
from transformers.testing_utils import mockenv_context
from transformers.trainer_utils import set_seed
import accelerate
from accelerate.accelerator import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils.testing import (
AccelerateTestCase,
TempDirTestCase,
execute_subprocess_async,
require_cuda,
require_fsdp,
require_multi_gpu,
slow,
)
from accelerate.utils.constants import (
FSDP_AUTO_WRAP_POLICY,
FSDP_BACKWARD_PREFETCH,
FSDP_SHARDING_STRATEGY,
FSDP_STATE_DICT_TYPE,
)
from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin
from accelerate.utils.other import patch_environment
set_seed(42)

BERT_BASE_CASED = "bert-base-cased"
FP16 = "fp16"
BF16 = "bf16"
dtypes = [FP16, BF16]
@require_fsdp
@require_cuda
class FSDPPluginIntegration(AccelerateTestCase):
    def setUp(self):
        super().setUp()

        self.dist_env = dict(
            ACCELERATE_USE_FSDP="true",
            MASTER_ADDR="localhost",
            MASTER_PORT="10999",
            RANK="0",
            LOCAL_RANK="0",
            WORLD_SIZE="1",
        )

    def test_sharding_strategy(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy

        for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
            env = self.dist_env.copy()
            # NOTE: the exact env-var keys were lost in this dump; the names
            # below follow accelerate's FullyShardedDataParallelPlugin and are
            # a reconstruction.
            env["FSDP_SHARDING_STRATEGY"] = f"{i + 1}"
            env["FSDP_SHARDING_STRATEGY_NAME"] = strategy
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.sharding_strategy, ShardingStrategy(i + 1))

    def test_backward_prefetch(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch

        for i, prefetch_policy in enumerate(FSDP_BACKWARD_PREFETCH):
            env = self.dist_env.copy()
            env["FSDP_BACKWARD_PREFETCH"] = prefetch_policy
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                if prefetch_policy == "NO_PREFETCH":
                    self.assertIsNone(fsdp_plugin.backward_prefetch)
                else:
                    self.assertEqual(fsdp_plugin.backward_prefetch, BackwardPrefetch(i + 1))

    def test_state_dict_type(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType

        for i, state_dict_type in enumerate(FSDP_STATE_DICT_TYPE):
            env = self.dist_env.copy()
            env["FSDP_STATE_DICT_TYPE"] = state_dict_type
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.state_dict_type, StateDictType(i + 1))
                if state_dict_type == "FULL_STATE_DICT":
                    self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu)
                    self.assertTrue(fsdp_plugin.state_dict_config.rank0_only)

    def test_auto_wrap_policy(self):
        model = AutoModel.from_pretrained(BERT_BASE_CASED)
        for policy in FSDP_AUTO_WRAP_POLICY:
            env = self.dist_env.copy()
            env["FSDP_AUTO_WRAP_POLICY"] = policy
            if policy == "TRANSFORMER_BASED_WRAP":
                env["FSDP_TRANSFORMER_CLS_TO_WRAP"] = "BertLayer"
            elif policy == "SIZE_BASED_WRAP":
                env["FSDP_MIN_NUM_PARAMS"] = "2000"
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                fsdp_plugin.set_auto_wrap_policy(model)
                if policy == "NO_WRAP":
                    self.assertIsNone(fsdp_plugin.auto_wrap_policy)
                else:
                    self.assertIsNotNone(fsdp_plugin.auto_wrap_policy)

        env = self.dist_env.copy()
        env["FSDP_AUTO_WRAP_POLICY"] = "TRANSFORMER_BASED_WRAP"
        env["FSDP_TRANSFORMER_CLS_TO_WRAP"] = "T5Layer"
        with mockenv_context(**env):
            fsdp_plugin = FullyShardedDataParallelPlugin()
            with self.assertRaises(Exception) as cm:
                fsdp_plugin.set_auto_wrap_policy(model)
            self.assertTrue("Could not find the transformer layer class to wrap in the model." in str(cm.exception))

        env = self.dist_env.copy()
        env["FSDP_AUTO_WRAP_POLICY"] = "SIZE_BASED_WRAP"
        env["FSDP_MIN_NUM_PARAMS"] = "0"
        with mockenv_context(**env):
            fsdp_plugin = FullyShardedDataParallelPlugin()
            fsdp_plugin.set_auto_wrap_policy(model)
            self.assertIsNone(fsdp_plugin.auto_wrap_policy)

    def test_mixed_precision(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision
        from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler

        for mp_dtype in dtypes:
            env = self.dist_env.copy()
            env["ACCELERATE_MIXED_PRECISION"] = mp_dtype
            with mockenv_context(**env):
                accelerator = Accelerator()
                if mp_dtype == "fp16":
                    dtype = torch.float16
                elif mp_dtype == "bf16":
                    dtype = torch.bfloat16
                mp_policy = MixedPrecision(param_dtype=dtype, reduce_dtype=dtype, buffer_dtype=dtype)
                self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy, mp_policy)
                if mp_dtype == FP16:
                    self.assertTrue(isinstance(accelerator.scaler, ShardedGradScaler))
                elif mp_dtype == BF16:
                    self.assertIsNone(accelerator.scaler)
                AcceleratorState._reset_state(True)

    def test_cpu_offload(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload

        for flag in [True, False]:
            env = self.dist_env.copy()
            env["FSDP_OFFLOAD_PARAMS"] = str(flag).lower()
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.cpu_offload, CPUOffload(offload_params=flag))
@require_fsdp
@require_multi_gpu
@slow
class FSDPIntegrationTest(TempDirTestCase):
    def setUp(self):
        super().setUp()
        self.performance_lower_bound = 0.82
        self.performance_configs = [
            "fsdp_shard_grad_op_transformer_based_wrap",
            "fsdp_full_shard_transformer_based_wrap",
        ]
        self.peak_memory_usage_upper_bound = {
            "multi_gpu_fp16": 3200,
            "fsdp_shard_grad_op_transformer_based_wrap_fp16": 2000,
            "fsdp_full_shard_transformer_based_wrap_fp16": 1900,
            # Disabling below test as it overwhelms the RAM memory usage
            # on CI self-hosted runner leading to tests getting killed.
            # "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500,  # fp16 was leading to indefinite hang
        }
        self.n_train = 160
        self.n_val = 160

        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_scripts_folder = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps"])

    def test_performance(self):
        self.test_file_path = os.path.join(self.test_scripts_folder, "test_performance.py")
        cmd = ["accelerate", "launch", "--num_processes=2", "--num_machines=1", "--machine_rank=0", "--use_fsdp"]
        for config in self.performance_configs:
            cmd_config = cmd.copy()
            for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
                if strategy.lower() in config:
                    cmd_config.append(f"--fsdp_sharding_strategy={i + 1}")
                    break

            if "fp32" in config:
                cmd_config.append("--mixed_precision=no")
            else:
                cmd_config.append("--mixed_precision=fp16")

            if "cpu_offload" in config:
                cmd_config.append("--fsdp_offload_params=True")

            for policy in FSDP_AUTO_WRAP_POLICY:
                if policy.lower() in config:
                    cmd_config.append(f"--fsdp_auto_wrap_policy={policy}")
                    break

            if policy == "TRANSFORMER_BASED_WRAP":
                cmd_config.append("--fsdp_transformer_layer_cls_to_wrap=BertLayer")
            elif policy == "SIZE_BASED_WRAP":
                cmd_config.append("--fsdp_min_num_params=2000")

            cmd_config.extend(
                [
                    self.test_file_path,
                    f"--output_dir={self.tmpdir}",
                    f"--performance_lower_bound={self.performance_lower_bound}",
                ]
            )
            with patch_environment(omp_num_threads=1):
                execute_subprocess_async(cmd_config, env=os.environ.copy())

    def test_checkpointing(self):
        self.test_file_path = os.path.join(self.test_scripts_folder, "test_checkpointing.py")
        cmd = [
            "accelerate",
            "launch",
            "--num_processes=2",
            "--num_machines=1",
            "--machine_rank=0",
            "--use_fsdp",
            "--mixed_precision=fp16",
            "--fsdp_transformer_layer_cls_to_wrap=BertLayer",
        ]

        for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
            cmd_config = cmd.copy()
            cmd_config.append(f"--fsdp_sharding_strategy={i + 1}")
            if strategy != "FULL_SHARD":
                continue
            state_dict_config_index = len(cmd_config)
            for state_dict_type in FSDP_STATE_DICT_TYPE:
                cmd_config = cmd_config[:state_dict_config_index]
                cmd_config.append(f"--fsdp_state_dict_type={state_dict_type}")
                cmd_config.extend(
                    [
                        self.test_file_path,
                        f"--output_dir={self.tmpdir}",
                        "--partial_train_epoch=1",
                    ]
                )
                with patch_environment(omp_num_threads=1):
                    execute_subprocess_async(cmd_config, env=os.environ.copy())

                cmd_config = cmd_config[:-1]
                resume_from_checkpoint = os.path.join(self.tmpdir, "epoch_0")
                cmd_config.extend(
                    [
                        f"--resume_from_checkpoint={resume_from_checkpoint}",
                    ]
                )
                with patch_environment(omp_num_threads=1):
                    execute_subprocess_async(cmd_config, env=os.environ.copy())

    def test_peak_memory_usage(self):
        self.test_file_path = os.path.join(self.test_scripts_folder, "test_peak_memory_usage.py")
        cmd = [
            "accelerate",
            "launch",
            "--num_processes=2",
            "--num_machines=1",
            "--machine_rank=0",
        ]
        for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items():
            cmd_config = cmd.copy()
            if "fp16" in spec:
                cmd_config.extend(["--mixed_precision=fp16"])
            else:
                cmd_config.extend(["--mixed_precision=no"])

            if "multi_gpu" in spec:
                continue
            else:
                cmd_config.extend(["--use_fsdp"])
                for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
                    if strategy.lower() in spec:
                        cmd_config.append(f"--fsdp_sharding_strategy={i + 1}")
                        break

                if "cpu_offload" in spec:
                    cmd_config.append("--fsdp_offload_params=True")

                for policy in FSDP_AUTO_WRAP_POLICY:
                    if policy.lower() in spec:
                        cmd_config.append(f"--fsdp_auto_wrap_policy={policy}")
                        break

                if policy == "TRANSFORMER_BASED_WRAP":
                    cmd_config.append("--fsdp_transformer_layer_cls_to_wrap=BertLayer")
                elif policy == "SIZE_BASED_WRAP":
                    cmd_config.append("--fsdp_min_num_params=2000")

            cmd_config.extend(
                [
                    self.test_file_path,
                    f"--output_dir={self.tmpdir}",
                    f"--peak_memory_upper_bound={peak_mem_upper_bound}",
                    f"--n_train={self.n_train}",
                    f"--n_val={self.n_val}",
                ]
            )
            with patch_environment(omp_num_threads=1):
                execute_subprocess_async(cmd_config, env=os.environ.copy())
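# Command-shape sketch: the FSDP tests above assemble invocations like the
# following (flag names as used in the tests; the script name is
# illustrative):
#
#   accelerate launch --num_processes=2 --num_machines=1 --machine_rank=0 \
#       --use_fsdp --mixed_precision=fp16 \
#       --fsdp_sharding_strategy=1 \
#       --fsdp_auto_wrap_policy=TRANSFORMER_BASED_WRAP \
#       --fsdp_transformer_layer_cls_to_wrap=BertLayer \
#       train.py --output_dir=outputs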
| 25
|
"""
Basic linear algebra primitives: a Vector class, a Matrix class and a few
construction helpers (zero vectors, unit basis vectors, random instances).
"""
from __future__ import annotations

import math
import random
from collections.abc import Collection
from typing import overload


class Vector:
    """A vector over the reals, backed by a list of components."""

    def __init__(self, components: Collection[float] | None = None) -> None:
        if components is None:
            components = []
        self.__components = list(components)

    def __len__(self) -> int:
        return len(self.__components)

    def __str__(self) -> str:
        return "(" + ",".join(map(str, self.__components)) + ")"

    def __add__(self, other: Vector) -> Vector:
        size = len(self)
        if size == len(other):
            result = [self.__components[i] + other.component(i) for i in range(size)]
            return Vector(result)
        else:
            raise Exception("must have the same size")

    def __sub__(self, other: Vector) -> Vector:
        size = len(self)
        if size == len(other):
            result = [self.__components[i] - other.component(i) for i in range(size)]
            return Vector(result)
        else:  # error case
            raise Exception("must have the same size")

    @overload
    def __mul__(self, other: float) -> Vector:
        ...

    @overload
    def __mul__(self, other: Vector) -> float:
        ...

    def __mul__(self, other: float | Vector) -> float | Vector:
        if isinstance(other, (float, int)):
            ans = [c * other for c in self.__components]
            return Vector(ans)
        elif isinstance(other, Vector) and len(self) == len(other):
            size = len(self)
            prods = [self.__components[i] * other.component(i) for i in range(size)]
            return sum(prods)
        else:  # error case
            raise Exception("invalid operand!")

    def copy(self) -> Vector:
        return Vector(self.__components)

    def component(self, i: int) -> float:
        if isinstance(i, int) and -len(self.__components) <= i < len(self.__components):
            return self.__components[i]
        else:
            raise Exception("index out of range")

    def change_component(self, pos: int, value: float) -> None:
        assert -len(self.__components) <= pos < len(self.__components)
        self.__components[pos] = value

    def euclidean_length(self) -> float:
        if len(self.__components) == 0:
            raise Exception("Vector is empty")
        squares = [c**2 for c in self.__components]
        return math.sqrt(sum(squares))

    def angle(self, other: Vector, deg: bool = False) -> float:
        num = self * other
        den = self.euclidean_length() * other.euclidean_length()
        if deg:
            return math.degrees(math.acos(num / den))
        else:
            return math.acos(num / den)


def zero_vector(dimension: int) -> Vector:
    assert isinstance(dimension, int)
    return Vector([0] * dimension)


def unit_basis_vector(dimension: int, pos: int) -> Vector:
    assert isinstance(dimension, int) and (isinstance(pos, int))
    ans = [0] * dimension
    ans[pos] = 1
    return Vector(ans)


def axpy(scalar: float, x: Vector, y: Vector) -> Vector:
    assert (
        isinstance(x, Vector)
        and isinstance(y, Vector)
        and (isinstance(scalar, (int, float)))
    )
    return x * scalar + y


def random_vector(n: int, a: int, b: int) -> Vector:
    random.seed(None)
    ans = [random.randint(a, b) for _ in range(n)]
    return Vector(ans)


class Matrix:
    """A matrix supporting elementwise addition/subtraction, scalar and
    vector multiplication, and determinants via cofactor expansion."""

    def __init__(self, matrix: list[list[float]], w: int, h: int) -> None:
        self.__matrix = matrix
        self.__width = w
        self.__height = h

    def __str__(self) -> str:
        ans = ""
        for i in range(self.__height):
            ans += "|"
            for j in range(self.__width):
                if j < self.__width - 1:
                    ans += str(self.__matrix[i][j]) + ","
                else:
                    ans += str(self.__matrix[i][j]) + "|\n"
        return ans

    def __add__(self, other: Matrix) -> Matrix:
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] + other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("matrix must have the same dimension!")

    def __sub__(self, other: Matrix) -> Matrix:
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] - other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("matrices must have the same dimension!")

    @overload
    def __mul__(self, other: float) -> Matrix:
        ...

    @overload
    def __mul__(self, other: Vector) -> Vector:
        ...

    def __mul__(self, other: float | Vector) -> Vector | Matrix:
        if isinstance(other, Vector):  # matrix-vector
            if len(other) == self.__width:
                ans = zero_vector(self.__height)
                for i in range(self.__height):
                    prods = [
                        self.__matrix[i][j] * other.component(j)
                        for j in range(self.__width)
                    ]
                    ans.change_component(i, sum(prods))
                return ans
            else:
                raise Exception(
                    "vector must have the same size as the "
                    "number of columns of the matrix!"
                )
        elif isinstance(other, (int, float)):  # matrix-scalar
            matrix = [
                [self.__matrix[i][j] * other for j in range(self.__width)]
                for i in range(self.__height)
            ]
            return Matrix(matrix, self.__width, self.__height)
        return None

    def height(self) -> int:
        return self.__height

    def width(self) -> int:
        return self.__width

    def component(self, x: int, y: int) -> float:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return self.__matrix[x][y]
        else:
            raise Exception("component: indices out of bounds")

    def change_component(self, x: int, y: int, value: float) -> None:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            self.__matrix[x][y] = value
        else:
            raise Exception("change_component: indices out of bounds")

    def minor(self, x: int, y: int) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        minor = self.__matrix[:x] + self.__matrix[x + 1 :]
        for i in range(len(minor)):
            minor[i] = minor[i][:y] + minor[i][y + 1 :]
        return Matrix(minor, self.__width - 1, self.__height - 1).determinant()

    def cofactor(self, x: int, y: int) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return (-1) ** (x + y) * self.minor(x, y)
        else:
            raise Exception("Indices out of bounds")

    def determinant(self) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if self.__height < 1:
            raise Exception("Matrix has no element")
        elif self.__height == 1:
            return self.__matrix[0][0]
        elif self.__height == 2:
            return (
                self.__matrix[0][0] * self.__matrix[1][1]
                - self.__matrix[0][1] * self.__matrix[1][0]
            )
        else:
            cofactor_prods = [
                self.__matrix[0][y] * self.cofactor(0, y) for y in range(self.__width)
            ]
            return sum(cofactor_prods)


def square_zero_matrix(n: int) -> Matrix:
    ans: list[list[float]] = [[0] * n for _ in range(n)]
    return Matrix(ans, n, n)


def random_matrix(width: int, height: int, a: int, b: int) -> Matrix:
    random.seed(None)
    matrix: list[list[float]] = [
        [random.randint(a, b) for _ in range(width)] for _ in range(height)
    ]
    return Matrix(matrix, width, height)
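# Usage sketch for the classes above (reconstructed names):
if __name__ == "__main__":
    v = Vector([1.0, 2.0, 2.0])
    w = Vector([1.0, 0.0, 0.0])
    print(v + w)                 # (2.0,2.0,2.0)
    print(v * w)                 # dot product -> 1.0
    print(v.euclidean_length())  # sqrt(1 + 4 + 4) -> 3.0

    m = Matrix([[1, 2], [3, 4]], 2, 2)
    print(m.determinant())         # -2
    print(m * Vector([1.0, 1.0]))  # (3.0,7.0)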
| 141
| 0
|
"""Official evaluation script for SQuAD version 2.0."""
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
ARTICLES_REGEX = re.compile(r"\b(a|an|the)\b", re.UNICODE)

OPTS = None


def parse_args():
    parser = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0.")
    parser.add_argument("data_file", metavar="data.json", help="Input data JSON file.")
    parser.add_argument("pred_file", metavar="pred.json", help="Model predictions.")
    parser.add_argument(
        "--out-file", "-o", metavar="eval.json", help="Write accuracy metrics to file (default is stdout)."
    )
    parser.add_argument(
        "--na-prob-file", "-n", metavar="na_prob.json", help="Model estimates of probability of no answer."
    )
    parser.add_argument(
        "--na-prob-thresh",
        "-t",
        type=float,
        default=1.0,
        help='Predict "" if no-answer probability exceeds this (default = 1.0).',
    )
    parser.add_argument(
        "--out-image-dir", "-p", metavar="out_images", default=None, help="Save precision-recall curves to directory."
    )
    parser.add_argument("--verbose", "-v", action="store_true")
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()
def __lowerCamelCase ( _lowercase ) -> Optional[Any]:
UpperCAmelCase : Optional[int] = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
UpperCAmelCase : Union[str, Any] = bool(qa["""answers"""]["""text"""] )
return qid_to_has_ans
def __lowerCamelCase ( _lowercase ) -> Dict:
def remove_articles(_lowercase ):
return ARTICLES_REGEX.sub(""" """ , _lowercase )
def white_space_fix(_lowercase ):
return " ".join(text.split() )
def remove_punc(_lowercase ):
UpperCAmelCase : Dict = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(_lowercase ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(_lowercase ) ) ) )
def __lowerCamelCase ( _lowercase ) -> List[str]:
if not s:
return []
return normalize_answer(_lowercase ).split()
def __lowerCamelCase ( _lowercase , _lowercase ) -> List[Any]:
return int(normalize_answer(_lowercase ) == normalize_answer(_lowercase ) )
def __lowerCamelCase ( _lowercase , _lowercase ) -> Optional[int]:
UpperCAmelCase : Optional[Any] = get_tokens(_lowercase )
UpperCAmelCase : str = get_tokens(_lowercase )
UpperCAmelCase : Dict = collections.Counter(_lowercase ) & collections.Counter(_lowercase )
UpperCAmelCase : Any = sum(common.values() )
if len(_lowercase ) == 0 or len(_lowercase ) == 0:
# If either is no-answer, then F1 is 1 if they agree, 0 otherwise
return int(gold_toks == pred_toks )
if num_same == 0:
return 0
UpperCAmelCase : Tuple = 1.0 * num_same / len(_lowercase )
UpperCAmelCase : Union[str, Any] = 1.0 * num_same / len(_lowercase )
UpperCAmelCase : Any = (2 * precision * recall) / (precision + recall)
return fa
def __lowerCamelCase ( _lowercase , _lowercase ) -> Optional[int]:
UpperCAmelCase : int = {}
UpperCAmelCase : Union[str, Any] = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
UpperCAmelCase : str = qa["""id"""]
UpperCAmelCase : str = [t for t in qa["""answers"""]["""text"""] if normalize_answer(_lowercase )]
if not gold_answers:
# For unanswerable questions, only correct answer is empty string
UpperCAmelCase : Dict = [""""""]
if qid not in preds:
print(F'''Missing prediction for {qid}''' )
continue
UpperCAmelCase : Optional[Any] = preds[qid]
# Take max over all gold answers
UpperCAmelCase : List[Any] = max(compute_exact(_lowercase , _lowercase ) for a in gold_answers )
UpperCAmelCase : str = max(compute_fa(_lowercase , _lowercase ) for a in gold_answers )
return exact_scores, fa_scores
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase , _lowercase ) -> Dict:
UpperCAmelCase : List[Any] = {}
for qid, s in scores.items():
UpperCAmelCase : Dict = na_probs[qid] > na_prob_thresh
if pred_na:
UpperCAmelCase : Union[str, Any] = float(not qid_to_has_ans[qid] )
else:
UpperCAmelCase : str = s
return new_scores
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase=None ) -> Dict:
if not qid_list:
UpperCAmelCase : Tuple = len(_lowercase )
return collections.OrderedDict(
[
("""exact""", 100.0 * sum(exact_scores.values() ) / total),
("""f1""", 100.0 * sum(fa_scores.values() ) / total),
("""total""", total),
] )
else:
UpperCAmelCase : int = len(_lowercase )
return collections.OrderedDict(
[
("""exact""", 100.0 * sum(exact_scores[k] for k in qid_list ) / total),
("""f1""", 100.0 * sum(fa_scores[k] for k in qid_list ) / total),
("""total""", total),
] )
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> Union[str, Any]:
for k in new_eval:
UpperCAmelCase : int = new_eval[k]
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase , _lowercase ) -> List[Any]:
plt.step(_lowercase , _lowercase , color="""b""" , alpha=0.2 , where="""post""" )
plt.fill_between(_lowercase , _lowercase , step="""post""" , alpha=0.2 , color="""b""" )
plt.xlabel("""Recall""" )
plt.ylabel("""Precision""" )
plt.xlim([0.0, 1.05] )
plt.ylim([0.0, 1.05] )
plt.title(_lowercase )
plt.savefig(_lowercase )
plt.clf()
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase=None , _lowercase=None ) -> str:
UpperCAmelCase : Optional[Any] = sorted(_lowercase , key=lambda _lowercase : na_probs[k] )
UpperCAmelCase : List[str] = 0.0
UpperCAmelCase : List[Any] = 1.0
UpperCAmelCase : Union[str, Any] = 0.0
UpperCAmelCase : List[str] = [1.0]
UpperCAmelCase : Union[str, Any] = [0.0]
UpperCAmelCase : Optional[int] = 0.0
for i, qid in enumerate(_lowercase ):
if qid_to_has_ans[qid]:
true_pos += scores[qid]
UpperCAmelCase : int = true_pos / float(i + 1 )
UpperCAmelCase : List[str] = true_pos / float(_lowercase )
if i == len(_lowercase ) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
# i.e., if we can put a threshold after this point
avg_prec += cur_p * (cur_r - recalls[-1])
precisions.append(_lowercase )
recalls.append(_lowercase )
if out_image:
plot_pr_curve(_lowercase , _lowercase , _lowercase , _lowercase )
return {"ap": 100.0 * avg_prec}
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> int:
if out_image_dir and not os.path.exists(_lowercase ):
os.makedirs(_lowercase )
UpperCAmelCase : List[str] = sum(1 for v in qid_to_has_ans.values() if v )
if num_true_pos == 0:
return
UpperCAmelCase : List[str] = make_precision_recall_eval(
_lowercase , _lowercase , _lowercase , _lowercase , out_image=os.path.join(_lowercase , """pr_exact.png""" ) , title="""Precision-Recall curve for Exact Match score""" , )
UpperCAmelCase : Optional[Any] = make_precision_recall_eval(
_lowercase , _lowercase , _lowercase , _lowercase , out_image=os.path.join(_lowercase , """pr_f1.png""" ) , title="""Precision-Recall curve for F1 score""" , )
UpperCAmelCase : Union[str, Any] = {k: float(_lowercase ) for k, v in qid_to_has_ans.items()}
UpperCAmelCase : int = make_precision_recall_eval(
_lowercase , _lowercase , _lowercase , _lowercase , out_image=os.path.join(_lowercase , """pr_oracle.png""" ) , title="""Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)""" , )
merge_eval(_lowercase , _lowercase , """pr_exact""" )
merge_eval(_lowercase , _lowercase , """pr_f1""" )
merge_eval(_lowercase , _lowercase , """pr_oracle""" )
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase , _lowercase ) -> Optional[int]:
if not qid_list:
return
UpperCAmelCase : int = [na_probs[k] for k in qid_list]
UpperCAmelCase : Tuple = np.ones_like(_lowercase ) / float(len(_lowercase ) )
plt.hist(_lowercase , weights=_lowercase , bins=2_0 , range=(0.0, 1.0) )
plt.xlabel("""Model probability of no-answer""" )
plt.ylabel("""Proportion of dataset""" )
plt.title(F'''Histogram of no-answer probability: {name}''' )
plt.savefig(os.path.join(_lowercase , F'''na_prob_hist_{name}.png''' ) )
plt.clf()
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase , _lowercase ) -> Any:
UpperCAmelCase : Tuple = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k] )
UpperCAmelCase : Optional[Any] = num_no_ans
UpperCAmelCase : Any = cur_score
UpperCAmelCase : int = 0.0
UpperCAmelCase : str = sorted(_lowercase , key=lambda _lowercase : na_probs[k] )
for i, qid in enumerate(_lowercase ):
if qid not in scores:
continue
if qid_to_has_ans[qid]:
UpperCAmelCase : Optional[int] = scores[qid]
else:
if preds[qid]:
UpperCAmelCase : Optional[int] = -1
else:
UpperCAmelCase : int = 0
cur_score += diff
if cur_score > best_score:
UpperCAmelCase : List[str] = cur_score
UpperCAmelCase : str = na_probs[qid]
return 100.0 * best_score / len(_lowercase ), best_thresh
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> Dict:
UpperCAmelCase , UpperCAmelCase : Optional[Any] = find_best_thresh(_lowercase , _lowercase , _lowercase , _lowercase )
UpperCAmelCase , UpperCAmelCase : Any = find_best_thresh(_lowercase , _lowercase , _lowercase , _lowercase )
UpperCAmelCase : List[str] = best_exact
UpperCAmelCase : str = exact_thresh
UpperCAmelCase : Any = best_fa
UpperCAmelCase : str = fa_thresh
def main():
with open(OPTS.data_file ) as f:
UpperCAmelCase : List[str] = json.load(_lowercase )
UpperCAmelCase : List[str] = dataset_json["""data"""]
with open(OPTS.pred_file ) as f:
UpperCAmelCase : Tuple = json.load(_lowercase )
if OPTS.na_prob_file:
with open(OPTS.na_prob_file ) as f:
UpperCAmelCase : Union[str, Any] = json.load(_lowercase )
else:
UpperCAmelCase : Union[str, Any] = {k: 0.0 for k in preds}
UpperCAmelCase : Dict = make_qid_to_has_ans(_lowercase ) # maps qid to True/False
UpperCAmelCase : Tuple = [k for k, v in qid_to_has_ans.items() if v]
UpperCAmelCase : Tuple = [k for k, v in qid_to_has_ans.items() if not v]
UpperCAmelCase , UpperCAmelCase : List[str] = get_raw_scores(_lowercase , _lowercase )
UpperCAmelCase : List[Any] = apply_no_ans_threshold(_lowercase , _lowercase , _lowercase , OPTS.na_prob_thresh )
UpperCAmelCase : int = apply_no_ans_threshold(_lowercase , _lowercase , _lowercase , OPTS.na_prob_thresh )
UpperCAmelCase : int = make_eval_dict(_lowercase , _lowercase )
if has_ans_qids:
UpperCAmelCase : Dict = make_eval_dict(_lowercase , _lowercase , qid_list=_lowercase )
merge_eval(_lowercase , _lowercase , """HasAns""" )
if no_ans_qids:
UpperCAmelCase : List[str] = make_eval_dict(_lowercase , _lowercase , qid_list=_lowercase )
merge_eval(_lowercase , _lowercase , """NoAns""" )
if OPTS.na_prob_file:
find_all_best_thresh(_lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase )
if OPTS.na_prob_file and OPTS.out_image_dir:
run_precision_recall_analysis(_lowercase , _lowercase , _lowercase , _lowercase , _lowercase , OPTS.out_image_dir )
histogram_na_prob(_lowercase , _lowercase , OPTS.out_image_dir , """hasAns""" )
histogram_na_prob(_lowercase , _lowercase , OPTS.out_image_dir , """noAns""" )
if OPTS.out_file:
with open(OPTS.out_file , """w""" ) as f:
json.dump(_lowercase , _lowercase )
else:
print(json.dumps(_lowercase , indent=2 ) )
if __name__ == "__main__":
a : Union[str, Any] = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use("""Agg""")
import matplotlib.pyplot as plt
main()
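# Invocation sketch (file names are illustrative):
#
#   python evaluate_v2.py dev-v2.0.json predictions.json \
#       --na-prob-file na_probs.json --out-file eval.json
#
# The F1 computed above is the token-overlap harmonic mean:
#
#   F1 = 2 * precision * recall / (precision + recall)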
| 338
|
"""
Ternary search over a sorted collection, in both iterative and recursive
form.  Each step splits the range into thirds; once the range is smaller
than `precision` elements, a plain linear search finishes the job.
"""
from __future__ import annotations

# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10


def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    """Linear search over array[left:right]; returns the index or -1."""
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1


def ite_ternary_search(array: list[int], target: int) -> int:
    """Iterative ternary search; returns the index of target or -1."""
    left = 0
    right = len(array)
    while left <= right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            left = one_third + 1
            right = two_third - 1
    else:
        return -1


def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    """Recursive ternary search; returns the index of target or -1."""
    if left < right:
        if right - left < precision:
            return lin_search(left, right, array, target)
        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)
        else:
            return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    else:
        return -1


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = [int(item.strip()) for item in user_input.split(",")]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input("Enter the number to be found in the list:\n").strip())
    result1 = ite_ternary_search(collection, target)
    result2 = rec_ternary_search(0, len(collection) - 1, collection, target)
    if result1 != -1:
        print(f"Iterative search: {target} found at positions: {result1}")
        print(f"Recursive search: {target} found at positions: {result2}")
    else:
        print("Not found")
| 338
| 1
|
"""Pure-Python implementation of the SHA-256 hashing algorithm (FIPS 180-4)."""
import argparse
import struct
import unittest


class SHA256:
    """Class to contain the entire pipeline for the SHA-256 algorithm."""

    def __init__(self, data: bytes) -> None:
        self.data = data

        # Initialize hash values
        self.hashes = [
            0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A,
            0x510E527F, 0x9B05688C, 0x1F83D9AB, 0x5BE0CD19,
        ]

        # Initialize round constants
        self.round_constants = [
            0x428A2F98, 0x71374491, 0xB5C0FBCF, 0xE9B5DBA5,
            0x3956C25B, 0x59F111F1, 0x923F82A4, 0xAB1C5ED5,
            0xD807AA98, 0x12835B01, 0x243185BE, 0x550C7DC3,
            0x72BE5D74, 0x80DEB1FE, 0x9BDC06A7, 0xC19BF174,
            0xE49B69C1, 0xEFBE4786, 0x0FC19DC6, 0x240CA1CC,
            0x2DE92C6F, 0x4A7484AA, 0x5CB0A9DC, 0x76F988DA,
            0x983E5152, 0xA831C66D, 0xB00327C8, 0xBF597FC7,
            0xC6E00BF3, 0xD5A79147, 0x06CA6351, 0x14292967,
            0x27B70A85, 0x2E1B2138, 0x4D2C6DFC, 0x53380D13,
            0x650A7354, 0x766A0ABB, 0x81C2C92E, 0x92722C85,
            0xA2BFE8A1, 0xA81A664B, 0xC24B8B70, 0xC76C51A3,
            0xD192E819, 0xD6990624, 0xF40E3585, 0x106AA070,
            0x19A4C116, 0x1E376C08, 0x2748774C, 0x34B0BCB5,
            0x391C0CB3, 0x4ED8AA4A, 0x5B9CCA4F, 0x682E6FF3,
            0x748F82EE, 0x78A5636F, 0x84C87814, 0x8CC70208,
            0x90BEFFFA, 0xA4506CEB, 0xBEF9A3F7, 0xC67178F2,
        ]

        self.preprocessed_data = self.preprocessing(self.data)
        self.final_hash()

    @staticmethod
    def preprocessing(data: bytes) -> bytes:
        """Pad the data to a multiple of 64 bytes, appending the bit length."""
        padding = b"\x80" + (b"\x00" * (63 - (len(data) + 8) % 64))
        big_endian_integer = struct.pack(">Q", (len(data) * 8))
        return data + padding + big_endian_integer

    def final_hash(self) -> None:
        # Convert into blocks of 64 bytes
        self.blocks = [
            self.preprocessed_data[x : x + 64]
            for x in range(0, len(self.preprocessed_data), 64)
        ]

        for block in self.blocks:
            # Convert the given block into a list of 4 byte integers
            words = list(struct.unpack(">16L", block))
            # add 48 0-ed integers
            words += [0] * 48

            a, b, c, d, e, f, g, h = self.hashes

            for index in range(0, 64):
                if index > 15:
                    # modify the zero-ed indexes at the end of the array
                    s0 = (
                        self.ror(words[index - 15], 7)
                        ^ self.ror(words[index - 15], 18)
                        ^ (words[index - 15] >> 3)
                    )
                    s1 = (
                        self.ror(words[index - 2], 17)
                        ^ self.ror(words[index - 2], 19)
                        ^ (words[index - 2] >> 10)
                    )
                    words[index] = (
                        words[index - 16] + s0 + words[index - 7] + s1
                    ) % 0x100000000

                # Compression
                s1 = self.ror(e, 6) ^ self.ror(e, 11) ^ self.ror(e, 25)
                ch = (e & f) ^ ((~e & 0xFFFFFFFF) & g)
                temp1 = (
                    h + s1 + ch + self.round_constants[index] + words[index]
                ) % 0x100000000
                s0 = self.ror(a, 2) ^ self.ror(a, 13) ^ self.ror(a, 22)
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp2 = (s0 + maj) % 0x100000000

                h, g, f, e, d, c, b, a = (
                    g,
                    f,
                    e,
                    ((d + temp1) % 0x100000000),
                    c,
                    b,
                    a,
                    ((temp1 + temp2) % 0x100000000),
                )

            mutated_hash_values = [a, b, c, d, e, f, g, h]

            # Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0x100000000)
                for index, element in enumerate(self.hashes)
            ]

        self.hash = "".join([hex(value)[2:].zfill(8) for value in self.hashes])

    def ror(self, value: int, rotations: int) -> int:
        """Right-rotate a 32-bit value by the given number of bit positions."""
        return 0xFFFFFFFF & (value << (32 - rotations)) | (value >> rotations)


class SHA256HashTest(unittest.TestCase):
    """Test class for the SHA256 class, inherits the TestCase class from unittest."""

    def test_match_hashes(self) -> None:
        import hashlib

        msg = bytes("Test String", "utf-8")
        self.assertEqual(SHA256(msg).hash, hashlib.sha256(msg).hexdigest())


def main() -> None:
    """
    Provides option 'string' or 'file' to take input
    and prints the calculated SHA-256 hash.
    """
    import doctest

    doctest.testmod()

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-s",
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument("-f", "--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()

    input_string = args.input_string
    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")

    print(SHA256(hash_input).hash)


if __name__ == "__main__":
    main()
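# Usage sketch (assumes the SHA256 class name used above); hashlib stays the
# reference implementation to compare against:
#
#   import hashlib
#   msg = b"Test String"
#   assert SHA256(msg).hash == hashlib.sha256(msg).hexdigest()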
| 185
|
"""Tools to patch attributes and submodules of an object, leaving everything else intact."""
from importlib import import_module

from .logging import get_logger


logger = get_logger(__name__)


class _PatchedModuleObj:
    """Set all the modules components as attributes of the _PatchedModuleObj object."""

    def __init__(self, module, attrs=None):
        attrs = attrs or []
        if module is not None:
            for key in module.__dict__:
                if key in attrs or not key.startswith("__"):
                    setattr(self, key, getattr(module, key))
        self._original_module = module._original_module if isinstance(module, _PatchedModuleObj) else module


class patch_submodule:
    """
    Patch a submodule attribute of an object, keeping all other submodules
    intact at all levels.
    """

    _active_patches = []

    def __init__(self, obj, target: str, new, attrs=None):
        self.obj = obj
        self.target = target
        self.new = new
        self.key = target.split(".")[0]
        self.original = {}
        self.attrs = attrs or []

    def __enter__(self):
        *submodules, target_attr = self.target.split(".")

        # Patch modules:
        # it's used to patch attributes of submodules like "os.path.join";
        # in this case we need to patch "os" and "os.path"
        for i in range(len(submodules)):
            try:
                submodule = import_module(".".join(submodules[: i + 1]))
            except ModuleNotFoundError:
                continue
            # We iterate over all the globals in self.obj in case we find "os" or "os.path"
            for attr in self.obj.__dir__():
                obj_attr = getattr(self.obj, attr)
                # We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
                # This allows to patch renamed modules like "from os import path as ospath".
                if obj_attr is submodule or (
                    isinstance(obj_attr, _PatchedModuleObj) and obj_attr._original_module is submodule
                ):
                    self.original[attr] = obj_attr
                    # patch at top level
                    setattr(self.obj, attr, _PatchedModuleObj(obj_attr, attrs=self.attrs))
                    patched = getattr(self.obj, attr)
                    # construct lower levels patches
                    for key in submodules[i + 1 :]:
                        setattr(patched, key, _PatchedModuleObj(getattr(patched, key, None), attrs=self.attrs))
                        patched = getattr(patched, key)
                    # finally set the target attribute
                    setattr(patched, target_attr, self.new)

        # Patch attribute itself:
        # it's used for builtins like "open",
        # and also to patch "os.path.join" we may also need to patch "join"
        # itself if it was imported as "from os.path import join".
        if submodules:  # if it's an attribute of a submodule like "os.path.join"
            try:
                attr_value = getattr(import_module(".".join(submodules)), target_attr)
            except (AttributeError, ModuleNotFoundError):
                return
            # We iterate over all the globals in self.obj in case we find "os.path.join"
            for attr in self.obj.__dir__():
                # We don't check for the name of the global, but rather if its value *is* "os.path.join".
                # This allows to patch renamed attributes like "from os.path import join as pjoin".
                if getattr(self.obj, attr) is attr_value:
                    self.original[attr] = getattr(self.obj, attr)
                    setattr(self.obj, attr, self.new)
        elif target_attr in globals()["__builtins__"]:  # if it's a builtin like "open"
            self.original[target_attr] = globals()["__builtins__"][target_attr]
            setattr(self.obj, target_attr, self.new)
        else:
            raise RuntimeError(f"Tried to patch attribute {target_attr} instead of a submodule.")

    def __exit__(self, *exc_info):
        for attr in list(self.original):
            setattr(self.obj, attr, self.original.pop(attr))

    def start(self):
        """Activate a patch."""
        self.__enter__()
        self._active_patches.append(self)

    def stop(self):
        """Stop an active patch."""
        try:
            self._active_patches.remove(self)
        except ValueError:
            # If the patch hasn't been started this will fail
            return None

        return self.__exit__()
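# Usage sketch (object and target names are illustrative):
#
#   import some_module
#   with patch_submodule(some_module, "os.path.join", lambda *p: "/patched"):
#       ...  # inside the block, some_module sees the patched os.path.join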
| 185
| 1
|
import math
from datetime import datetime, timedelta


def gauss_easter(year: int) -> datetime:
    """Calculation of the Easter date for a given year using Gauss's method."""
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100)
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25)
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7

    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30

    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7

    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 19)
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 18)
    else:
        return datetime(year, 3, 22) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday)
        )


if __name__ == "__main__":
    for year in (1994, 2000, 2010, 2021, 2023):
        tense = "will be" if year > datetime.now().year else "was"
        print(f"Easter in {year} {tense} {gauss_easter(year)}")
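# Expected output sketch for the demo years (Western/Gregorian Easter dates,
# quoted from memory -- verify before relying on them):
#
#   1994 -> April 3, 2000 -> April 23, 2010 -> April 4,
#   2021 -> April 4, 2023 -> April 9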
| 118
|
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
global_rng = random.Random()

if is_torch_available():
    import torch


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor (as nested Python lists)."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values


class ASTFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs


@require_torch
@require_torchaudio
class ASTFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = ASTFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = ASTFeatureExtractionTester(self)

    def test_call(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, padding=True, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, padding=True, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    @require_torch
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)

    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    @require_torch
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776,
             -1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133,
             -1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936,
             -0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = ASTFeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 1024, 128))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
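# Output-shape sketch: the extractor pads/truncates any waveform to a fixed
# (1, 1024, 128) log-mel spectrogram, matching the assertion above.
#
#   fe = ASTFeatureExtractor()
#   feats = fe([0.0] * 16000, sampling_rate=16000, return_tensors="np").input_values
#   feats.shape   # -> (1, 1024, 128)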
| 118
| 1
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sail/poolformer_s12": "https://huggingface.co/sail/poolformer_s12/resolve/main/config.json",
    # See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}


class PoolFormerConfig(PretrainedConfig):
    """Configuration class to store the configuration of a PoolFormer model."""

    model_type = "poolformer"

    def __init__(
        self,
        num_channels=3,
        patch_size=16,
        stride=16,
        pool_size=3,
        mlp_ratio=4.0,
        depths=[2, 2, 6, 2],
        hidden_sizes=[64, 128, 320, 512],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        padding=[2, 1, 1, 1],
        num_encoder_blocks=4,
        drop_path_rate=0.0,
        hidden_act="gelu",
        use_layer_scale=True,
        layer_scale_init_value=1e-5,
        initializer_range=0.02,
        **kwargs,
    ):
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.stride = stride
        self.padding = padding
        self.pool_size = pool_size
        self.hidden_sizes = hidden_sizes
        self.mlp_ratio = mlp_ratio
        self.depths = depths
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.num_encoder_blocks = num_encoder_blocks
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.initializer_range = initializer_range
        super().__init__(**kwargs)


class PoolFormerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 2e-3
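# Usage sketch (config class name as above; the save path is illustrative):
#
#   config = PoolFormerConfig(depths=[2, 2, 6, 2], hidden_sizes=[64, 128, 320, 512])
#   config.save_pretrained("./poolformer-s12")   # writes config.json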
| 37
|
"""simple docstring"""
import unittest
import numpy as np
def snake_case_ ( A_ : np.ndarray, A_ : np.ndarray, A_ : np.ndarray, A_ : np.ndarray | None = None, ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = np.shape(A_ )
_lowerCamelCase : List[str] = np.shape(A_ )
_lowerCamelCase : List[str] = np.shape(A_ )
if shape_a[0] != shape_b[0]:
_lowerCamelCase : Tuple = (
'''Expected the same number of rows for A and B. '''
F'''Instead found A of size {shape_a} and B of size {shape_b}'''
)
raise ValueError(A_ )
if shape_b[1] != shape_c[1]:
_lowerCamelCase : Tuple = (
'''Expected the same number of columns for B and C. '''
F'''Instead found B of size {shape_b} and C of size {shape_c}'''
)
raise ValueError(A_ )
_lowerCamelCase : List[str] = pseudo_inv
if a_inv is None:
try:
_lowerCamelCase : Any = np.linalg.inv(A_ )
except np.linalg.LinAlgError:
raise ValueError(
'''Input matrix A is not invertible. Cannot compute Schur complement.''' )
return mat_c - mat_b.T @ a_inv @ mat_b
class __snake_case ( unittest.TestCase):
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
_lowerCamelCase : List[Any] = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
_lowerCamelCase : List[str] = np.array([[0, 3], [3, 0], [2, 3]] )
_lowerCamelCase : List[str] = np.array([[2, 1], [6, 3]] )
_lowerCamelCase : List[Any] = schur_complement(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
_lowerCamelCase : Dict = np.block([[a, b], [b.T, c]] )
_lowerCamelCase : Tuple = np.linalg.det(__lowerCAmelCase )
_lowerCamelCase : List[str] = np.linalg.det(__lowerCAmelCase )
_lowerCamelCase : Any = np.linalg.det(__lowerCAmelCase )
self.assertAlmostEqual(__lowerCAmelCase , det_a * det_s )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : List[Any] = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
_lowerCamelCase : Optional[int] = np.array([[0, 3], [3, 0], [2, 3]] )
_lowerCamelCase : int = np.array([[2, 1], [6, 3]] )
with self.assertRaises(__lowerCAmelCase ):
schur_complement(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
_lowerCamelCase : str = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
_lowerCamelCase : List[str] = np.array([[0, 3], [3, 0], [2, 3]] )
_lowerCamelCase : Union[str, Any] = np.array([[2, 1, 3], [6, 3, 5]] )
with self.assertRaises(__lowerCAmelCase ):
schur_complement(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main()
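# Identity exercised by the first test, as a sketch:
#
#   det([[A, B], [B^T, C]]) = det(A) * det(C - B^T A^{-1} B)
#
# i.e. the determinant of the block matrix factors through the Schur
# complement, which is what assertAlmostEqual verifies numerically.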
| 72
| 0
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def _a ( lowerCamelCase: Dict=None ) -> Tuple:
'''simple docstring'''
if subparsers is not None:
__A = subparsers.add_parser('''test''' )
else:
__A = argparse.ArgumentParser('''Accelerate test command''' )
parser.add_argument(
'''--config_file''' , default=lowerCamelCase , help=(
'''The path to use to store the config file. Will default to a file named default_config.yaml in the cache '''
'''location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '''
'''such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '''
'''with \'huggingface\'.'''
) , )
if subparsers is not None:
parser.set_defaults(func=lowerCamelCase )
return parser
def test_command(args):
    '''simple docstring'''
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ['''test_utils''', '''scripts''', '''test_script.py'''])
    if args.config_file is None:
        test_args = script_name
    else:
        test_args = F"""--config_file={args.config_file} {script_name}"""
    cmd = ['''accelerate-launch'''] + test_args.split()
    result = execute_subprocess_async(cmd , env=os.environ.copy())
    if result.returncode == 0:
        print('''Test is a success! You are ready for your distributed training!''' )
def main():
    '''simple docstring'''
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)
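

# A minimal usage sketch (the config path below is an assumed example, not a fixture):
#   accelerate test --config_file=/path/to/default_config.yaml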
if __name__ == "__main__":
main()
| 353
|
import contextlib
import importlib
import io
import unittest
import transformers
# Try to import everything from transformers to ensure every object can be loaded.
from transformers import * # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available
if is_torch_available():
from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification
if is_tf_available():
from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification
if is_flax_available():
from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification
MODEL_ID = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co
REVISION_ID_DEFAULT = 'main'
# Default branch name
REVISION_ID_ONE_SPECIFIC_COMMIT = 'f2c752cfc5c0ab6f4bdec59acea69eefbee381c2'
# One particular commit (not the top of `main`)
REVISION_ID_INVALID = 'aaaaaaa'
# This commit does not exist, so we should 404.
PINNED_SHA1 = 'd9e9f15bc825e4b2c9249e9578f884bbcb5e3684'
# Sha-1 of config.json on the top of `main`, for checking purposes
PINNED_SHA256 = '4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3'
@contextlib.contextmanager
def context_en():
'''simple docstring'''
print('''Welcome!''' )
yield
print('''Bye!''' )
@contextlib.contextmanager
def context_fr():
'''simple docstring'''
print('''Bonjour!''' )
yield
print('''Au revoir!''' )
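

# ContextManagers (backed by an ExitStack) enters the managers in the order given and
# exits them in reverse, so ContextManagers([context_fr(), context_en()]) prints
# "Bonjour!" then "Welcome!" on entry and "Bye!" then "Au revoir!" on exit, exactly
# as the tests below assert.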
class TestImportMechanisms(unittest.TestCase):
    def test_module_spec_available(self):
        # If the spec is missing, importlib would not be able to import the module dynamically.
        assert transformers.__spec__ is not None
        assert importlib.util.find_spec('''transformers''') is not None
class GenericUtilTests(unittest.TestCase):
    @unittest.mock.patch('''sys.stdout''' , new_callable=io.StringIO )
    def test_context_managers_no_context(self, mock_stdout):
        with ContextManagers([]):
            print('''Transformers are awesome!''')
        # The print statement adds a new line at the end of the output
        self.assertEqual(mock_stdout.getvalue() , '''Transformers are awesome!\n''')

    @unittest.mock.patch('''sys.stdout''' , new_callable=io.StringIO )
    def test_context_managers_one_context(self, mock_stdout):
        with ContextManagers([context_en()]):
            print('''Transformers are awesome!''')
        # The output should be wrapped with an English welcome and goodbye
        self.assertEqual(mock_stdout.getvalue() , '''Welcome!\nTransformers are awesome!\nBye!\n''')

    @unittest.mock.patch('''sys.stdout''' , new_callable=io.StringIO )
    def test_context_managers_two_context(self, mock_stdout):
        with ContextManagers([context_fr(), context_en()]):
            print('''Transformers are awesome!''')
        # The output should be wrapped with an English and French welcome and goodbye
        self.assertEqual(mock_stdout.getvalue() , '''Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n''')
    @require_torch
    def test_find_labels_pt(self):
        self.assertEqual(find_labels(BertForSequenceClassification) , ['''labels'''])
        self.assertEqual(find_labels(BertForPreTraining) , ['''labels''', '''next_sentence_label'''])
        self.assertEqual(find_labels(BertForQuestionAnswering) , ['''start_positions''', '''end_positions'''])

        # find_labels should also work on subclasses
        class DummyModel(BertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel) , ['''labels'''])
    @require_tf
    def test_find_labels_tf(self):
        self.assertEqual(find_labels(TFBertForSequenceClassification) , ['''labels'''])
        self.assertEqual(find_labels(TFBertForPreTraining) , ['''labels''', '''next_sentence_label'''])
        self.assertEqual(find_labels(TFBertForQuestionAnswering) , ['''start_positions''', '''end_positions'''])

        # find_labels should also work on subclasses
        class DummyModel(TFBertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel) , ['''labels'''])
    @require_flax
    def test_find_labels_flax(self):
        # Flax models don't have labels
        self.assertEqual(find_labels(FlaxBertForSequenceClassification) , [])
        self.assertEqual(find_labels(FlaxBertForPreTraining) , [])
        self.assertEqual(find_labels(FlaxBertForQuestionAnswering) , [])

        class DummyModel(FlaxBertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel) , [])
| 250
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
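# Standard lazy-import layout: _import_structure maps each submodule to the names it
# exports, and the _LazyModule installed at the bottom of the file defers the heavy
# torch/TF/Flax imports until a symbol is actually accessed.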
_UpperCAmelCase = {"""configuration_vit""": ["""VIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ViTConfig""", """ViTOnnxConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = ["""ViTFeatureExtractor"""]
_UpperCAmelCase = ["""ViTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_vit"""] = [
"""VIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ViTForImageClassification""",
"""ViTForMaskedImageModeling""",
"""ViTModel""",
"""ViTPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_vit"""] = [
"""TFViTForImageClassification""",
"""TFViTModel""",
"""TFViTPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_flax_vit"""] = [
"""FlaxViTForImageClassification""",
"""FlaxViTModel""",
"""FlaxViTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 173
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    """configuration_vivit""": ["""VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """VivitConfig"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = ["""VivitImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_vivit"""] = [
"""VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""VivitModel""",
"""VivitPreTrainedModel""",
"""VivitForVideoClassification""",
]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 173
| 1
|
"""simple docstring"""
import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
TrainingArguments,
set_seed,
)
def get_args():
    """simple docstring"""
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_ckpt' , type=str , default='microsoft/unixcoder-base-nine' )
    parser.add_argument('--num_epochs' , type=int , default=5 )
    parser.add_argument('--batch_size' , type=int , default=6 )
    parser.add_argument('--gradient_accumulation_steps' , type=int , default=1 )
    parser.add_argument('--freeze' , type=bool , default=True )
    parser.add_argument('--learning_rate' , type=float , default=5e-4 )
    parser.add_argument('--seed' , type=int , default=0 )
    parser.add_argument('--lr_scheduler_type' , type=str , default='cosine' )
    parser.add_argument('--num_warmup_steps' , type=int , default=10 )
    parser.add_argument('--weight_decay' , type=float , default=0.01 )
    parser.add_argument('--output_dir' , type=str , default='./results' )
    return parser.parse_args()
lowercase__ : int = load("""accuracy""")
def UpperCamelCase_ ( lowerCAmelCase__ : List[Any] ) -> int:
"""simple docstring"""
lowerCAmelCase_ ,lowerCAmelCase_ : List[str] = eval_pred
lowerCAmelCase_ : Optional[Any] = np.argmax(lowerCAmelCase__ , axis=1 )
return metric.compute(predictions=lowerCAmelCase__ , references=lowerCAmelCase__ )
class CustomCallback(TrainerCallback):
    """simple docstring"""

    def __init__(self, trainer):
        super().__init__()
        self._trainer = trainer

    def on_epoch_end(self, args, state, control, **kwargs):
        if control.should_evaluate:
            control_copy = deepcopy(control)
            self._trainer.evaluate(eval_dataset=self._trainer.train_dataset , metric_key_prefix='train' )
            return control_copy
def main():
    """simple docstring"""
    args = get_args()
    set_seed(args.seed )

    dataset = load_dataset('codeparrot/codecomplex' , split='train' )
    train_test = dataset.train_test_split(test_size=0.2 )
    test_validation = train_test['test'].train_test_split(test_size=0.5 )
    train_test_validation = DatasetDict(
        {
            'train': train_test['train'],
            'test': test_validation['train'],
            'valid': test_validation['test'],
        } )

    print('Loading tokenizer and model' )
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt )
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt , num_labels=7 )
    model.config.pad_token_id = model.config.eos_token_id

    if args.freeze:
        for param in model.roberta.parameters():
            param.requires_grad = False

    labels = ClassLabel(num_classes=7 , names=list(set(train_test_validation['train']['complexity'] ) ) )

    def tokenize(example):
        inputs = tokenizer(example['src'] , truncation=True , max_length=1024 )
        label = labels.str2int(example['complexity'] )
        return {
            'input_ids': inputs['input_ids'],
            'attention_mask': inputs['attention_mask'],
            'label': label,
        }

    tokenized_datasets = train_test_validation.map(
        tokenize , batched=True , remove_columns=train_test_validation['train'].column_names , )
    data_collator = DataCollatorWithPadding(tokenizer=tokenizer )

    training_args = TrainingArguments(
        output_dir=args.output_dir , learning_rate=args.learning_rate , lr_scheduler_type=args.lr_scheduler_type , evaluation_strategy='epoch' , save_strategy='epoch' , logging_strategy='epoch' , per_device_train_batch_size=args.batch_size , per_device_eval_batch_size=args.batch_size , num_train_epochs=args.num_epochs , gradient_accumulation_steps=args.gradient_accumulation_steps , weight_decay=0.01 , metric_for_best_model='accuracy' , run_name='complexity-java' , report_to='wandb' , )
    trainer = Trainer(
        model=model , args=training_args , train_dataset=tokenized_datasets['train'] , eval_dataset=tokenized_datasets['valid'] , tokenizer=tokenizer , data_collator=data_collator , compute_metrics=compute_metrics , )

    print('Training...' )
    trainer.add_callback(CustomCallback(trainer ) )
    trainer.train()
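
    # Note: the CustomCallback above re-runs evaluation on the training split at the
    # end of every epoch, so train-set metrics are logged with the "train" prefix
    # alongside the validation metrics.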
if __name__ == "__main__":
main()
| 289
|
"""simple docstring"""
from diffusers.utils.testing_utils import require_onnxruntime
@require_onnxruntime
class UpperCamelCase__ :
"""simple docstring"""
pass
| 289
| 1
|
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class AutoencoderKLOutput(BaseOutput):
    latent_dist: "DiagonalGaussianDistribution"


class AutoencoderKL(ModelMixin, ConfigMixin):
    _supports_gradient_checkpointing = True
    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str] = ("DownEncoderBlock2D",),
        up_block_types: Tuple[str] = ("UpDecoderBlock2D",),
        block_out_channels: Tuple[int] = (64,),
        layers_per_block: int = 1,
        act_fn: str = "silu",
        latent_channels: int = 4,
        norm_num_groups: int = 32,
        sample_size: int = 32,
        scaling_factor: float = 0.18_215,
    ):
        super().__init__()
        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels, out_channels=latent_channels, down_block_types=down_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, act_fn=act_fn, norm_num_groups=norm_num_groups, double_z=True, )
        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels, out_channels=out_channels, up_block_types=up_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, norm_num_groups=norm_num_groups, act_fn=act_fn, )
        self.quant_conv = nn.Conv2d(2 * latent_channels, 2 * latent_channels, 1)
        self.post_quant_conv = nn.Conv2d(latent_channels, latent_channels, 1)
        self.use_slicing = False
        self.use_tiling = False
        # only relevant if vae tiling is enabled
        self.tile_sample_min_size = self.config.sample_size
        sample_size = (
            self.config.sample_size[0]
            if isinstance(self.config.sample_size, (list, tuple))
            else self.config.sample_size
        )
        self.tile_latent_min_size = int(sample_size / (2 ** (len(self.config.block_out_channels) - 1)))
        self.tile_overlap_factor = 0.25
    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, (Encoder, Decoder)):
            module.gradient_checkpointing = value

    def enable_tiling(self, use_tiling: bool = True):
        self.use_tiling = use_tiling

    def disable_tiling(self):
        self.enable_tiling(False)

    def enable_slicing(self):
        self.use_slicing = True

    def disable_slicing(self):
        self.use_slicing = False
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
        processors = {}

        def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
            if hasattr(module, "set_processor"):
                processors[F"""{name}.processor"""] = module.processor

            for sub_name, child in module.named_children():
                fn_recursive_add_processors(F"""{name}.{sub_name}""", child, processors)

            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)

        return processors
    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
        count = len(self.attn_processors.keys())

        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                F"""A dict of processors was passed, but the number of processors {len(processor)} does not match the"""
                F""" number of attention layers: {count}. Please make sure to pass {count} processor classes.""" )

        def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(F"""{name}.processor"""))

            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(F"""{name}.{sub_name}""", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)
    def set_default_attn_processor(self):
        self.set_attn_processor(AttnProcessor())
@apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> AutoencoderKLOutput:
        if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
            return self.tiled_encode(x, return_dict=return_dict)

        if self.use_slicing and x.shape[0] > 1:
            encoded_slices = [self.encoder(x_slice) for x_slice in x.split(1)]
            h = torch.cat(encoded_slices)
        else:
            h = self.encoder(x)

        moments = self.quant_conv(h)
        posterior = DiagonalGaussianDistribution(moments)

        if not return_dict:
            return (posterior,)
        return AutoencoderKLOutput(latent_dist=posterior)
    def _decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
            return self.tiled_decode(z, return_dict=return_dict)

        z = self.post_quant_conv(z)
        dec = self.decoder(z)

        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)
@apply_forward_hook
    def decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        if self.use_slicing and z.shape[0] > 1:
            decoded_slices = [self._decode(z_slice).sample for z_slice in z.split(1)]
            decoded = torch.cat(decoded_slices)
        else:
            decoded = self._decode(z).sample

        if not return_dict:
            return (decoded,)
        return DecoderOutput(sample=decoded)
    def blend_v(self, a, b, blend_extent):
        blend_extent = min(a.shape[2], b.shape[2], blend_extent)
        for y in range(blend_extent):
            b[:, :, y, :] = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
        return b

    def blend_h(self, a, b, blend_extent):
        blend_extent = min(a.shape[3], b.shape[3], blend_extent)
        for x in range(blend_extent):
            b[:, :, :, x] = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
        return b
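
    # Both helpers fade linearly across `blend_extent` rows/columns: at offset k the
    # result is a * (1 - k / blend_extent) + b * (k / blend_extent), which removes
    # visible seams where neighbouring tiles overlap.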
    def tiled_encode(self, x: torch.FloatTensor, return_dict: bool = True) -> AutoencoderKLOutput:
        overlap_size = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor))
        blend_extent = int(self.tile_latent_min_size * self.tile_overlap_factor)
        row_limit = self.tile_latent_min_size - blend_extent

        # Split the image into 512x512 tiles and encode them separately.
        rows = []
        for i in range(0, x.shape[2], overlap_size):
            row = []
            for j in range(0, x.shape[3], overlap_size):
                tile = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
                tile = self.encoder(tile)
                tile = self.quant_conv(tile)
                row.append(tile)
            rows.append(row)
        result_rows = []
        for i, row in enumerate(rows):
            result_row = []
            for j, tile in enumerate(row):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
                if j > 0:
                    tile = self.blend_h(row[j - 1], tile, blend_extent)
                result_row.append(tile[:, :, :row_limit, :row_limit])
            result_rows.append(torch.cat(result_row, dim=3))

        moments = torch.cat(result_rows, dim=2)
        posterior = DiagonalGaussianDistribution(moments)

        if not return_dict:
            return (posterior,)
        return AutoencoderKLOutput(latent_dist=posterior)
    def tiled_decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        overlap_size = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor))
        blend_extent = int(self.tile_sample_min_size * self.tile_overlap_factor)
        row_limit = self.tile_sample_min_size - blend_extent

        # Split z into overlapping 64x64 tiles and decode them separately.
        # The tiles have an overlap to avoid seams between tiles.
        rows = []
        for i in range(0, z.shape[2], overlap_size):
            row = []
            for j in range(0, z.shape[3], overlap_size):
                tile = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
                tile = self.post_quant_conv(tile)
                decoded = self.decoder(tile)
                row.append(decoded)
            rows.append(row)
        result_rows = []
        for i, row in enumerate(rows):
            result_row = []
            for j, tile in enumerate(row):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
                if j > 0:
                    tile = self.blend_h(row[j - 1], tile, blend_extent)
                result_row.append(tile[:, :, :row_limit, :row_limit])
            result_rows.append(torch.cat(result_row, dim=3))

        dec = torch.cat(result_rows, dim=2)
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)
    def forward(
        self,
        sample: torch.FloatTensor,
        sample_posterior: bool = False,
        return_dict: bool = True,
        generator: Optional[torch.Generator] = None,
    ) -> Union[DecoderOutput, torch.FloatTensor]:
        x = sample
        posterior = self.encode(x).latent_dist
        if sample_posterior:
            z = posterior.sample(generator=generator)
        else:
            z = posterior.mode()
        dec = self.decode(z).sample

        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)
| 133
|
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
PNDMScheduler,
StableDiffusionLDMaDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import nightly, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
enable_full_determinism()
class StableDiffusionLDMaDPipelineFastTests(unittest.TestCase):
    pipeline_class = StableDiffusionLDMaDPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
        scheduler = DDIMScheduler(
            beta_start=0.00_085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=False , set_alpha_to_one=False , )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=6 , out_channels=6 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_ldm3d_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        ldmad_pipe = StableDiffusionLDMaDPipeline(**components)
        ldmad_pipe = ldmad_pipe.to(device)
        ldmad_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = ldmad_pipe(**inputs)
        rgb, depth = output.rgb, output.depth
        image_slice_rgb = rgb[0, -3:, -3:, -1]
        image_slice_depth = depth[0, -3:, -1]

        assert rgb.shape == (1, 64, 64, 3)
        assert depth.shape == (1, 64, 64)

        expected_slice_rgb = np.array(
            [0.37_338_176, 0.70_247, 0.74_203_193, 0.51_643_604, 0.58_256_793, 0.60_932_136, 0.4_181_095, 0.48_355_877, 0.46_535_262])
        expected_slice_depth = np.array([103.46_727, 85.812_004, 87.849_236])
        assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb).max() < 1e-2
        assert np.abs(image_slice_depth.flatten() - expected_slice_depth).max() < 1e-2
    def test_stable_diffusion_prompt_embeds(self):
        components = self.get_dummy_components()
        ldmad_pipe = StableDiffusionLDMaDPipeline(**components)
        ldmad_pipe = ldmad_pipe.to(torch_device)
        ldmad_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = ldmad_pipe(**inputs)
        rgb_slice_1, depth_slice_1 = output.rgb, output.depth
        rgb_slice_1 = rgb_slice_1[0, -3:, -3:, -1]
        depth_slice_1 = depth_slice_1[0, -3:, -1]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]

        text_inputs = ldmad_pipe.tokenizer(
            prompt , padding="max_length" , max_length=ldmad_pipe.tokenizer.model_max_length , truncation=True , return_tensors="pt" , )
        text_inputs = text_inputs["input_ids"].to(torch_device)

        prompt_embeds = ldmad_pipe.text_encoder(text_inputs)[0]
        inputs["prompt_embeds"] = prompt_embeds

        # forward
        output = ldmad_pipe(**inputs)
        rgb_slice_2, depth_slice_2 = output.rgb, output.depth
        rgb_slice_2 = rgb_slice_2[0, -3:, -3:, -1]
        depth_slice_2 = depth_slice_2[0, -3:, -1]

        assert np.abs(rgb_slice_1.flatten() - rgb_slice_2.flatten()).max() < 1e-4
        assert np.abs(depth_slice_1.flatten() - depth_slice_2.flatten()).max() < 1e-4
    def test_stable_diffusion_ldm3d_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        ldmad_pipe = StableDiffusionLDMaDPipeline(**components)
        ldmad_pipe = ldmad_pipe.to(device)
        ldmad_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = ldmad_pipe(**inputs , negative_prompt=negative_prompt)
        rgb, depth = output.rgb, output.depth
        rgb_slice = rgb[0, -3:, -3:, -1]
        depth_slice = depth[0, -3:, -1]

        assert rgb.shape == (1, 64, 64, 3)
        assert depth.shape == (1, 64, 64)

        expected_slice_rgb = np.array(
            [0.37_044, 0.71_811_503, 0.7_223_251, 0.48_603_675, 0.5_638_391, 0.6_364_948, 0.42_833_704, 0.4_901_315, 0.47_926_217])
        expected_slice_depth = np.array([107.84_738, 84.62_802, 89.962_135])
        assert np.abs(rgb_slice.flatten() - expected_slice_rgb).max() < 1e-2
        assert np.abs(depth_slice.flatten() - expected_slice_depth).max() < 1e-2
@slow
@require_torch_gpu
class StableDiffusionLDMaDPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device , dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
    def test_ldm3d_stable_diffusion(self):
        ldmad_pipe = StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d")
        ldmad_pipe = ldmad_pipe.to(torch_device)
        ldmad_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        output = ldmad_pipe(**inputs)
        rgb, depth = output.rgb, output.depth
        rgb_slice = rgb[0, -3:, -3:, -1].flatten()
        depth_slice = depth[0, -3:, -1].flatten()

        assert rgb.shape == (1, 512, 512, 3)
        assert depth.shape == (1, 512, 512)

        expected_slice_rgb = np.array(
            [0.53_805_465, 0.56_707_305, 0.5_486_515, 0.57_012_236, 0.5_814_511, 0.56_253_487, 0.54_843_014, 0.55_092_263, 0.6_459_706])
        expected_slice_depth = np.array(
            [0.9_263_781, 0.6_678_672, 0.5_486_515, 0.92_202_145, 0.67_831_135, 0.56_253_487, 0.9_241_694, 0.7_551_478, 0.6_459_706])
        assert np.abs(rgb_slice - expected_slice_rgb).max() < 3e-3
        assert np.abs(depth_slice - expected_slice_depth).max() < 3e-3
@nightly
@require_torch_gpu
class StableDiffusionLDMaDPipelineNightlyTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device , dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 50,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
    def test_ldm3d(self):
        ldmad_pipe = StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d").to(torch_device)
        ldmad_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        output = ldmad_pipe(**inputs)
        rgb, depth = output.rgb, output.depth

        expected_rgb_mean = 0.495_586
        expected_rgb_std = 0.33_795_515
        expected_depth_mean = 112.48_518
        expected_depth_std = 98.489_746
        assert np.abs(expected_rgb_mean - rgb.mean()) < 1e-3
        assert np.abs(expected_rgb_std - rgb.std()) < 1e-3
        assert np.abs(expected_depth_mean - depth.mean()) < 1e-3
        assert np.abs(expected_depth_std - depth.std()) < 1e-3
    def test_ldm3d_v2(self):
        ldmad_pipe = StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d-4c").to(torch_device)
        ldmad_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        output = ldmad_pipe(**inputs)
        rgb, depth = output.rgb, output.depth

        expected_rgb_mean = 0.4_194_127
        expected_rgb_std = 0.35_375_586
        expected_depth_mean = 0.5_638_502
        expected_depth_std = 0.34_686_103

        assert rgb.shape == (1, 512, 512, 3)
        assert depth.shape == (1, 512, 512, 1)
        assert np.abs(expected_rgb_mean - rgb.mean()) < 1e-3
        assert np.abs(expected_rgb_std - rgb.std()) < 1e-3
        assert np.abs(expected_depth_mean - depth.mean()) < 1e-3
        assert np.abs(expected_depth_std - depth.std()) < 1e-3
| 133
| 1
|
def solution(n: int = 100) -> int:
    """simple docstring"""
    sum_cubes = (n * (n + 1) // 2) ** 2
    sum_squares = n * (n + 1) * (2 * n + 1) // 6
    return sum_cubes - sum_squares
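

# Quick check of the two closed forms for n = 3:
#   1**3 + 2**3 + 3**3 = 36 == (3 * 4 // 2) ** 2
#   1**2 + 2**2 + 3**2 = 14 == 3 * 4 * 7 // 6
# so solution(3) == 36 - 14 == 22.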
if __name__ == "__main__":
print(f"""{solution() = }""")
| 355
|
"""simple docstring"""
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class LevitImageProcessor(BaseImageProcessor):
    '''simple docstring'''

    model_input_names = ['''pixel_values''']
def __init__( self ,lowerCamelCase_ = True ,lowerCamelCase_ = None ,lowerCamelCase_ = PILImageResampling.BICUBIC ,lowerCamelCase_ = True ,lowerCamelCase_ = None ,lowerCamelCase_ = True ,lowerCamelCase_ = 1 / 2_5_5 ,lowerCamelCase_ = True ,lowerCamelCase_ = IMAGENET_DEFAULT_MEAN ,lowerCamelCase_ = IMAGENET_DEFAULT_STD ,**lowerCamelCase_ ,) -> None:
super().__init__(**lowerCamelCase_ )
A = size if size is not None else {"""shortest_edge""": 2_2_4}
A = get_size_dict(lowerCamelCase_ ,default_to_square=lowerCamelCase_ )
A = crop_size if crop_size is not None else {"""height""": 2_2_4, """width""": 2_2_4}
A = get_size_dict(lowerCamelCase_ ,param_name="""crop_size""" )
A = do_resize
A = size
A = resample
A = do_center_crop
A = crop_size
A = do_rescale
A = rescale_factor
A = do_normalize
A = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
A = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def UpperCamelCase__ ( self ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = PILImageResampling.BICUBIC ,lowerCamelCase_ = None ,**lowerCamelCase_ ,) -> np.ndarray:
A = get_size_dict(lowerCamelCase_ ,default_to_square=lowerCamelCase_ )
# size_dict is a dict with either keys "height" and "width" or "shortest_edge"
if "shortest_edge" in size:
A = int((2_5_6 / 2_2_4) * size["""shortest_edge"""] )
A = get_resize_output_image_size(lowerCamelCase_ ,size=lowerCamelCase_ ,default_to_square=lowerCamelCase_ )
A = {"""height""": output_size[0], """width""": output_size[1]}
if "height" not in size_dict or "width" not in size_dict:
raise ValueError(
f'Size dict must have keys \'height\' and \'width\' or \'shortest_edge\'. Got {size_dict.keys()}' )
return resize(
lowerCamelCase_ ,size=(size_dict["""height"""], size_dict["""width"""]) ,resample=lowerCamelCase_ ,data_format=lowerCamelCase_ ,**lowerCamelCase_ )
def UpperCamelCase__ ( self ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = None ,**lowerCamelCase_ ,) -> np.ndarray:
A = get_size_dict(lowerCamelCase_ )
if "height" not in size or "width" not in size:
raise ValueError(f'Size dict must have keys \'height\' and \'width\'. Got {size.keys()}' )
return center_crop(lowerCamelCase_ ,size=(size["""height"""], size["""width"""]) ,data_format=lowerCamelCase_ ,**lowerCamelCase_ )
def UpperCamelCase__ ( self ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = None ,**lowerCamelCase_ ,) -> np.ndarray:
return rescale(lowerCamelCase_ ,scale=lowerCamelCase_ ,data_format=lowerCamelCase_ ,**lowerCamelCase_ )
def UpperCamelCase__ ( self ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = None ,**lowerCamelCase_ ,) -> np.ndarray:
return normalize(lowerCamelCase_ ,mean=lowerCamelCase_ ,std=lowerCamelCase_ ,data_format=lowerCamelCase_ ,**lowerCamelCase_ )
def UpperCamelCase__ ( self ,lowerCamelCase_ ,lowerCamelCase_ = None ,lowerCamelCase_ = None ,lowerCamelCase_ = None ,lowerCamelCase_ = None ,lowerCamelCase_ = None ,lowerCamelCase_ = None ,lowerCamelCase_ = None ,lowerCamelCase_ = None ,lowerCamelCase_ = None ,lowerCamelCase_ = None ,lowerCamelCase_ = None ,lowerCamelCase_ = ChannelDimension.FIRST ,**lowerCamelCase_ ,) -> BatchFeature:
A = do_resize if do_resize is not None else self.do_resize
A = resample if resample is not None else self.resample
A = do_center_crop if do_center_crop is not None else self.do_center_crop
A = do_rescale if do_rescale is not None else self.do_rescale
A = rescale_factor if rescale_factor is not None else self.rescale_factor
A = do_normalize if do_normalize is not None else self.do_normalize
A = image_mean if image_mean is not None else self.image_mean
A = image_std if image_std is not None else self.image_std
A = size if size is not None else self.size
A = get_size_dict(lowerCamelCase_ ,default_to_square=lowerCamelCase_ )
A = crop_size if crop_size is not None else self.crop_size
A = get_size_dict(lowerCamelCase_ ,param_name="""crop_size""" )
A = make_list_of_images(lowerCamelCase_ )
if not valid_images(lowerCamelCase_ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
A = [to_numpy_array(lowerCamelCase_ ) for image in images]
if do_resize:
A = [self.resize(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ) for image in images]
if do_center_crop:
A = [self.center_crop(lowerCamelCase_ ,lowerCamelCase_ ) for image in images]
if do_rescale:
A = [self.rescale(lowerCamelCase_ ,lowerCamelCase_ ) for image in images]
if do_normalize:
A = [self.normalize(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ) for image in images]
A = [to_channel_dimension_format(lowerCamelCase_ ,lowerCamelCase_ ) for image in images]
A = {"""pixel_values""": images}
return BatchFeature(data=lowerCamelCase_ ,tensor_type=lowerCamelCase_ )
| 77
| 0
|
'''simple docstring'''
import dataclasses
import re
import string
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple
import numpy as np
from . import residue_constants
FeatureDict = Mapping[str, np.ndarray]
ModelOutput = Mapping[str, Any]  # Is a nested dict.
PICO_TO_ANGSTROM = 0.01


@dataclasses.dataclass(frozen=True)
class Protein:
"""simple docstring"""
    atom_positions: np.ndarray  # [num_res, num_atom_type, 3]

    # Amino-acid type for each residue represented as an integer between 0 and
    # 20, where 20 is 'X'.
    aatype: np.ndarray  # [num_res]

    # Binary float mask to indicate presence of a particular atom. 1.0 if an atom
    # is present and 0.0 if not. This should be used for loss masking.
    atom_mask: np.ndarray  # [num_res, num_atom_type]

    # Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
    residue_index: np.ndarray  # [num_res]

    # B-factors, or temperature factors, of each residue (in sq. angstroms units),
    # representing the displacement of the residue from its ground truth mean
    # value.
    b_factors: np.ndarray  # [num_res, num_atom_type]

    # Chain indices for multi-chain predictions
    chain_index: Optional[np.ndarray] = None

    # Optional remark about the protein. Included as a comment in output PDB
    # files
    remark: Optional[str] = None

    # Templates used to generate this protein (prediction-only)
    parents: Optional[Sequence[str]] = None

    # Chain corresponding to each parent
    parents_chain_index: Optional[Sequence[int]] = None
def snake_case_ ( lowerCAmelCase_ )-> Protein:
'''simple docstring'''
_UpperCAmelCase : List[Any] = R"""(\[[A-Z]+\]\n)"""
_UpperCAmelCase : List[str] = [tag.strip() for tag in re.split(lowerCAmelCase_ , lowerCAmelCase_ ) if len(lowerCAmelCase_ ) > 0]
_UpperCAmelCase : Iterator[Tuple[str, List[str]]] = zip(tags[0::2] , [l.split("""\n""" ) for l in tags[1::2]] )
_UpperCAmelCase : List[str] = ["N", "CA", "C"]
_UpperCAmelCase : List[Any] = None
_UpperCAmelCase : Tuple = None
_UpperCAmelCase : Union[str, Any] = None
for g in groups:
if "[PRIMARY]" == g[0]:
_UpperCAmelCase : List[str] = g[1][0].strip()
for i in range(len(lowerCAmelCase_ ) ):
if seq[i] not in residue_constants.restypes:
_UpperCAmelCase : Any = """X""" # FIXME: strings are immutable
_UpperCAmelCase : List[Any] = np.array(
[residue_constants.restype_order.get(lowerCAmelCase_ , residue_constants.restype_num ) for res_symbol in seq] )
elif "[TERTIARY]" == g[0]:
_UpperCAmelCase : List[List[float]] = []
for axis in range(3 ):
tertiary.append(list(map(lowerCAmelCase_ , g[1][axis].split() ) ) )
_UpperCAmelCase : Tuple = np.array(lowerCAmelCase_ )
_UpperCAmelCase : Optional[Any] = np.zeros((len(tertiary[0] ) // 3, residue_constants.atom_type_num, 3) ).astype(np.floataa )
for i, atom in enumerate(lowerCAmelCase_ ):
_UpperCAmelCase : List[str] = np.transpose(tertiary_np[:, i::3] )
atom_positions *= PICO_TO_ANGSTROM
elif "[MASK]" == g[0]:
_UpperCAmelCase : List[str] = np.array(list(map({"""-""": 0, """+""": 1}.get , g[1][0].strip() ) ) )
_UpperCAmelCase : str = np.zeros(
(
len(lowerCAmelCase_ ),
residue_constants.atom_type_num,
) ).astype(np.floataa )
for i, atom in enumerate(lowerCAmelCase_ ):
_UpperCAmelCase : Any = 1
atom_mask *= mask[..., None]
assert aatype is not None
return Protein(
atom_positions=lowerCAmelCase_ , atom_mask=lowerCAmelCase_ , aatype=lowerCAmelCase_ , residue_index=np.arange(len(lowerCAmelCase_ ) ) , b_factors=lowerCAmelCase_ , )
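

# ProteinNet records store only the N, CA and C backbone atoms, with coordinates in
# picometers -- hence the ["N", "CA", "C"] atom list above and the PICO_TO_ANGSTROM
# rescaling of the parsed positions.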
def snake_case_ ( lowerCAmelCase_ , lowerCAmelCase_ = 0 )-> List[str]:
'''simple docstring'''
_UpperCAmelCase : List[str] = []
_UpperCAmelCase : int = prot.remark
if remark is not None:
pdb_headers.append(F'''REMARK {remark}''' )
_UpperCAmelCase : Tuple = prot.parents
_UpperCAmelCase : str = prot.parents_chain_index
if parents is not None and parents_chain_index is not None:
_UpperCAmelCase : Tuple = [p for i, p in zip(lowerCAmelCase_ , lowerCAmelCase_ ) if i == chain_id]
if parents is None or len(lowerCAmelCase_ ) == 0:
_UpperCAmelCase : Optional[Any] = ["""N/A"""]
pdb_headers.append(F'''PARENT {' '.join(lowerCAmelCase_ )}''' )
return pdb_headers
def snake_case_ ( lowerCAmelCase_ , lowerCAmelCase_ )-> str:
'''simple docstring'''
_UpperCAmelCase : List[str] = []
_UpperCAmelCase : List[Any] = pdb_str.split("""\n""" )
_UpperCAmelCase : Optional[Any] = prot.remark
if remark is not None:
out_pdb_lines.append(F'''REMARK {remark}''' )
_UpperCAmelCase : List[List[str]]
if prot.parents is not None and len(prot.parents ) > 0:
_UpperCAmelCase : List[str] = []
if prot.parents_chain_index is not None:
_UpperCAmelCase : Dict[str, List[str]] = {}
for p, i in zip(prot.parents , prot.parents_chain_index ):
parent_dict.setdefault(str(lowerCAmelCase_ ) , [] )
parent_dict[str(lowerCAmelCase_ )].append(lowerCAmelCase_ )
_UpperCAmelCase : List[Any] = max([int(lowerCAmelCase_ ) for chain_idx in parent_dict] )
for i in range(max_idx + 1 ):
_UpperCAmelCase : Optional[Any] = parent_dict.get(str(lowerCAmelCase_ ) , ["""N/A"""] )
parents_per_chain.append(lowerCAmelCase_ )
else:
parents_per_chain.append(list(prot.parents ) )
else:
_UpperCAmelCase : Union[str, Any] = [["""N/A"""]]
def make_parent_line(lowerCAmelCase_ ) -> str:
return F'''PARENT {' '.join(lowerCAmelCase_ )}'''
out_pdb_lines.append(make_parent_line(parents_per_chain[0] ) )
_UpperCAmelCase : int = 0
for i, l in enumerate(lowerCAmelCase_ ):
if "PARENT" not in l and "REMARK" not in l:
out_pdb_lines.append(lowerCAmelCase_ )
if "TER" in l and "END" not in lines[i + 1]:
chain_counter += 1
if not chain_counter >= len(lowerCAmelCase_ ):
_UpperCAmelCase : Union[str, Any] = parents_per_chain[chain_counter]
else:
_UpperCAmelCase : Dict = ["""N/A"""]
out_pdb_lines.append(make_parent_line(lowerCAmelCase_ ) )
return "\n".join(lowerCAmelCase_ )
def snake_case_ ( lowerCAmelCase_ )-> str:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = residue_constants.restypes + ["""X"""]
def res_atoa(lowerCAmelCase_ ) -> str:
return residue_constants.restype_atoa.get(restypes[r] , """UNK""" )
_UpperCAmelCase : Optional[int] = residue_constants.atom_types
_UpperCAmelCase : List[str] = []
_UpperCAmelCase : Any = prot.atom_mask
_UpperCAmelCase : Tuple = prot.aatype
_UpperCAmelCase : Any = prot.atom_positions
_UpperCAmelCase : Optional[Any] = prot.residue_index.astype(np.intaa )
_UpperCAmelCase : Optional[int] = prot.b_factors
_UpperCAmelCase : Optional[int] = prot.chain_index
if np.any(aatype > residue_constants.restype_num ):
raise ValueError("""Invalid aatypes.""" )
_UpperCAmelCase : Any = get_pdb_headers(lowerCAmelCase_ )
if len(lowerCAmelCase_ ) > 0:
pdb_lines.extend(lowerCAmelCase_ )
_UpperCAmelCase : Any = aatype.shape[0]
_UpperCAmelCase : str = 1
_UpperCAmelCase : Optional[int] = 0
_UpperCAmelCase : List[Any] = string.ascii_uppercase
_UpperCAmelCase : List[Any] = None
# Add all atom sites.
for i in range(lowerCAmelCase_ ):
_UpperCAmelCase : List[str] = res_atoa(aatype[i] )
for atom_name, pos, mask, b_factor in zip(lowerCAmelCase_ , atom_positions[i] , atom_mask[i] , b_factors[i] ):
if mask < 0.5:
continue
_UpperCAmelCase : str = """ATOM"""
_UpperCAmelCase : Any = atom_name if len(lowerCAmelCase_ ) == 4 else F''' {atom_name}'''
_UpperCAmelCase : Dict = """"""
_UpperCAmelCase : Union[str, Any] = """"""
_UpperCAmelCase : Tuple = 1.0_0
_UpperCAmelCase : Dict = atom_name[0] # Protein supports only C, N, O, S, this works.
_UpperCAmelCase : Optional[Any] = """"""
_UpperCAmelCase : List[str] = """A"""
if chain_index is not None:
_UpperCAmelCase : List[Any] = chain_tags[chain_index[i]]
# PDB is a columnar format, every space matters here!
_UpperCAmelCase : Tuple = (
F'''{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}'''
F'''{res_name_a:>3} {chain_tag:>1}'''
F'''{residue_index[i]:>4}{insertion_code:>1} '''
F'''{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}'''
F'''{occupancy:>6.2f}{b_factor:>6.2f} '''
F'''{element:>2}{charge:>2}'''
)
pdb_lines.append(lowerCAmelCase_ )
atom_index += 1
_UpperCAmelCase : int = i == n - 1
if chain_index is not None:
if i != n - 1 and chain_index[i + 1] != prev_chain_index:
_UpperCAmelCase : Optional[int] = True
_UpperCAmelCase : str = chain_index[i + 1]
if should_terminate:
# Close the chain.
_UpperCAmelCase : Tuple = """TER"""
_UpperCAmelCase : List[str] = (
F'''{chain_end:<6}{atom_index:>5} {res_atoa(aatype[i] ):>3} {chain_tag:>1}{residue_index[i]:>4}'''
)
pdb_lines.append(lowerCAmelCase_ )
atom_index += 1
if i != n - 1:
# "prev" is a misnomer here. This happens at the beginning of
# each new chain.
pdb_lines.extend(get_pdb_headers(lowerCAmelCase_ , lowerCAmelCase_ ) )
pdb_lines.append("""END""" )
pdb_lines.append("""""" )
return "\n".join(lowerCAmelCase_ )
def ideal_atom_mask(prot: Protein) -> np.ndarray:
    '''Computes an ideal atom mask from the protein's amino-acid types.'''
    return residue_constants.STANDARD_ATOM_MASK[prot.aatype]
def from_prediction(features: FeatureDict, result: ModelOutput, b_factors: Optional[np.ndarray] = None, chain_index: Optional[np.ndarray] = None, remark: Optional[str] = None, parents: Optional[Sequence[str]] = None, parents_chain_index: Optional[Sequence[int]] = None) -> Protein:
    '''Assembles a Protein object from model features and prediction results.'''
    return Protein(
        aatype=features["""aatype"""] , atom_positions=result["""final_atom_positions"""] , atom_mask=result["""final_atom_mask"""] , residue_index=features["""residue_index"""] + 1 , b_factors=b_factors if b_factors is not None else np.zeros_like(result["""final_atom_mask"""] ) , chain_index=chain_index , remark=remark , parents=parents , parents_chain_index=parents_chain_index , )
| 215
|
'''simple docstring'''
from numpy import exp, pi, sqrt
def gaussian(x, mu: float = 0.0, sigma: float = 1.0) -> float:
    '''Evaluates the Gaussian (normal) probability density function at x.'''
    return 1 / sqrt(2 * pi * sigma**2) * exp(-((x - mu) ** 2) / (2 * sigma**2))
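

# Sanity check at the mean of the standard normal (mu=0, sigma=1):
# gaussian(0) == 1 / sqrt(2 * pi) ~= 0.3989422804014327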
if __name__ == "__main__":
import doctest
doctest.testmod()
| 215
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    'configuration_nezha': ['NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'NezhaConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_nezha'] = [
'NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST',
'NezhaForNextSentencePrediction',
'NezhaForMaskedLM',
'NezhaForPreTraining',
'NezhaForMultipleChoice',
'NezhaForQuestionAnswering',
'NezhaForSequenceClassification',
'NezhaForTokenClassification',
'NezhaModel',
'NezhaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nezha import (
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
NezhaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 33
|
import os
# Precomputes a list of the 100 first triangular numbers
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 101)]
def solution() -> int:
    script_directory = os.path.dirname(os.path.realpath(__file__))
    words_file_path = os.path.join(script_directory , '''words.txt''')

    words = ''''''
    with open(words_file_path) as f:
        words = f.readline()

    words = [word.strip('''"''') for word in words.strip('''\r\n''').split(''',''')]
    words = [
        word
        for word in [sum(ord(x) - 64 for x in word) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(words)
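

# Example word value: "SKY" -> ord('S') - 64 + ord('K') - 64 + ord('Y') - 64
# = 19 + 11 + 25 = 55, and 55 == 10 * 11 // 2 is the 10th triangular number,
# so "SKY" counts as a triangular word.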
if __name__ == "__main__":
print(solution())
| 33
| 1
|
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

model_classes = {
    "b0": efficientnet.EfficientNetB0,
    "b1": efficientnet.EfficientNetB1,
    "b2": efficientnet.EfficientNetB2,
    "b3": efficientnet.EfficientNetB3,
    "b4": efficientnet.EfficientNetB4,
    "b5": efficientnet.EfficientNetB5,
    "b6": efficientnet.EfficientNetB6,
    "b7": efficientnet.EfficientNetB7,
}

CONFIG_MAP = {
"b0": {
"hidden_dim": 1_280,
"width_coef": 1.0,
"depth_coef": 1.0,
"image_size": 224,
"dropout_rate": 0.2,
"dw_padding": [],
},
"b1": {
"hidden_dim": 1_280,
"width_coef": 1.0,
"depth_coef": 1.1,
"image_size": 240,
"dropout_rate": 0.2,
"dw_padding": [16],
},
"b2": {
"hidden_dim": 1_408,
"width_coef": 1.1,
"depth_coef": 1.2,
"image_size": 260,
"dropout_rate": 0.3,
"dw_padding": [5, 8, 16],
},
"b3": {
"hidden_dim": 1_536,
"width_coef": 1.2,
"depth_coef": 1.4,
"image_size": 300,
"dropout_rate": 0.3,
"dw_padding": [5, 18],
},
"b4": {
"hidden_dim": 1_792,
"width_coef": 1.4,
"depth_coef": 1.8,
"image_size": 380,
"dropout_rate": 0.4,
"dw_padding": [6],
},
"b5": {
"hidden_dim": 2_048,
"width_coef": 1.6,
"depth_coef": 2.2,
"image_size": 456,
"dropout_rate": 0.4,
"dw_padding": [13, 27],
},
"b6": {
"hidden_dim": 2_304,
"width_coef": 1.8,
"depth_coef": 2.6,
"image_size": 528,
"dropout_rate": 0.5,
"dw_padding": [31],
},
"b7": {
"hidden_dim": 2_560,
"width_coef": 2.0,
"depth_coef": 3.1,
"image_size": 600,
"dropout_rate": 0.5,
"dw_padding": [18],
},
}
def get_efficientnet_config(model_name):
    config = EfficientNetConfig()
    config.hidden_dim = CONFIG_MAP[model_name]["hidden_dim"]
    config.width_coefficient = CONFIG_MAP[model_name]["width_coef"]
    config.depth_coefficient = CONFIG_MAP[model_name]["depth_coef"]
    config.image_size = CONFIG_MAP[model_name]["image_size"]
    config.dropout_rate = CONFIG_MAP[model_name]["dropout_rate"]
    config.depthwise_padding = CONFIG_MAP[model_name]["dw_padding"]

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    config.num_labels = 1000
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="dataset") , "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url , stream=True).raw)
    return im
def convert_image_processor(model_name):
    size = CONFIG_MAP[model_name]["image_size"]
    preprocessor = EfficientNetImageProcessor(
        size={"height": size, "width": size} , image_mean=[0.4_8_5, 0.4_5_6, 0.4_0_6] , image_std=[0.4_7_8_5_3_9_4_4, 0.4_7_3_2_8_6_4, 0.4_7_4_3_4_1_6_3] , do_center_crop=False , )
    return preprocessor
def rename_keys(original_param_names):
    block_names = [v.split("_")[0].split("block")[1] for v in original_param_names if v.startswith("block")]
    block_names = sorted(set(block_names))
    num_blocks = len(block_names)
    block_name_mapping = {b: str(i) for b, i in zip(block_names , range(num_blocks))}

    rename_keys = []
rename_keys.append(("stem_conv/kernel:0", "embeddings.convolution.weight") )
rename_keys.append(("stem_bn/gamma:0", "embeddings.batchnorm.weight") )
rename_keys.append(("stem_bn/beta:0", "embeddings.batchnorm.bias") )
rename_keys.append(("stem_bn/moving_mean:0", "embeddings.batchnorm.running_mean") )
rename_keys.append(("stem_bn/moving_variance:0", "embeddings.batchnorm.running_var") )
for b in block_names:
        hf_b = block_name_mapping[b]
rename_keys.append((F'block{b}_expand_conv/kernel:0', F'encoder.blocks.{hf_b}.expansion.expand_conv.weight') )
rename_keys.append((F'block{b}_expand_bn/gamma:0', F'encoder.blocks.{hf_b}.expansion.expand_bn.weight') )
rename_keys.append((F'block{b}_expand_bn/beta:0', F'encoder.blocks.{hf_b}.expansion.expand_bn.bias') )
rename_keys.append(
(F'block{b}_expand_bn/moving_mean:0', F'encoder.blocks.{hf_b}.expansion.expand_bn.running_mean') )
rename_keys.append(
(F'block{b}_expand_bn/moving_variance:0', F'encoder.blocks.{hf_b}.expansion.expand_bn.running_var') )
rename_keys.append(
(F'block{b}_dwconv/depthwise_kernel:0', F'encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight') )
rename_keys.append((F'block{b}_bn/gamma:0', F'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight') )
rename_keys.append((F'block{b}_bn/beta:0', F'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias') )
rename_keys.append(
(F'block{b}_bn/moving_mean:0', F'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean') )
rename_keys.append(
(F'block{b}_bn/moving_variance:0', F'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var') )
rename_keys.append((F'block{b}_se_reduce/kernel:0', F'encoder.blocks.{hf_b}.squeeze_excite.reduce.weight') )
rename_keys.append((F'block{b}_se_reduce/bias:0', F'encoder.blocks.{hf_b}.squeeze_excite.reduce.bias') )
rename_keys.append((F'block{b}_se_expand/kernel:0', F'encoder.blocks.{hf_b}.squeeze_excite.expand.weight') )
rename_keys.append((F'block{b}_se_expand/bias:0', F'encoder.blocks.{hf_b}.squeeze_excite.expand.bias') )
rename_keys.append(
(F'block{b}_project_conv/kernel:0', F'encoder.blocks.{hf_b}.projection.project_conv.weight') )
rename_keys.append((F'block{b}_project_bn/gamma:0', F'encoder.blocks.{hf_b}.projection.project_bn.weight') )
rename_keys.append((F'block{b}_project_bn/beta:0', F'encoder.blocks.{hf_b}.projection.project_bn.bias') )
rename_keys.append(
(F'block{b}_project_bn/moving_mean:0', F'encoder.blocks.{hf_b}.projection.project_bn.running_mean') )
rename_keys.append(
(F'block{b}_project_bn/moving_variance:0', F'encoder.blocks.{hf_b}.projection.project_bn.running_var') )
rename_keys.append(("top_conv/kernel:0", "encoder.top_conv.weight") )
rename_keys.append(("top_bn/gamma:0", "encoder.top_bn.weight") )
rename_keys.append(("top_bn/beta:0", "encoder.top_bn.bias") )
rename_keys.append(("top_bn/moving_mean:0", "encoder.top_bn.running_mean") )
rename_keys.append(("top_bn/moving_variance:0", "encoder.top_bn.running_var") )
    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = "efficientnet." + item[1]

    key_mapping["predictions/kernel:0"] = "classifier.weight"
    key_mapping["predictions/bias:0"] = "classifier.bias"
    return key_mapping
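# Copy the TF weights into the HF state dict, permuting convolution kernels from
# TF's HWIO layout to PyTorch's OIHW (and depthwise kernels accordingly).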
def replace_params(hf_params, tf_params, key_mapping):
    for key, value in tf_params.items():
        if "normalization" in key:
            continue

        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(3, 2, 0, 1)
        elif "depthwise_kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(2, 3, 0, 1)
        elif "kernel" in key:
            new_hf_value = torch.from_numpy(np.transpose(value))
        else:
            new_hf_value = torch.from_numpy(value)

        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value)
@torch.no_grad()
def convert_efficientnet_checkpoint(model_name, pytorch_dump_folder_path, save_model, push_to_hub):
    # Load the original Keras model
    original_model = model_classes[model_name](
        include_top=True,
        weights="imagenet",
        input_tensor=None,
        input_shape=None,
        pooling=None,
        classes=1000,
        classifier_activation="softmax",
    )

    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys())

    # Load HuggingFace model
    config = get_efficientnet_config(model_name)
    hf_model = EfficientNetForImageClassification(config).eval()
    hf_params = hf_model.state_dict()

    # Create src-to-dst parameter name mapping dictionary
    print("Converting parameters...")
    key_mapping = rename_keys(tf_param_names)
    replace_params(hf_params, tf_params, key_mapping)

    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name)
    inputs = preprocessor(images=prepare_img(), return_tensors="pt")

    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs)
    hf_logits = outputs.logits.detach().numpy()

    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]["image_size"]
    img = prepare_img().resize((image_size, image_size), resample=PIL.Image.NEAREST)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    original_logits = original_model.predict(x)

    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits, hf_logits, atol=1e-3), "The predicted logits are not the same."
    print("Model outputs match!")

    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path):
            os.mkdir(pytorch_dump_folder_path)
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path)
        preprocessor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model and image processor to hub
        print(f"Pushing converted {model_name} to the hub...")
        model_name = f"efficientnet-{model_name}"
        preprocessor.push_to_hub(model_name)
        hf_model.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="b0",
type=str,
help="Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="hf_model",
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument("--save_model", action="store_true", help="Save model to local")
parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
    args = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
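# Example invocation (a sketch; the script filename and output path are placeholders):
#   python convert_efficientnet_to_pytorch.py --model_name b0 \
#       --pytorch_dump_folder_path hf_model --save_model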
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_altclip": [
"ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"AltCLIPConfig",
"AltCLIPTextConfig",
"AltCLIPVisionConfig",
],
"processing_altclip": ["AltCLIPProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_altclip"] = [
"ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"AltCLIPPreTrainedModel",
"AltCLIPModel",
"AltCLIPTextModel",
"AltCLIPVisionModel",
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
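# Fitness: the number of positions at which a candidate string matches the target.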
def evaluate(item: str, main_target: str) -> tuple[str, float]:
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))
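# e.g. evaluate("Helxo Worlx", "Hello World") -> ("Helxo Worlx", 9.0)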
def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)
def mutate(child: str, genes: list[str]) -> str:
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child)) - 1] = random.choice(genes)
    return "".join(child_list)
def select(
    parent_1: tuple[str, float],
    population_score: list[tuple[str, float]],
    genes: list[str],
) -> list[str]:
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]
        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop
def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    # Verify if N_POPULATION is bigger than N_SELECTED
    if N_POPULATION < N_SELECTED:
        msg = f"{N_POPULATION} must be bigger than {N_SELECTED}"
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f"{not_in_genes_list} is not in genes list, evolution cannot converge"
        raise ValueError(msg)

    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))
# Just some logs to know what the algorithms is doing.
    generation, total_population = 0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
        total_population += len(population)
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]
# Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
# Print the best result every 10 generation.
# Just to know that the algorithm is working.
if debug and generation % 1_0 == 0:
print(
F"""\nGeneration: {generation}"""
F"""\nTotal Population:{total_population}"""
F"""\nBest score: {population_score[0][1]}"""
F"""\nBest string: {population_score[0][0]}""" )
# Flush the old population, keeping some of the best evolutions.
# Keeping this avoid regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)
        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target)) for item, score in population_score
        ]
        # This is selection
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
# Check if the population has already reached the maximum value and if so,
# break the cycle. If this check is disabled, the algorithm will take
# forever to compute large strings, but will also calculate small strings in
# a far fewer generations.
            if len(population) > N_POPULATION:
break
if __name__ == "__main__":
    target_str = (
'''This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!'''
)
    genes_list = list(
''' ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm'''
'''nopqrstuvwxyz.,;!?+-*#@^\'èéòà€ù=)(&%$£/\\'''
)
    generation, population, target = basic(target_str, genes_list)
print(
f'\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}'
)
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'''configuration_speech_to_text''': ['''SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Speech2TextConfig'''],
'''processing_speech_to_text''': ['''Speech2TextProcessor'''],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_speech_to_text"] = ["Speech2TextTokenizer"]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_speech_to_text"] = ["Speech2TextFeatureExtractor"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_speech_to_text"] = [
'''TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFSpeech2TextForConditionalGeneration''',
'''TFSpeech2TextModel''',
'''TFSpeech2TextPreTrainedModel''',
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_speech_to_text"] = [
'''SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Speech2TextForConditionalGeneration''',
'''Speech2TextModel''',
'''Speech2TextPreTrainedModel''',
]
if TYPE_CHECKING:
    from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig
    from .processing_speech_to_text import Speech2TextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_speech_to_text import Speech2TextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_speech_to_text import Speech2TextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_tf_speech_to_text import (
            TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSpeech2TextForConditionalGeneration,
            TFSpeech2TextModel,
            TFSpeech2TextPreTrainedModel,
        )
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_speech_to_text import (
            SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Speech2TextForConditionalGeneration,
            Speech2TextModel,
            Speech2TextPreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
import argparse
import json
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
VideoMAEConfig,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEImageProcessor,
)
def get_videomae_config(model_name):
    config = VideoMAEConfig()

    set_architecture_configs(model_name, config)

    if "finetuned" not in model_name:
        config.use_mean_pooling = False

    if "finetuned" in model_name:
        repo_id = "huggingface/label-files"
        if "kinetics" in model_name:
            config.num_labels = 400
            filename = "kinetics400-id2label.json"
        elif "ssv2" in model_name:
            config.num_labels = 174
            filename = "something-something-v2-id2label.json"
        else:
            raise ValueError("Model name should either contain 'kinetics' or 'ssv2' in case it's fine-tuned.")
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    return config
def set_architecture_configs(model_name, config):
    if "small" in model_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 3
        config.decoder_hidden_size = 192
        config.decoder_intermediate_size = 768
    elif "large" in model_name:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 512
        config.decoder_intermediate_size = 2048
    elif "huge" in model_name:
        config.hidden_size = 1280
        config.intermediate_size = 5120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 640
        config.decoder_intermediate_size = 2560
    elif "base" not in model_name:
        raise ValueError('Model name should include either "small", "base", "large", or "huge"')
def rename_key(name):
    if "encoder." in name:
        name = name.replace("encoder.", "")
    if "cls_token" in name:
        name = name.replace("cls_token", "videomae.embeddings.cls_token")
    if "decoder_pos_embed" in name:
        name = name.replace("decoder_pos_embed", "decoder.decoder_pos_embed")
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace("pos_embed", "videomae.embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "videomae.embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "videomae.embeddings.norm")
    if "decoder.blocks" in name:
        name = name.replace("decoder.blocks", "decoder.decoder_layers")
    if "blocks" in name:
        name = name.replace("blocks", "videomae.encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name and "bias" not in name:
        name = name.replace("attn", "attention.self")
    if "attn" in name:
        name = name.replace("attn", "attention.attention")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "decoder_embed" in name:
        name = name.replace("decoder_embed", "decoder.decoder_embed")
    if "decoder_norm" in name:
        name = name.replace("decoder_norm", "decoder.decoder_norm")
    if "decoder_pred" in name:
        name = name.replace("decoder_pred", "decoder.decoder_pred")
    if "norm.weight" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("norm.weight", "videomae.layernorm.weight")
    if "norm.bias" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("norm.bias", "videomae.layernorm.bias")
    if "head" in name and "decoder" not in name:
        name = name.replace("head", "classifier")
    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if key.startswith("encoder."):
            key = key.replace("encoder.", "")
        if "qkv" in key:
            # split fused qkv matrices into separate query/key/value projections
            key_split = key.split(".")
            if key.startswith("decoder.blocks"):
                dim = config.decoder_hidden_size
                layer_num = int(key_split[2])
                prefix = "decoder.decoder_layers."
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                dim = config.hidden_size
                layer_num = int(key_split[1])
                prefix = "videomae.encoder.layer."
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def prepare_video():
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(file)
    return list(video)
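# End-to-end conversion: build the config, load the original checkpoint from Google
# Drive, remap its state dict, then sanity-check the HF model on a sample video.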
def convert_videomae_checkpoint(checkpoint_url, pytorch_dump_folder_path, model_name, push_to_hub):
    config = get_videomae_config(model_name)

    if "finetuned" in model_name:
        model = VideoMAEForVideoClassification(config)
    else:
        model = VideoMAEForPreTraining(config)

    # download original checkpoint, hosted on Google Drive
    output = "pytorch_model.bin"
    gdown.cached_download(checkpoint_url, output, quiet=False)
    files = torch.load(output, map_location="cpu")
    if "model" in files:
        state_dict = files["model"]
    else:
        state_dict = files["module"]
    new_state_dict = convert_state_dict(state_dict, config)

    model.load_state_dict(new_state_dict)
    model.eval()

    # verify model on basic input
    image_processor = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
    video = prepare_video()
    inputs = image_processor(video, return_tensors="pt")

    if "finetuned" not in model_name:
        local_path = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos", filename="bool_masked_pos.pt")
        inputs["bool_masked_pos"] = torch.load(local_path)

    outputs = model(**inputs)
    logits = outputs.logits

    model_names = [
'''videomae-small-finetuned-kinetics''',
'''videomae-small-finetuned-ssv2''',
# Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
'''videomae-base-short''',
'''videomae-base-short-finetuned-kinetics''',
'''videomae-base''',
'''videomae-base-finetuned-kinetics''',
'''videomae-large''',
'''videomae-large-finetuned-kinetics''',
'''videomae-huge-finetuned-kinetics''',
# Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
'''videomae-base-short-ssv2''',
'''videomae-base-short-finetuned-ssv2''',
'''videomae-base-ssv2''',
'''videomae-base-finetuned-ssv2''',
]
# NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
if model_name == "videomae-small-finetuned-kinetics":
snake_case_ = torch.Size([1, 400] )
snake_case_ = torch.tensor([-0.9_2_9_1, -0.4_0_6_1, -0.9_3_0_7] )
elif model_name == "videomae-small-finetuned-ssv2":
snake_case_ = torch.Size([1, 174] )
snake_case_ = torch.tensor([0.2_6_7_1, -0.4_6_8_9, -0.8_2_3_5] )
elif model_name == "videomae-base":
snake_case_ = torch.Size([1, 1408, 1536] )
snake_case_ = torch.tensor([[0.7_7_3_9, 0.7_9_6_8, 0.7_0_8_9], [0.6_7_0_1, 0.7_4_8_7, 0.6_2_0_9], [0.4_2_8_7, 0.5_1_5_8, 0.4_7_7_3]] )
elif model_name == "videomae-base-short":
snake_case_ = torch.Size([1, 1408, 1536] )
snake_case_ = torch.tensor([[0.7_9_9_4, 0.9_6_1_2, 0.8_5_0_8], [0.7_4_0_1, 0.8_9_5_8, 0.8_3_0_2], [0.5_8_6_2, 0.7_4_6_8, 0.7_3_2_5]] )
# we verified the loss both for normalized and unnormalized targets for this one
snake_case_ = torch.tensor([0.5_1_4_2] ) if config.norm_pix_loss else torch.tensor([0.6_4_6_9] )
elif model_name == "videomae-large":
snake_case_ = torch.Size([1, 1408, 1536] )
snake_case_ = torch.tensor([[0.7_1_4_9, 0.7_9_9_7, 0.6_9_6_6], [0.6_7_6_8, 0.7_8_6_9, 0.6_9_4_8], [0.5_1_3_9, 0.6_2_2_1, 0.5_6_0_5]] )
elif model_name == "videomae-large-finetuned-kinetics":
snake_case_ = torch.Size([1, 400] )
snake_case_ = torch.tensor([0.0_7_7_1, 0.0_0_1_1, -0.3_6_2_5] )
elif model_name == "videomae-huge-finetuned-kinetics":
snake_case_ = torch.Size([1, 400] )
snake_case_ = torch.tensor([0.2_4_3_3, 0.1_6_3_2, -0.4_8_9_4] )
elif model_name == "videomae-base-short-finetuned-kinetics":
snake_case_ = torch.Size([1, 400] )
snake_case_ = torch.tensor([0.6_5_8_8, 0.0_9_9_0, -0.2_4_9_3] )
elif model_name == "videomae-base-finetuned-kinetics":
snake_case_ = torch.Size([1, 400] )
snake_case_ = torch.tensor([0.3_6_6_9, -0.0_6_8_8, -0.2_4_2_1] )
elif model_name == "videomae-base-short-ssv2":
snake_case_ = torch.Size([1, 1408, 1536] )
snake_case_ = torch.tensor([[0.4_7_1_2, 0.5_2_9_6, 0.5_7_8_6], [0.2_2_7_8, 0.2_7_2_9, 0.4_0_2_6], [0.0_3_5_2, 0.0_7_3_0, 0.2_5_0_6]] )
elif model_name == "videomae-base-short-finetuned-ssv2":
snake_case_ = torch.Size([1, 174] )
snake_case_ = torch.tensor([-0.0_5_3_7, -0.1_5_3_9, -0.3_2_6_6] )
elif model_name == "videomae-base-ssv2":
snake_case_ = torch.Size([1, 1408, 1536] )
snake_case_ = torch.tensor([[0.8_1_3_1, 0.8_7_2_7, 0.8_5_4_6], [0.7_3_6_6, 0.9_3_7_7, 0.8_8_7_0], [0.5_9_3_5, 0.8_8_7_4, 0.8_5_6_4]] )
elif model_name == "videomae-base-finetuned-ssv2":
snake_case_ = torch.Size([1, 174] )
snake_case_ = torch.tensor([0.1_9_6_1, -0.8_3_3_7, -0.6_3_8_9] )
else:
raise ValueError(F"Model name not supported. Should be one of {model_names}" )
# verify logits
    assert logits.shape == expected_shape
    if "finetuned" in model_name:
        assert torch.allclose(logits[0, :3], expected_slice, atol=1e-4)
    else:
        print("Logits:", logits[0, :3, :3])
        assert torch.allclose(logits[0, :3, :3], expected_slice, atol=1e-4)
    print("Logits ok!")

    # verify loss, if applicable
    if model_name == "videomae-base-short":
        loss = outputs.loss
        assert torch.allclose(loss, expected_loss, atol=1e-4)
        print("Loss ok!")
if pytorch_dump_folder_path is not None:
print(F"Saving model and image processor to {pytorch_dump_folder_path}" )
        image_processor.save_pretrained(pytorch_dump_folder_path)
        model.save_pretrained(pytorch_dump_folder_path)
if push_to_hub:
print('''Pushing to the hub...''' )
        model.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4',
type=str,
help=(
'URL of the original PyTorch checkpoint (on Google Drive) you\'d like to convert. Should be a direct'
' download link.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='/Users/nielsrogge/Documents/VideoMAE/Test',
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--model_name', default='videomae-base', type=str, help='Name of the model.')
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
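# Example invocation (a sketch; the dump path is a placeholder):
#   python convert_videomae_to_pytorch.py --model_name videomae-base \
#       --pytorch_dump_folder_path ./videomae-base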
'''simple docstring'''
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
_TestCommandArgs = namedtuple(
'_TestCommandArgs',
[
'dataset',
'name',
'cache_dir',
'data_dir',
'all_configs',
'save_infos',
'ignore_verifications',
'force_redownload',
'clear_cache',
],
defaults=[None, None, None, False, False, False, False, False],
)
def is_1percent_close(source, target):
    return (abs(source - target) / target) < 0.01
@pytest.mark.integration
def test_test_command(dataset_loading_script_dir):
    args = _TestCommandArgs(dataset=dataset_loading_script_dir, all_configs=True, save_infos=True)
    test_command = TestCommand(*args)
    test_command.run()
    readme_path = os.path.join(dataset_loading_script_dir, "README.md")
    assert os.path.exists(readme_path)
    dataset_infos = DatasetInfosDict.from_directory(dataset_loading_script_dir)
    expected_dataset_infos = DatasetInfosDict(
{
"""default""": DatasetInfo(
features=Features(
{
"""tokens""": Sequence(Value("""string""" ) ),
"""ner_tags""": Sequence(
ClassLabel(names=["""O""", """B-PER""", """I-PER""", """B-ORG""", """I-ORG""", """B-LOC""", """I-LOC"""] ) ),
"""langs""": Sequence(Value("""string""" ) ),
"""spans""": Sequence(Value("""string""" ) ),
} ) , splits=[
{
"""name""": """train""",
"""num_bytes""": 2_3_5_1_5_6_3,
"""num_examples""": 1_0_0_0_0,
},
{
"""name""": """validation""",
"""num_bytes""": 2_3_8_4_1_8,
"""num_examples""": 1_0_0_0,
},
] , download_size=3_9_4_0_6_8_0 , dataset_size=2_5_8_9_9_8_1 , )
} )
assert dataset_infos.keys() == expected_dataset_infos.keys()
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        result, expected = getattr(dataset_infos["default"], key), getattr(expected_dataset_infos["default"], key)
if key == "num_bytes":
            assert is_1percent_close(result, expected)
elif key == "splits":
assert list(UpperCamelCase__ ) == list(UpperCamelCase__ )
for split in result:
assert result[split].name == expected[split].name
assert result[split].num_examples == expected[split].num_examples
                assert is_1percent_close(result[split].num_bytes, expected[split].num_bytes)
else:
            assert result == expected
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class WhisperFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features"]

    def __init__(self, feature_size=80, sampling_rate=16000, hop_length=160, chunk_length=30, n_fft=400,
                 padding_value=0.0, return_attention_mask=False, **kwargs):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value,
                         return_attention_mask=return_attention_mask, **kwargs)
        self.n_fft = n_fft
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.n_samples = chunk_length * sampling_rate
        self.nb_max_frames = self.n_samples // hop_length
        self.sampling_rate = sampling_rate
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2, num_mel_filters=feature_size, min_frequency=0.0,
            max_frequency=8000.0, sampling_rate=sampling_rate, norm="slaney", mel_scale="slaney",
        )
    def _np_extract_fbank_features(self, waveform: np.array) -> np.ndarray:
        # compute a log-mel spectrogram and rescale it to roughly [-1, 1]
        log_spec = spectrogram(
            waveform, window_function(self.n_fft, "hann"), frame_length=self.n_fft,
            hop_length=self.hop_length, power=2.0, mel_filters=self.mel_filters, log_mel="log10",
        )
        log_spec = log_spec[:, :-1]
        log_spec = np.maximum(log_spec, log_spec.max() - 8.0)
        log_spec = (log_spec + 4.0) / 4.0
        return log_spec
    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(
        input_values: List[np.ndarray], attention_mask: List[np.ndarray], padding_value: float = 0.0
    ) -> List[np.ndarray]:
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []
            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value
                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]
        return normed_input_values
    def __call__(self, raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
                 truncation: bool = True, pad_to_multiple_of: Optional[int] = None,
                 return_tensors: Optional[Union[str, TensorType]] = None, return_attention_mask: Optional[bool] = None,
                 padding: Optional[str] = "max_length", max_length: Optional[int] = None,
                 sampling_rate: Optional[int] = None, do_normalize: Optional[bool] = None, **kwargs) -> BatchFeature:
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'
F' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'
F' was sampled with {self.sampling_rate} and not {sampling_rate}.' )
else:
logger.warning(
'It is strongly recommended to pass the `sampling_rate` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        batched_speech = BatchFeature({"input_features": raw_speech})
# convert into correct format for padding
        padded_inputs = self.pad(
            batched_speech, padding=padding, max_length=max_length if max_length else self.n_samples,
            truncation=truncation, pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask or do_normalize,
        )
        # zero-mean and unit-variance normalization
        if do_normalize:
            padded_inputs["input_features"] = self.zero_mean_unit_var_norm(
                padded_inputs["input_features"], attention_mask=padded_inputs["attention_mask"],
                padding_value=self.padding_value,
            )
            padded_inputs["input_features"] = np.stack(padded_inputs["input_features"], axis=0)

        # make sure list is in array format
        input_features = padded_inputs.get("input_features").transpose(2, 0, 1)

        input_features = [self._np_extract_fbank_features(waveform) for waveform in input_features[0]]

        if isinstance(input_features[0], List):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]
        else:
            padded_inputs["input_features"] = input_features

        if return_attention_mask:
            # rescale from sample (48000) to feature (3000)
            padded_inputs["attention_mask"] = padded_inputs["attention_mask"][:, :: self.hop_length]

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
    def to_dict(self) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        return output
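# A minimal usage sketch (assumes a 16 kHz mono waveform as a 1-D numpy array):
#   feature_extractor = WhisperFeatureExtractor()
#   features = feature_extractor(waveform, sampling_rate=16000, return_tensors="np")
#   features["input_features"].shape  # (1, 80, 3000) for a padded 30 s window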
from __future__ import annotations
class Node:
    def __init__(self, data: int) -> None:
        self.data = data
        self.left: Node | None = None
        self.right: Node | None = None
def display(tree: Node | None) -> None:  # In Order traversal of the tree
if tree:
display(tree.left )
print(tree.data )
display(tree.right )
def depth_of_tree(tree: Node | None) -> int:
    return 1 + max(depth_of_tree(tree.left), depth_of_tree(tree.right)) if tree else 0
def is_full_binary_tree(tree: Node) -> bool:
if not tree:
return True
if tree.left and tree.right:
return is_full_binary_tree(tree.left ) and is_full_binary_tree(tree.right )
else:
return not tree.left and not tree.right
def main() -> None:  # Main function for testing.
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    tree.left.right.left = Node(6)
    tree.right.left = Node(7)
    tree.right.left.left = Node(8)
    tree.left.right.right = Node(9)

    print(is_full_binary_tree(tree))
    print(depth_of_tree(tree))
    print("Tree is: ")
    display(tree)
if __name__ == "__main__":
main()
'''simple docstring'''
from __future__ import annotations
from math import pow, sqrt
def electrical_impedance(resistance: float, reactance: float, impedance: float) -> dict[str, float]:
    if (resistance, reactance, impedance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance, 2) - pow(reactance, 2))}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance, 2) - pow(resistance, 2))}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance, 2) + pow(reactance, 2))}
    else:
        raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
import doctest
doctest.testmod()
import math
import random
def sigmoid_function(value: float, deriv: bool = False) -> float:
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value))
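# Note: when deriv=True, `value` is assumed to already be a sigmoid output s,
# since d/dx sigmoid(x) = s * (1 - s).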
# Initial Value
INITIAL_VALUE = 0.02
def forward_propagation(expected: int, number_propagations: int) -> float:
    # Random weight
    weight = float(2 * (random.randint(1, 100)) - 1)

    for _ in range(number_propagations):
        # Forward propagation
        layer_1 = sigmoid_function(INITIAL_VALUE * weight)
        # How much did we miss?
        layer_1_error = (expected / 100) - layer_1
        # Error delta
        layer_1_delta = layer_1_error * sigmoid_function(layer_1, True)
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta

    return layer_1 * 100
if __name__ == "__main__":
import doctest
doctest.testmod()
__UpperCAmelCase = int(input("Expected value: "))
__UpperCAmelCase = int(input("Number of propagations: "))
print(forward_propagation(expected, number_propagations))
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def set_param(torch_layer, weight, bias=None):
    # set parameter of one layer
    assert torch_layer.weight.shape == weight.shape, f"{torch_layer} layer.weight does not match"
    torch_layer.weight = nn.Parameter(weight)
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f"{torch_layer} layer.bias does not match"
        torch_layer.bias = nn.Parameter(bias)
def set_layer_weights_in_torch_lsh(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query_key = np.asarray(weights[0])
    np_value = np.asarray(weights[1])
    np_dense = np.asarray(weights[2])

    set_param(
        torch_layer.self_attention.query_key,
        torch.tensor(np_query_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )
def set_layer_weights_in_torch_local(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query = np.asarray(weights[0])
    np_key = np.asarray(weights[1])
    np_value = np.asarray(weights[2])
    np_dense = np.asarray(weights[3])

    set_param(
        torch_layer.self_attention.query,
        torch.tensor(np_query).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.key,
        torch.tensor(np_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )
def set_block_weights_in_torch(weights, torch_block, hidden_size):
    # layer norm 1
    layer_norm_1 = weights[0][0][0]
    layer_norm_1_weight = np.asarray(layer_norm_1[0])
    layer_norm_1_bias = np.asarray(layer_norm_1[1])
    set_param(
        torch_block.attention.layer_norm,
        torch.tensor(layer_norm_1_weight),
        torch.tensor(layer_norm_1_bias),
    )

    # lsh weights + output
    attn_weights = weights[0][1]
    if len(attn_weights) < 4:
        set_layer_weights_in_torch_lsh(attn_weights, torch_block.attention, hidden_size)
    else:
        set_layer_weights_in_torch_local(attn_weights, torch_block.attention, hidden_size)

    # intermediate weighs
    intermediate_weights = weights[2][0][1][2]

    # Chunked Feed Forward
    if len(intermediate_weights) == 4:
        intermediate_weights = intermediate_weights[2]

    # layernorm 2
    layer_norm_2_weight = np.asarray(intermediate_weights[0][0])
    layer_norm_2_bias = np.asarray(intermediate_weights[0][1])
    set_param(
        torch_block.feed_forward.layer_norm,
        torch.tensor(layer_norm_2_weight),
        torch.tensor(layer_norm_2_bias),
    )

    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0])
    inter_dense_bias = np.asarray(intermediate_weights[1][1])
    set_param(
        torch_block.feed_forward.dense.dense,
        torch.tensor(inter_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(inter_dense_bias),
    )

    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0])
    out_dense_bias = np.asarray(intermediate_weights[4][1])
    set_param(
        torch_block.feed_forward.output.dense,
        torch.tensor(out_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(out_dense_bias),
    )
def set_model_weights_in_torch(weights, torch_model, hidden_size):
    # reformer model
    torch_model_reformer = torch_model.reformer

    # word embeds
    word_embeddings = np.asarray(weights[1])
    set_param(
        torch_model_reformer.embeddings.word_embeddings,
        torch.tensor(word_embeddings),
    )

    if isinstance(weights[3], tuple):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights)):
            emb_weights = np.asarray(weights[3][emb_idx][0])
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f"{position_embeddings[emb_idx]} emb does not match"
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights))

    trax_layer_weights = weights[5]
    assert len(torch_model_reformer.encoder.layers) * 4 == len(
        trax_layer_weights
    ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights, layer, hidden_size)

    # output layer norm
    layer_norm_out_weight = np.asarray(weights[7][0])
    layer_norm_out_bias = np.asarray(weights[7][1])
    set_param(
        torch_model_reformer.encoder.layer_norm,
        torch.tensor(layer_norm_out_weight),
        torch.tensor(layer_norm_out_bias),
    )

    # output embeddings
    output_embed_weights = np.asarray(weights[9][0])
    output_embed_bias = np.asarray(weights[9][1])
    set_param(
        torch_model.lm_head.decoder,
        torch.tensor(output_embed_weights).transpose(0, 1).contiguous(),
        torch.tensor(output_embed_bias),
    )
def convert_trax_checkpoint_to_pytorch(trax_model_pkl_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = ReformerConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = ReformerModelWithLMHead(config)

    with open(trax_model_pkl_path, "rb") as f:
        model_weights = pickle.load(f)["weights"]

    set_model_weights_in_torch(model_weights, model, config.hidden_size)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
_lowerCamelCase : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--trax_model_pkl_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained Reformer model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
_lowerCamelCase : Optional[int] = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
class Node:
    def __init__(self, name, val):
        self.name = name
        self.val = val

    def __str__(self):
        return f"{self.__class__.__name__}({self.name}, {self.val})"

    def __lt__(self, other):
        return self.val < other.val
class MinHeap:
    def __init__(self, array):
        self.idx_of_element = {}
        self.heap_dict = {}
        self.heap = self.build_heap(array)

    def __getitem__(self, key):
        return self.get_value(key)

    def get_parent_idx(self, idx):
        return (idx - 1) // 2

    def get_left_child_idx(self, idx):
        return idx * 2 + 1

    def get_right_child_idx(self, idx):
        return idx * 2 + 2

    def get_value(self, key):
        return self.heap_dict[key]

    def build_heap(self, array):
        last_idx = len(array) - 1
        start_from = self.get_parent_idx(last_idx)
        for idx, i in enumerate(array):
            self.idx_of_element[i] = idx
            self.heap_dict[i] = i.val
        for i in range(start_from, -1, -1):
            self.sift_down(i, array)
        return array

    def sift_down(self, idx, array):
        while True:
            l = self.get_left_child_idx(idx)  # noqa: E741
            r = self.get_right_child_idx(idx)

            smallest = idx
            if l < len(array) and array[l] < array[idx]:
                smallest = l
            if r < len(array) and array[r] < array[smallest]:
                smallest = r

            if smallest != idx:
                array[idx], array[smallest] = array[smallest], array[idx]
                self.idx_of_element[array[idx]], self.idx_of_element[array[smallest]] = (
                    self.idx_of_element[array[smallest]],
                    self.idx_of_element[array[idx]],
                )
                idx = smallest
            else:
                break

    def sift_up(self, idx):
        p = self.get_parent_idx(idx)
        while p >= 0 and self.heap[p] > self.heap[idx]:
            self.heap[p], self.heap[idx] = self.heap[idx], self.heap[p]
            self.idx_of_element[self.heap[p]], self.idx_of_element[self.heap[idx]] = (
                self.idx_of_element[self.heap[idx]],
                self.idx_of_element[self.heap[p]],
            )
            idx = p
            p = self.get_parent_idx(idx)

    def peek(self):
        return self.heap[0]

    def remove(self):
        self.heap[0], self.heap[-1] = self.heap[-1], self.heap[0]
        self.idx_of_element[self.heap[0]], self.idx_of_element[self.heap[-1]] = (
            self.idx_of_element[self.heap[-1]],
            self.idx_of_element[self.heap[0]],
        )

        x = self.heap.pop()
        del self.idx_of_element[x]
        self.sift_down(0, self.heap)
        return x

    def insert(self, node):
        self.heap.append(node)
        self.idx_of_element[node] = len(self.heap) - 1
        self.heap_dict[node] = node.val
        self.sift_up(len(self.heap) - 1)

    def is_empty(self):
        return len(self.heap) == 0

    def decrease_key(self, node, new_value):
        assert (
            self.heap[self.idx_of_element[node]].val > new_value
        ), "newValue must be less that current value"
        node.val = new_value
        self.heap_dict[node] = new_value
        self.sift_up(self.idx_of_element[node])
r = Node("R", -1)
b = Node("B", 6)
a = Node("A", 3)
x = Node("X", 1)
e = Node("E", 4)
# Use one of these two ways to generate Min-Heap
# Generating Min-Heap from array
my_min_heap = MinHeap([r, b, a, x, e])
# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)
# Before
print('''Min Heap - before decrease key''')
for i in my_min_heap.heap:
print(i)
print('''Min Heap - After decrease key of node [B -> -17]''')
my_min_heap.decrease_key(b, -17)
# After
for i in my_min_heap.heap:
print(i)
if __name__ == "__main__":
import doctest
doctest.testmod()
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def consolidate(
    model_type: str,
    generator_name_or_path: str,
    question_encoder_name_or_path: str,
    dest_dir: Path,
    config_name_or_path: str = None,
    generator_tokenizer_name_or_path: str = None,
    question_encoder_tokenizer_name_or_path: str = None,
):
    if config_name_or_path is None:
        config_name_or_path = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base"

    if generator_tokenizer_name_or_path is None:
        generator_tokenizer_name_or_path = generator_name_or_path

    if question_encoder_tokenizer_name_or_path is None:
        question_encoder_tokenizer_name_or_path = question_encoder_name_or_path

    model_class = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration

    # Save model.
    rag_config = RagConfig.from_pretrained(config_name_or_path)
    gen_config = AutoConfig.from_pretrained(generator_name_or_path)
    question_encoder_config = AutoConfig.from_pretrained(question_encoder_name_or_path)

    rag_config.generator = gen_config
    rag_config.question_encoder = question_encoder_config

    rag_model = model_class.from_pretrained_question_encoder_generator(
        question_encoder_name_or_path, generator_name_or_path, config=rag_config
    )
    rag_model.save_pretrained(dest_dir)

    # Sanity check.
    model_class.from_pretrained(dest_dir)

    # Save tokenizers.
    gen_tokenizer = AutoTokenizer.from_pretrained(generator_tokenizer_name_or_path)
    gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/")
    question_encoder_tokenizer = AutoTokenizer.from_pretrained(question_encoder_tokenizer_name_or_path)
    question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/")
if __name__ == "__main__":
__lowerCAmelCase : List[str] = argparse.ArgumentParser()
parser.add_argument(
'--model_type',
choices=['rag_sequence', 'rag_token'],
required=True,
type=str,
help='RAG model type: rag_sequence, rag_token',
)
parser.add_argument('--dest', type=str, required=True, help='Path to the output checkpoint directory.')
parser.add_argument('--generator_name_or_path', type=str, required=True, help='Generator model identifier')
parser.add_argument(
'--question_encoder_name_or_path', type=str, required=True, help='Question encoder model identifier'
)
parser.add_argument(
'--generator_tokenizer_name_or_path',
type=str,
help='Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``',
)
parser.add_argument(
'--question_encoder_tokenizer_name_or_path',
type=str,
help='Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``',
)
parser.add_argument(
'--config_name_or_path',
type=str,
help=(
'Identifier of the model config to use, if not provided, resolves to a base config for a given'
' ``model_type``'
),
)
__lowerCAmelCase : Any = parser.parse_args()
__lowerCAmelCase : Optional[int] = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
)
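# Example invocation (a sketch; the model identifiers are placeholders):
#   python consolidate_rag_checkpoint.py --model_type rag_sequence \
#       --generator_name_or_path facebook/bart-large-cnn \
#       --question_encoder_name_or_path facebook/dpr-question_encoder-single-nq-base \
#       --dest ./rag-sequence-checkpoint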
from __future__ import annotations
import os
from collections.abc import Mapping
EdgeT = tuple[int, int]
class Graph:
    def __init__(self, vertices: set[int], edges: Mapping[EdgeT, int]) -> None:
        self.vertices: set[int] = vertices
        self.edges: dict[EdgeT, int] = {
            (min(edge), max(edge)): weight for edge, weight in edges.items()
        }

    def add_edge(self, edge: EdgeT, weight: int) -> None:
        self.vertices.add(edge[0])
        self.vertices.add(edge[1])
        self.edges[(min(edge), max(edge))] = weight

    def prims_algorithm(self) -> Graph:
        # grow a minimum spanning subgraph one cheapest crossing edge at a time
        subgraph = Graph({min(self.vertices)}, {})
        min_edge: EdgeT
        min_weight: int
        edge: EdgeT
        weight: int

        while len(subgraph.vertices) < len(self.vertices):
            min_weight = max(self.edges.values()) + 1
            for edge, weight in self.edges.items():
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge, min_weight)
        return subgraph
def A ( lowercase = "p107_network.txt" ) -> int:
'''simple docstring'''
UpperCamelCase = os.path.abspath(os.path.dirname(lowercase ) )
UpperCamelCase = os.path.join(lowercase , lowercase )
UpperCamelCase = {}
UpperCamelCase = 42
UpperCamelCase = 42
UpperCamelCase = 42
with open(lowercase ) as f:
UpperCamelCase = f.read().strip().split('\n' )
UpperCamelCase = [line.split(',' ) for line in data]
for edgea in range(1 , len(lowercase ) ):
for edgea in range(lowercase ):
if adjaceny_matrix[edgea][edgea] != "-":
UpperCamelCase = int(adjaceny_matrix[edgea][edgea] )
UpperCamelCase = Graph(set(range(len(lowercase ) ) ) , lowercase )
UpperCamelCase = graph.prims_algorithm()
UpperCamelCase = sum(graph.edges.values() )
UpperCamelCase = sum(subgraph.edges.values() )
return initial_total - optimal_total
if __name__ == "__main__":
print(F'''{solution() = }''')
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class TFGPT2Tokenizer(metaclass=DummyObject):
    _backends = ["keras_nlp"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["keras_nlp"])
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class CLIPProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return CLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Create a list of random PIL images to feed the image processor."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, CLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, CLIPImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = CLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, CLIPImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
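Outside of this test harness, the same processor API is usually driven from a published checkpoint. A minimal sketch, assuming network access; the "openai/clip-vit-base-patch32" identifier is an assumption, not something this test file uses:

import numpy as np
from PIL import Image
from transformers import CLIPProcessor

processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")  # assumed checkpoint
image = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))
inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt")
print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']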
| 9
| 1
|
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('TEST_SAGEMAKER' , 'False' ) ) is not True , reason='Skipping test because should only be run when releasing minor transformers version' , )
@pytest.mark.usefixtures('sm_env' )
@parameterized_class(
[
{
'framework': 'pytorch',
'script': 'run_glue.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 650, 'eval_accuracy': 0.7, 'eval_loss': 0.6},
},
{
'framework': 'pytorch',
'script': 'run_ddp.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 600, 'eval_accuracy': 0.7, 'eval_loss': 0.6},
},
{
'framework': 'tensorflow',
'script': 'run_tf_dist.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 600, 'eval_accuracy': 0.6, 'eval_loss': 0.7},
},
] )
class MultiNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count):
        job_name = f"{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}"
        # distributed data settings
        distribution = {"smdistributed": {"dataparallel": {"enabled": True}}} if self.script != "run_ddp.py" else None
        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=job_name,
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={**self.env.distributed_hyperparameters, "model_name_or_path": self.model_name_or_path},
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    @parameterized.expand([(2,)])
    def test_script(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)
        # run training
        estimator.fit()
        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999_999)
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)
        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
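These tests only run when explicitly enabled by the TEST_SAGEMAKER environment variable checked in the skipif decorator above. A hypothetical local invocation (the test-file path is an assumption):

# TEST_SAGEMAKER=True python -m pytest -s tests/sagemaker/test_multi_node_data_parallel.py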
| 46
|
'''simple docstring'''
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class VideoMAEModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=10,
        num_channels=3,
        patch_size=2,
        tubelet_size=2,
        num_frames=2,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        mask_ratio=0.9,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.tubelet_size = tubelet_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = (num_frames // tubelet_size) * self.num_patches_per_frame

        # use this variable to define bool_masked_pos
        self.num_masks = int(mask_ratio * self.seq_length)
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : Union[str, Any] = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
_lowercase : Tuple = None
if self.use_labels:
_lowercase : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowercase : Optional[Any] = self.get_config()
return config, pixel_values, labels
def _lowerCamelCase ( self ):
"""simple docstring"""
return VideoMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , tubelet_size=self.tubelet_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_UpperCamelCase , initializer_range=self.initializer_range , )
def _lowerCamelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
_lowercase : Dict = VideoMAEModel(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
_lowercase : Dict = model(_UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCamelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
_lowercase : Dict = VideoMAEForPreTraining(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
_lowercase : Optional[int] = torch.ones((self.num_masks,) )
_lowercase : List[Any] = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0 ) )] )
_lowercase : Tuple = mask.expand(self.batch_size , -1 ).bool()
_lowercase : Tuple = model(_UpperCamelCase , _UpperCamelCase )
# model only returns predictions for masked patches
_lowercase : Tuple = mask.sum().item()
_lowercase : Optional[Any] = 3 * self.tubelet_size * self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_masked_patches, decoder_num_labels) )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : List[Any] = self.prepare_config_and_inputs()
_lowercase , _lowercase , _lowercase : List[Any] = config_and_inputs
_lowercase : str = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class VideoMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": VideoMAEModel, "video-classification": VideoMAEForVideoClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : int = VideoMAEModelTester(self )
_lowercase : Union[str, Any] = ConfigTester(self , config_class=_UpperCamelCase , has_text_modality=_UpperCamelCase , hidden_size=37 )
def _lowerCamelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=False ):
"""simple docstring"""
_lowercase : Any = copy.deepcopy(_UpperCamelCase )
if model_class == VideoMAEForPreTraining:
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
_lowercase : Union[str, Any] = torch.ones((self.model_tester.num_masks,) )
_lowercase : Dict = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0 ) )] )
_lowercase : List[str] = mask.expand(self.model_tester.batch_size , -1 ).bool()
_lowercase : Any = bool_masked_pos.to(_UpperCamelCase )
if return_labels:
if model_class in [
*get_values(_UpperCamelCase ),
]:
_lowercase : Any = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_UpperCamelCase )
return inputs_dict
def _lowerCamelCase ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="VideoMAE does not use inputs_embeds" )
def _lowerCamelCase ( self ):
"""simple docstring"""
pass
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase , _lowercase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowercase : List[str] = model_class(_UpperCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_lowercase : Any = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_UpperCamelCase , nn.Linear ) )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase , _lowercase : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowercase : List[Any] = model_class(_UpperCamelCase )
_lowercase : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowercase : str = [*signature.parameters.keys()]
_lowercase : List[Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , _UpperCamelCase )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCamelCase )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*_UpperCamelCase )
@slow
def _lowerCamelCase ( self ):
"""simple docstring"""
for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowercase : int = VideoMAEModel.from_pretrained(_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase )
def _lowerCamelCase ( self ):
"""simple docstring"""
if not self.has_attentions:
pass
else:
_lowercase , _lowercase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
_lowercase : Optional[int] = True
for model_class in self.all_model_classes:
_lowercase : List[str] = self.model_tester.seq_length - self.model_tester.num_masks
_lowercase : List[Any] = (
num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
)
_lowercase : int = True
_lowercase : str = False
_lowercase : Any = True
_lowercase : Optional[Any] = model_class(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
with torch.no_grad():
_lowercase : Union[str, Any] = model(**self._prepare_for_class(_UpperCamelCase , _UpperCamelCase ) )
_lowercase : List[str] = outputs.attentions
self.assertEqual(len(_UpperCamelCase ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
_lowercase : Tuple = True
_lowercase : Optional[Any] = model_class(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
with torch.no_grad():
_lowercase : Dict = model(**self._prepare_for_class(_UpperCamelCase , _UpperCamelCase ) )
_lowercase : Tuple = outputs.attentions
self.assertEqual(len(_UpperCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
_lowercase : str = len(_UpperCamelCase )
# Check attention is always last and order is fine
_lowercase : List[Any] = True
_lowercase : List[str] = True
_lowercase : Any = model_class(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
with torch.no_grad():
_lowercase : Optional[int] = model(**self._prepare_for_class(_UpperCamelCase , _UpperCamelCase ) )
self.assertEqual(out_len + 1 , len(_UpperCamelCase ) )
_lowercase : Optional[int] = outputs.attentions
self.assertEqual(len(_UpperCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def _lowerCamelCase ( self ):
"""simple docstring"""
def check_hidden_states_output(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
_lowercase : Optional[int] = model_class(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
with torch.no_grad():
_lowercase : Tuple = model(**self._prepare_for_class(_UpperCamelCase , _UpperCamelCase ) )
_lowercase : List[str] = outputs.hidden_states
_lowercase : List[Any] = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(_UpperCamelCase ) , _UpperCamelCase )
_lowercase : List[str] = self.model_tester.seq_length - self.model_tester.num_masks
_lowercase : Optional[Any] = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
_lowercase , _lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowercase : Dict = True
check_hidden_states_output(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowercase : List[str] = True
check_hidden_states_output(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def _lowerCamelCase ( self ):
"""simple docstring"""
pass
def prepare_video():
    # short video of someone eating spaghetti, used as a shared test fixture
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(file)
    return list(video)
@require_torch
@require_vision
class VideoMAEModelIntegrationTest(unittest.TestCase):
@cached_property
def _lowerCamelCase ( self ):
"""simple docstring"""
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : Tuple = VideoMAEForVideoClassification.from_pretrained("MCG-NJU/videomae-base-finetuned-kinetics" ).to(
_UpperCamelCase )
_lowercase : Dict = self.default_image_processor
_lowercase : Optional[Any] = prepare_video()
_lowercase : Union[str, Any] = image_processor(_UpperCamelCase , return_tensors="pt" ).to(_UpperCamelCase )
# forward pass
with torch.no_grad():
_lowercase : str = model(**_UpperCamelCase )
# verify the logits
_lowercase : List[Any] = torch.Size((1, 400) )
self.assertEqual(outputs.logits.shape , _UpperCamelCase )
_lowercase : int = torch.tensor([0.3_6_6_9, -0.0_6_8_8, -0.2_4_2_1] ).to(_UpperCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _UpperCamelCase , atol=1E-4 ) )
@slow
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : Union[str, Any] = VideoMAEForPreTraining.from_pretrained("MCG-NJU/videomae-base-short" ).to(_UpperCamelCase )
_lowercase : Dict = self.default_image_processor
_lowercase : Optional[Any] = prepare_video()
_lowercase : List[Any] = image_processor(_UpperCamelCase , return_tensors="pt" ).to(_UpperCamelCase )
# add boolean mask, indicating which patches to mask
_lowercase : int = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos" , filename="bool_masked_pos.pt" )
_lowercase : Any = torch.load(_UpperCamelCase )
# forward pass
with torch.no_grad():
_lowercase : List[Any] = model(**_UpperCamelCase )
# verify the logits
_lowercase : Dict = torch.Size([1, 1408, 1536] )
_lowercase : Tuple = torch.tensor(
[[0.7_9_9_4, 0.9_6_1_2, 0.8_5_0_8], [0.7_4_0_1, 0.8_9_5_8, 0.8_3_0_2], [0.5_8_6_2, 0.7_4_6_8, 0.7_3_2_5]] , device=_UpperCamelCase )
self.assertEqual(outputs.logits.shape , _UpperCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , _UpperCamelCase , atol=1E-4 ) )
# verify the loss (`config.norm_pix_loss` = `True`)
_lowercase : Tuple = torch.tensor([0.5_1_4_2] , device=_UpperCamelCase )
self.assertTrue(torch.allclose(outputs.loss , _UpperCamelCase , atol=1E-4 ) )
# verify the loss (`config.norm_pix_loss` = `False`)
_lowercase : Dict = VideoMAEForPreTraining.from_pretrained("MCG-NJU/videomae-base-short" , norm_pix_loss=_UpperCamelCase ).to(
_UpperCamelCase )
with torch.no_grad():
_lowercase : Optional[int] = model(**_UpperCamelCase )
        expected_loss = torch.tensor([0.6469], device=torch_device)
        self.assertTrue(torch.allclose(outputs.loss, expected_loss, atol=1e-4))
| 250
| 0
|
from __future__ import annotations


def is_palindrome(n: int | str) -> bool:
    """Return True if n reads the same forwards and backwards."""
    n = str(n)
    return n == n[::-1]


def solution(limit: int = 1_000_000) -> int:
    """
    Return the sum of all numbers below limit that are palindromic in base 10
    and in base 2 (Project Euler problem 36).
    """
    total = 0
    for i in range(1, limit):
        if is_palindrome(i) and is_palindrome(bin(i).split("b")[1]):
            total += i
    return total


if __name__ == "__main__":
    print(solution(int(input().strip())))
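A quick sanity check of the helper: 585 is the classic double-base palindrome, reading the same both ways in base 10 and as 0b1001001001.

assert is_palindrome(585) and is_palindrome(bin(585)[2:])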
| 146
|
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class SeedResizeStableDiffusionPipeline(DiffusionPipeline):
    """
    Pipeline for text-to-image generation that keeps results similar across
    output sizes for a fixed seed.
    """
    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()
        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )
    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        # set slice_size = `None` to disable attention slicing
        self.enable_attention_slicing(None)
@torch.no_grad()
def __call__(self : Any , _UpperCAmelCase : Union[str, List[str]] , _UpperCAmelCase : int = 512 , _UpperCAmelCase : int = 512 , _UpperCAmelCase : int = 50 , _UpperCAmelCase : float = 7.5 , _UpperCAmelCase : Optional[Union[str, List[str]]] = None , _UpperCAmelCase : Optional[int] = 1 , _UpperCAmelCase : float = 0.0 , _UpperCAmelCase : Optional[torch.Generator] = None , _UpperCAmelCase : Optional[torch.FloatTensor] = None , _UpperCAmelCase : Optional[str] = "pil" , _UpperCAmelCase : bool = True , _UpperCAmelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , _UpperCAmelCase : int = 1 , _UpperCAmelCase : Optional[torch.FloatTensor] = None , **_UpperCAmelCase : Any , ) -> Tuple:
"""simple docstring"""
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
lowercase__ = 1
elif isinstance(_UpperCAmelCase , _UpperCAmelCase ):
lowercase__ = len(_UpperCAmelCase )
else:
raise ValueError(f'''`prompt` has to be of type `str` or `list` but is {type(_UpperCAmelCase )}''' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(_UpperCAmelCase , _UpperCAmelCase ) or callback_steps <= 0)
):
raise ValueError(
f'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
f''' {type(_UpperCAmelCase )}.''' )
# get prompt text embeddings
lowercase__ = self.tokenizer(
_UpperCAmelCase , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , )
lowercase__ = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
lowercase__ = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"""The following part of your input was truncated because CLIP can only handle sequences up to"""
f''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
lowercase__ = text_input_ids[:, : self.tokenizer.model_max_length]
if text_embeddings is None:
lowercase__ = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
lowercase__ , lowercase__ , lowercase__ = text_embeddings.shape
lowercase__ = text_embeddings.repeat(1 , _UpperCAmelCase , 1 )
lowercase__ = text_embeddings.view(bs_embed * num_images_per_prompt , _UpperCAmelCase , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
lowercase__ = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
lowercase__ = 42
if negative_prompt is None:
lowercase__ = [""""""]
elif type(_UpperCAmelCase ) is not type(_UpperCAmelCase ):
raise TypeError(
f'''`negative_prompt` should be the same type to `prompt`, but got {type(_UpperCAmelCase )} !='''
f''' {type(_UpperCAmelCase )}.''' )
elif isinstance(_UpperCAmelCase , _UpperCAmelCase ):
lowercase__ = [negative_prompt]
elif batch_size != len(_UpperCAmelCase ):
raise ValueError(
f'''`negative_prompt`: {negative_prompt} has batch size {len(_UpperCAmelCase )}, but `prompt`:'''
f''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'''
""" the batch size of `prompt`.""" )
else:
lowercase__ = negative_prompt
lowercase__ = text_input_ids.shape[-1]
lowercase__ = self.tokenizer(
_UpperCAmelCase , padding="""max_length""" , max_length=_UpperCAmelCase , truncation=_UpperCAmelCase , return_tensors="""pt""" , )
lowercase__ = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
lowercase__ = uncond_embeddings.shape[1]
lowercase__ = uncond_embeddings.repeat(_UpperCAmelCase , _UpperCAmelCase , 1 )
lowercase__ = uncond_embeddings.view(batch_size * num_images_per_prompt , _UpperCAmelCase , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowercase__ = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_shape_reference = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
lowercase__ = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
lowercase__ = torch.randn(
_UpperCAmelCase , generator=_UpperCAmelCase , device="""cpu""" , dtype=_UpperCAmelCase ).to(self.device )
lowercase__ = torch.randn(_UpperCAmelCase , generator=_UpperCAmelCase , device="""cpu""" , dtype=_UpperCAmelCase ).to(
self.device )
else:
lowercase__ = torch.randn(
_UpperCAmelCase , generator=_UpperCAmelCase , device=self.device , dtype=_UpperCAmelCase )
lowercase__ = torch.randn(_UpperCAmelCase , generator=_UpperCAmelCase , device=self.device , dtype=_UpperCAmelCase )
else:
if latents_reference.shape != latents_shape:
raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
lowercase__ = latents_reference.to(self.device )
lowercase__ = latents.to(self.device )
# This is the key part of the pipeline where we
# try to ensure that the generated images w/ the same seed
# but different sizes actually result in similar images
lowercase__ = (latents_shape[3] - latents_shape_reference[3]) // 2
lowercase__ = (latents_shape[2] - latents_shape_reference[2]) // 2
lowercase__ = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
lowercase__ = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
lowercase__ = 0 if dx < 0 else dx
lowercase__ = 0 if dy < 0 else dy
lowercase__ = max(-dx , 0 )
lowercase__ = max(-dy , 0 )
# import pdb
# pdb.set_trace()
lowercase__ = latents_reference[:, :, dy : dy + h, dx : dx + w]
# set timesteps
self.scheduler.set_timesteps(_UpperCAmelCase )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
lowercase__ = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
lowercase__ = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
lowercase__ = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
lowercase__ = {}
if accepts_eta:
lowercase__ = eta
for i, t in enumerate(self.progress_bar(_UpperCAmelCase ) ):
# expand the latents if we are doing classifier free guidance
lowercase__ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowercase__ = self.scheduler.scale_model_input(_UpperCAmelCase , _UpperCAmelCase )
# predict the noise residual
lowercase__ = self.unet(_UpperCAmelCase , _UpperCAmelCase , encoder_hidden_states=_UpperCAmelCase ).sample
# perform guidance
if do_classifier_free_guidance:
lowercase__ , lowercase__ = noise_pred.chunk(2 )
lowercase__ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
lowercase__ = self.scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample
        image = (image / 2 + 0.5).clamp(0, 1)
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()
if self.safety_checker is not None:
lowercase__ = self.feature_extractor(self.numpy_to_pil(_UpperCAmelCase ) , return_tensors="""pt""" ).to(
self.device )
lowercase__ , lowercase__ = self.safety_checker(
images=_UpperCAmelCase , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) )
else:
lowercase__ = None
if output_type == "pil":
lowercase__ = self.numpy_to_pil(_UpperCAmelCase )
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=_UpperCAmelCase , nsfw_content_detected=_UpperCAmelCase )
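The dx/dy arithmetic in the latents branch above centre-crops (or centre-pads) the reference noise into the target latent grid, so that one seed yields similar compositions at different resolutions. A standalone sketch of the same indexing; the shapes are arbitrary:

import torch

def center_overlay(reference: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
    # copy the overlapping central window of `reference` into `target`
    dx = (target.shape[3] - reference.shape[3]) // 2
    dy = (target.shape[2] - reference.shape[2]) // 2
    w = reference.shape[3] if dx >= 0 else reference.shape[3] + 2 * dx
    h = reference.shape[2] if dy >= 0 else reference.shape[2] + 2 * dy
    tx, ty = max(dx, 0), max(dy, 0)
    rx, ry = max(-dx, 0), max(-dy, 0)
    target[:, :, ty : ty + h, tx : tx + w] = reference[:, :, ry : ry + h, rx : rx + w]
    return target

noise = torch.randn(1, 4, 96, 96)
reference = torch.randn(1, 4, 64, 64)
out = center_overlay(reference, noise.clone())
assert torch.equal(out[:, :, 16:80, 16:80], reference)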
| 146
| 1
|
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
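A minimal sketch of a concrete command, assuming the caller passes in the object returned by parser.add_subparsers() (as the transformers CLI does, despite the ArgumentParser annotation); the command name and behaviour here are illustrative only:

class HelloCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser):
        hello_parser = parser.add_parser("hello", help="Print a greeting")
        hello_parser.set_defaults(func=lambda args: HelloCommand())

    def run(self):
        print("hello")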
| 107
|
def remove_duplicates(key: str) -> str:
    """Remove repeated alphabetic characters from a keyword, keeping first occurrences."""
    key_no_dups = ""
    for ch in key:
        if ch == " " or (ch not in key_no_dups and ch.isalpha()):
            key_no_dups += ch
    return key_no_dups


def create_cipher_map(key: str) -> dict[str, str]:
    """Build a substitution alphabet from a keyword."""
    alphabet = [chr(i + 65) for i in range(26)]
    # Remove duplicate characters from key
    key = remove_duplicates(key.upper())
    offset = len(key)
    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key)}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(cipher_alphabet), 26):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet


def encipher(message: str, cipher_map: dict[str, str]) -> str:
    """Encipher message using the substitution map; unmapped characters pass through."""
    return "".join(cipher_map.get(ch, ch) for ch in message.upper())


def decipher(message: str, cipher_map: dict[str, str]) -> str:
    """Decipher message by inverting the substitution map."""
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch, ch) for ch in message.upper())


def main() -> None:
    message = input("Enter message to encode or decode: ").strip()
    key = input("Enter keyword: ").strip()
    option = input("Encipher or decipher? E/D: ").strip()[0].lower()
    try:
        func = {"e": encipher, "d": decipher}[option]
    except KeyError:
        raise KeyError("invalid input option")
    cipher_map = create_cipher_map(key)
    print(func(message, cipher_map))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
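A small round-trip check of the functions above; the keyword is arbitrary and spaces pass through untouched:

cipher_map = create_cipher_map("HARPSICHORD")
assert decipher(encipher("HELLO WORLD", cipher_map), cipher_map) == "HELLO WORLD"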
| 107
| 1
|
"""simple docstring"""
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def compare_string(string1: str, string2: str) -> str | Literal[False]:
    """
    Return the merged pattern if the strings differ in at most one position,
    otherwise False.

    >>> compare_string('0010', '0110')
    '0_10'
    >>> compare_string('0110', '1101')
    False
    """
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = "_"
    if count > 1:
        return False
    else:
        return "".join(list1)


def check(binary: list[str]) -> list[str]:
    """Repeatedly merge implicant pairs and collect the prime implicants."""
    pi = []
    while True:
        check1 = ["$"] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                # only pairs that actually merge (k is not False) are combined
                if k is not False:
                    check1[i] = "*"
                    check1[j] = "*"
                    temp.append(k)
        for i in range(len(binary)):
            if check1[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))


def decimal_to_binary(no_of_variable: int, minterms: Sequence[float]) -> list[str]:
    temp = []
    for minterm in minterms:
        string = ""
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp


def is_for_table(string1: str, string2: str, count: int) -> bool:
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count


def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    temp = []
    select = [0] * len(chart)
    # any minterm covered by exactly one implicant makes that implicant essential
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(select)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    # greedily cover whatever minterms remain
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem])
        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0


def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    chart = [[0 for x in range(len(binary))] for x in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count("_")
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart


def main() -> None:
    no_of_variable = int(input("Enter the no. of variables\n"))
    minterms = [
        float(x)
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n"
        ).split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)

    prime_implicants = check(binary)
    print("Prime Implicants are:")
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)

    essential_prime_implicants = selection(chart, prime_implicants)
    print("Essential Prime Implicants are:")
    print(essential_prime_implicants)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
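A worked example for two variables with minterms 0, 1 and 3 (passing plain ints here; main() feeds floats, which merely yields strings like '1.0' in place of '1'): 00 and 01 merge into 0_, 01 and 11 merge into _1, and both merged implicants are essential.

binary = decimal_to_binary(2, [0, 1, 3])
assert binary == ["00", "01", "11"]
implicants = sorted(check(binary))
assert implicants == ["0_", "_1"]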
| 271
|
"""simple docstring"""
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class AutoencoderKLOutput(BaseOutput):
    """Output of the AutoencoderKL encoding method."""

    latent_dist: "DiagonalGaussianDistribution"


class AutoencoderKL(ModelMixin, ConfigMixin):
    _supports_gradient_checkpointing = True

    @register_to_config
    def __init__(self, in_channels: int = 3, out_channels: int = 3, down_block_types: Tuple[str] = ("DownEncoderBlock2D",), up_block_types: Tuple[str] = ("UpDecoderBlock2D",), block_out_channels: Tuple[int] = (64,), layers_per_block: int = 1, act_fn: str = "silu", latent_channels: int = 4, norm_num_groups: int = 32, sample_size: int = 32, scaling_factor: float = 0.18215, ):
        super().__init__()
        # pass init params to Encoder
        self.encoder = Encoder(in_channels=in_channels, out_channels=latent_channels, down_block_types=down_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, act_fn=act_fn, norm_num_groups=norm_num_groups, double_z=True, )
        # pass init params to Decoder
        self.decoder = Decoder(in_channels=latent_channels, out_channels=out_channels, up_block_types=up_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, norm_num_groups=norm_num_groups, act_fn=act_fn, )
        self.quant_conv = nn.Conv2d(2 * latent_channels, 2 * latent_channels, 1)
        self.post_quant_conv = nn.Conv2d(latent_channels, latent_channels, 1)
        self.use_slicing = False
        self.use_tiling = False
        # only relevant if vae tiling is enabled
        self.tile_sample_min_size = self.config.sample_size
        sample_size = (
            self.config.sample_size[0]
            if isinstance(self.config.sample_size, (list, tuple))
            else self.config.sample_size
        )
        self.tile_latent_min_size = int(sample_size / (2 ** (len(self.config.block_out_channels) - 1)))
        self.tile_overlap_factor = 0.25
def lowerCAmelCase_ ( self : Union[str, Any] , _UpperCAmelCase : int , _UpperCAmelCase : Tuple=False ):
if isinstance(_UpperCAmelCase , (Encoder, Decoder) ):
_A = value
def lowerCAmelCase_ ( self : List[str] , _UpperCAmelCase : bool = True ):
_A = use_tiling
def lowerCAmelCase_ ( self : Union[str, Any] ):
self.enable_tiling(_UpperCAmelCase )
def lowerCAmelCase_ ( self : Dict ):
_A = True
def lowerCAmelCase_ ( self : str ):
_A = False
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def lowerCAmelCase_ ( self : str ):
_A = {}
def fn_recursive_add_processors(_UpperCAmelCase : str , _UpperCAmelCase : torch.nn.Module , _UpperCAmelCase : Dict[str, AttentionProcessor] ):
if hasattr(_UpperCAmelCase , 'set_processor' ):
_A = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(F'''{name}.{sub_name}''' , _UpperCAmelCase , _UpperCAmelCase )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
return processors
def lowerCAmelCase_ ( self : str , _UpperCAmelCase : Union[AttentionProcessor, Dict[str, AttentionProcessor]] ):
_A = len(self.attn_processors.keys() )
if isinstance(_UpperCAmelCase , _UpperCAmelCase ) and len(_UpperCAmelCase ) != count:
raise ValueError(
F'''A dict of processors was passed, but the number of processors {len(_UpperCAmelCase )} does not match the'''
F''' number of attention layers: {count}. Please make sure to pass {count} processor classes.''' )
def fn_recursive_attn_processor(_UpperCAmelCase : str , _UpperCAmelCase : torch.nn.Module , _UpperCAmelCase : int ):
if hasattr(_UpperCAmelCase , 'set_processor' ):
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
module.set_processor(_UpperCAmelCase )
else:
module.set_processor(processor.pop(F'''{name}.processor''' ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(F'''{name}.{sub_name}''' , _UpperCAmelCase , _UpperCAmelCase )
for name, module in self.named_children():
fn_recursive_attn_processor(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def lowerCAmelCase_ ( self : List[str] ):
self.set_attn_processor(AttnProcessor() )
@apply_forward_hook
def lowerCAmelCase_ ( self : int , _UpperCAmelCase : torch.FloatTensor , _UpperCAmelCase : bool = True ):
if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
return self.tiled_encode(_UpperCAmelCase , return_dict=_UpperCAmelCase )
if self.use_slicing and x.shape[0] > 1:
_A = [self.encoder(_UpperCAmelCase ) for x_slice in x.split(1 )]
_A = torch.cat(_UpperCAmelCase )
else:
_A = self.encoder(_UpperCAmelCase )
_A = self.quant_conv(_UpperCAmelCase )
_A = DiagonalGaussianDistribution(_UpperCAmelCase )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=_UpperCAmelCase )
def lowerCAmelCase_ ( self : Any , _UpperCAmelCase : torch.FloatTensor , _UpperCAmelCase : bool = True ):
if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
return self.tiled_decode(_UpperCAmelCase , return_dict=_UpperCAmelCase )
_A = self.post_quant_conv(_UpperCAmelCase )
_A = self.decoder(_UpperCAmelCase )
if not return_dict:
return (dec,)
return DecoderOutput(sample=_UpperCAmelCase )
@apply_forward_hook
def lowerCAmelCase_ ( self : str , _UpperCAmelCase : torch.FloatTensor , _UpperCAmelCase : bool = True ):
if self.use_slicing and z.shape[0] > 1:
_A = [self._decode(_UpperCAmelCase ).sample for z_slice in z.split(1 )]
_A = torch.cat(_UpperCAmelCase )
else:
_A = self._decode(_UpperCAmelCase ).sample
if not return_dict:
return (decoded,)
return DecoderOutput(sample=_UpperCAmelCase )
def lowerCAmelCase_ ( self : str , _UpperCAmelCase : int , _UpperCAmelCase : Dict , _UpperCAmelCase : Any ):
_A = min(a.shape[2] , b.shape[2] , _UpperCAmelCase )
for y in range(_UpperCAmelCase ):
_A = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
return b
def lowerCAmelCase_ ( self : Optional[Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[Any] ):
_A = min(a.shape[3] , b.shape[3] , _UpperCAmelCase )
for x in range(_UpperCAmelCase ):
_A = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
return b
def lowerCAmelCase_ ( self : str , _UpperCAmelCase : torch.FloatTensor , _UpperCAmelCase : bool = True ):
_A = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor) )
_A = int(self.tile_latent_min_size * self.tile_overlap_factor )
_A = self.tile_latent_min_size - blend_extent
# Split the image into 512x512 tiles and encode them separately.
_A = []
for i in range(0 , x.shape[2] , _UpperCAmelCase ):
_A = []
for j in range(0 , x.shape[3] , _UpperCAmelCase ):
_A = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
_A = self.encoder(_UpperCAmelCase )
_A = self.quant_conv(_UpperCAmelCase )
row.append(_UpperCAmelCase )
rows.append(_UpperCAmelCase )
_A = []
for i, row in enumerate(_UpperCAmelCase ):
_A = []
for j, tile in enumerate(_UpperCAmelCase ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
_A = self.blend_v(rows[i - 1][j] , _UpperCAmelCase , _UpperCAmelCase )
if j > 0:
_A = self.blend_h(row[j - 1] , _UpperCAmelCase , _UpperCAmelCase )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(_UpperCAmelCase , dim=3 ) )
_A = torch.cat(_UpperCAmelCase , dim=2 )
_A = DiagonalGaussianDistribution(_UpperCAmelCase )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=_UpperCAmelCase )
def lowerCAmelCase_ ( self : Tuple , _UpperCAmelCase : torch.FloatTensor , _UpperCAmelCase : bool = True ):
_A = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor) )
_A = int(self.tile_sample_min_size * self.tile_overlap_factor )
_A = self.tile_sample_min_size - blend_extent
# Split z into overlapping 64x64 tiles and decode them separately.
# The tiles have an overlap to avoid seams between tiles.
_A = []
for i in range(0 , z.shape[2] , _UpperCAmelCase ):
_A = []
for j in range(0 , z.shape[3] , _UpperCAmelCase ):
_A = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
_A = self.post_quant_conv(_UpperCAmelCase )
_A = self.decoder(_UpperCAmelCase )
row.append(_UpperCAmelCase )
rows.append(_UpperCAmelCase )
_A = []
for i, row in enumerate(_UpperCAmelCase ):
_A = []
for j, tile in enumerate(_UpperCAmelCase ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
_A = self.blend_v(rows[i - 1][j] , _UpperCAmelCase , _UpperCAmelCase )
if j > 0:
_A = self.blend_h(row[j - 1] , _UpperCAmelCase , _UpperCAmelCase )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(_UpperCAmelCase , dim=3 ) )
_A = torch.cat(_UpperCAmelCase , dim=2 )
if not return_dict:
return (dec,)
return DecoderOutput(sample=_UpperCAmelCase )
def lowerCAmelCase_ ( self : List[str] , _UpperCAmelCase : torch.FloatTensor , _UpperCAmelCase : bool = False , _UpperCAmelCase : bool = True , _UpperCAmelCase : Optional[torch.Generator] = None , ):
_A = sample
_A = self.encode(_UpperCAmelCase ).latent_dist
if sample_posterior:
_A = posterior.sample(generator=_UpperCAmelCase )
else:
_A = posterior.mode()
_A = self.decode(_UpperCAmelCase ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=_UpperCAmelCase )
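The blend helpers above linearly cross-fade a fixed-width seam between neighbouring tiles. A standalone sketch of the same ramp on a single row; the tensor contents are arbitrary:

import torch

a = torch.zeros(1, 1, 4, 8)  # tile above / to the left
b = torch.ones(1, 1, 4, 8)   # current tile
blend_extent = 4
for x in range(blend_extent):
    b[:, :, :, x] = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
print(b[0, 0, 0, :4])  # tensor([0.0000, 0.2500, 0.5000, 0.7500]), a smooth ramp across the seam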
| 271
| 1
|
from __future__ import annotations
import requests
def get_hackernews_story(story_id: str) -> dict:
    url = f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"
    return requests.get(url).json()


def hackernews_top_stories(max_stories: int = 10) -> list[dict]:
    """Get the top max_stories posts from HackerNews - https://news.ycombinator.com/"""
    url = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"
    story_ids = requests.get(url).json()[:max_stories]
    return [get_hackernews_story(story_id) for story_id in story_ids]


def hackernews_top_stories_as_markdown(max_stories: int = 10) -> str:
    stories = hackernews_top_stories(max_stories)
    return "\n".join("* [{title}]({url})".format(**story) for story in stories)
if __name__ == "__main__":
print(hackernews_top_stories_as_markdown())
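The markdown builder only needs dicts with "title" and "url" keys, so its formatting can be sanity-checked without touching the network:

story = {"title": "Example", "url": "https://example.com"}
assert "* [{title}]({url})".format(**story) == "* [Example](https://example.com)"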
| 306
|
"""simple docstring"""
values = {
    0: "0",
    1: "1",
    2: "2",
    3: "3",
    4: "4",
    5: "5",
    6: "6",
    7: "7",
    8: "8",
    9: "9",
    10: "a",
    11: "b",
    12: "c",
    13: "d",
    14: "e",
    15: "f",
}


def decimal_to_hexadecimal(decimal: float) -> str:
    """Take an integer-valued decimal number and return its hexadecimal string."""
    assert type(decimal) in (int, float) and decimal == int(decimal)
    decimal = int(decimal)
    hexadecimal = ""
    negative = False
    if decimal < 0:
        negative = True
        decimal *= -1
    while decimal > 0:
        decimal, remainder = divmod(decimal, 16)
        hexadecimal = values[remainder] + hexadecimal
    hexadecimal = "0x" + hexadecimal
    if negative:
        hexadecimal = "-" + hexadecimal
    return hexadecimal
if __name__ == "__main__":
import doctest
doctest.testmod()
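For non-zero integers the function mirrors Python's built-in hex():

assert decimal_to_hexadecimal(5) == hex(5) == "0x5"
assert decimal_to_hexadecimal(-256) == hex(-256) == "-0x100"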
| 109
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {'configuration_encoder_decoder': ['EncoderDecoderConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_encoder_decoder'] = ['EncoderDecoderModel']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_encoder_decoder'] = ['TFEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_encoder_decoder'] = ['FlaxEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
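A small sketch of what the lazy-module swap buys, assuming transformers is installed with torch: importing the package is cheap, and the framework-specific submodule is only loaded on first attribute access.

import importlib

module = importlib.import_module("transformers.models.encoder_decoder")
model_cls = module.EncoderDecoderModel  # the torch submodule is imported only here
print(model_cls.__name__)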
| 238
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    'configuration_vivit': ['VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'VivitConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['image_processing_vivit'] = ['VivitImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_vivit'] = [
        'VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'VivitModel',
        'VivitPreTrainedModel',
        'VivitForVideoClassification',
    ]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 238
| 1
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model',
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'camembert-base': 512,
}

SPIECE_UNDERLINE = '▁'
class CamembertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"], sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs) -> None:
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token,
            pad_token=pad_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
        # sentencepiece vocabulary (this is the case for <s> and </s>)
        self.fairseq_tokens_to_ids = {'<s>NOTUSED': 0, '<pad>': 1, '</s>NOTUSED': 2, '<unk>': 3}
        self.fairseq_offset = len(self.fairseq_tokens_to_ids)
        self.fairseq_tokens_to_ids['<mask>'] = len(self.sp_model) + len(self.fairseq_tokens_to_ids)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self) -> int:
        return len(self.fairseq_tokens_to_ids) + len(self.sp_model)

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        elif self.sp_model.PieceToId(token) == 0:
            # Convert sentence piece unk token to fairseq unk token index
            return self.unk_token_id
        return self.fairseq_offset + self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index: int) -> str:
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        current_sub_tokens = []
        out_string = ''
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += ' '
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def __getstate__(self):
        state = self.__dict__.copy()
        state['sp_model'] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, 'sp_model_kwargs'):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, 'wb') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
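# --- Hedged usage sketch (added; not part of the original file) ---
# A minimal example of driving the SentencePiece-backed tokenizer above.
# Downloading 'camembert-base' from the Hub is an assumption (needs network);
# any local sentencepiece.bpe.model path would work as well.
if __name__ == "__main__":
    tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
    ids = tokenizer("J'aime le camembert !").input_ids
    print(ids)                                   # cls/sep-wrapped token ids
    print(tokenizer.convert_ids_to_tokens(ids))  # SentencePiece pieces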
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class YolosImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, min_resolution=30, max_resolution=4_00, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], do_rescale=True, rescale_factor=1 / 2_55, do_pad=True):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {'shortest_edge': 18, 'longest_edge': 13_33}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size['shortest_edge'] * h / w)
                expected_width = self.size['shortest_edge']
            elif w > h:
                expected_height = self.size['shortest_edge']
                expected_width = int(self.size['shortest_edge'] * w / h)
            else:
                expected_height = self.size['shortest_edge']
                expected_width = self.size['shortest_edge']
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
@require_torch
@require_vision
class YolosImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = YolosImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = YolosImageProcessingTester(self)
    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, 'image_mean'))
        self.assertTrue(hasattr(image_processing, 'image_std'))
        self.assertTrue(hasattr(image_processing, 'do_normalize'))
        self.assertTrue(hasattr(image_processing, 'do_resize'))
        self.assertTrue(hasattr(image_processing, 'size'))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {'shortest_edge': 18, 'longest_edge': 13_33})
        self.assertEqual(image_processor.do_pad, True)
        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False)
        self.assertEqual(image_processor.size, {'shortest_edge': 42, 'longest_edge': 84})
        self.assertEqual(image_processor.do_pad, False)
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
# Initialize image_processing
lowercase_ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowercase_ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=A )
for image in image_inputs:
self.assertIsInstance(A , Image.Image )
# Test not batched input
lowercase_ : Optional[int] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
lowercase_ , lowercase_ : Union[str, Any] = self.image_processor_tester.get_expected_values(A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowercase_ , lowercase_ : Dict = self.image_processor_tester.get_expected_values(A , batched=A )
lowercase_ : str = image_processing(A , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
    def test_call_numpy(self):
# Initialize image_processing
lowercase_ : Any = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowercase_ : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=A , numpify=A )
for image in image_inputs:
self.assertIsInstance(A , np.ndarray )
# Test not batched input
lowercase_ : List[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
lowercase_ , lowercase_ : int = self.image_processor_tester.get_expected_values(A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowercase_ : Optional[int] = image_processing(A , return_tensors='''pt''' ).pixel_values
lowercase_ , lowercase_ : List[Any] = self.image_processor_tester.get_expected_values(A , batched=A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
    def test_call_pytorch(self):
# Initialize image_processing
lowercase_ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowercase_ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=A , torchify=A )
for image in image_inputs:
self.assertIsInstance(A , torch.Tensor )
# Test not batched input
lowercase_ : List[str] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
lowercase_ , lowercase_ : Union[str, Any] = self.image_processor_tester.get_expected_values(A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowercase_ : Any = image_processing(A , return_tensors='''pt''' ).pixel_values
lowercase_ , lowercase_ : List[str] = self.image_processor_tester.get_expected_values(A , batched=A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
    def test_equivalence_padding(self):
# Initialize image_processings
lowercase_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
lowercase_ : Tuple = self.image_processing_class(do_resize=A , do_normalize=A , do_rescale=A )
# create random PyTorch tensors
lowercase_ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=A , torchify=A )
for image in image_inputs:
self.assertIsInstance(A , torch.Tensor )
# Test whether the method "pad" and calling the image processor return the same tensors
lowercase_ : Union[str, Any] = image_processing_a.pad(A , return_tensors='''pt''' )
lowercase_ : List[Any] = image_processing_a(A , return_tensors='''pt''' )
self.assertTrue(
torch.allclose(encoded_images_with_method['''pixel_values'''] , encoded_images['''pixel_values'''] , atol=1e-4 ) )
@slow
    def test_call_pytorch_with_coco_detection_annotations(self):
# prepare image and target
lowercase_ : Dict = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f:
lowercase_ : List[Any] = json.loads(f.read() )
lowercase_ : Tuple = {'''image_id''': 3_97_69, '''annotations''': target}
# encode them
lowercase_ : Union[str, Any] = YolosImageProcessor.from_pretrained('''hustvl/yolos-small''' )
lowercase_ : List[Any] = image_processing(images=A , annotations=A , return_tensors='''pt''' )
# verify pixel values
lowercase_ : Union[str, Any] = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['''pixel_values'''].shape , A )
lowercase_ : Union[str, Any] = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , A , atol=1e-4 ) )
# verify area
lowercase_ : Tuple = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , A ) )
# verify boxes
lowercase_ : List[str] = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , A )
lowercase_ : Any = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , A , atol=1e-3 ) )
# verify image_id
lowercase_ : List[Any] = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , A ) )
# verify is_crowd
lowercase_ : Any = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , A ) )
# verify class_labels
lowercase_ : Optional[Any] = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , A ) )
# verify orig_size
lowercase_ : List[str] = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , A ) )
# verify size
lowercase_ : Optional[Any] = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , A ) )
@slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
# prepare image, target and masks_path
lowercase_ : Tuple = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f:
lowercase_ : str = json.loads(f.read() )
lowercase_ : int = {'''file_name''': '''000000039769.png''', '''image_id''': 3_97_69, '''segments_info''': target}
lowercase_ : List[Any] = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
# encode them
lowercase_ : int = YolosImageProcessor(format='''coco_panoptic''' )
lowercase_ : Any = image_processing(images=A , annotations=A , masks_path=A , return_tensors='''pt''' )
# verify pixel values
lowercase_ : Optional[Any] = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['''pixel_values'''].shape , A )
lowercase_ : Tuple = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , A , atol=1e-4 ) )
# verify area
lowercase_ : List[Any] = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , A ) )
# verify boxes
lowercase_ : str = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , A )
lowercase_ : List[str] = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , A , atol=1e-3 ) )
# verify image_id
lowercase_ : List[str] = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , A ) )
# verify is_crowd
lowercase_ : List[str] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , A ) )
# verify class_labels
lowercase_ : Any = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , A ) )
# verify masks
lowercase_ : Dict = 82_28_73
self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , A )
# verify orig_size
lowercase_ : Tuple = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , A ) )
# verify size
lowercase_ : List[str] = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , A ) )
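# --- Hedged usage sketch (added; not part of the original test file) ---
# Shape of the COCO-style detection payload the slow tests above build from
# the fixture files; the concrete values here are made up for illustration.
#
#   target = {"image_id": 39769, "annotations": [
#       {"id": 1, "image_id": 39769, "category_id": 75,
#        "bbox": [1.0, 2.0, 30.0, 40.0], "area": 1200.0, "iscrowd": 0},
#   ]}
#   image_processing = YolosImageProcessor.from_pretrained("hustvl/yolos-small")
#   encoding = image_processing(images=image, annotations=target, return_tensors="pt")
#   encoding["pixel_values"], encoding["labels"]  # resized image + converted boxes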
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_no_bos.model")
@require_sentencepiece
@require_tokenizers
class PegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """simple docstring"""
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self) -> None:
        """simple docstring"""
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)
    @cached_property
    def _large_tokenizer(self) -> PegasusTokenizer:
        """simple docstring"""
        return PegasusTokenizer.from_pretrained("google/pegasus-large")

    def get_tokenizer(self, **kwargs) -> PegasusTokenizer:
        """simple docstring"""
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        """simple docstring"""
        return ("This is a test", "This is a test")
    def test_convert_token_and_id(self) -> None:
        """simple docstring"""
        token = "</s>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self) -> None:
        """simple docstring"""
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "</s>")
        self.assertEqual(vocab_keys[-1], "v")
        self.assertEqual(len(vocab_keys), 1_1_0_3)
    def test_vocab_size(self) -> None:
        """simple docstring"""
        self.assertEqual(self.get_tokenizer().vocab_size, 1_1_0_3)
    def test_mask_tokens_rust_pegasus(self) -> None:
        """simple docstring"""
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            "Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"
            " </s> <pad> <pad> <pad>"
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)
    def test_large_mask_tokens(self) -> None:
        """simple docstring"""
        tokenizer = self._large_tokenizer
        # <mask_1> masks whole sentence while <mask_2> masks single word
        raw_input_str = "<mask_1> To ensure a <mask_2> flow of bank resolutions."
        desired_result = [2, 4_1_3, 6_1_5, 1_1_4, 3, 1_9_7_1, 1_1_3, 1_6_7_9, 1_0_7_1_0, 1_0_7, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)
    def test_large_tokenizer_settings(self) -> None:
        """simple docstring"""
        tokenizer = self._large_tokenizer
        # The tracebacks for the following asserts are **better** without messages or self.assertEqual
        assert tokenizer.vocab_size == 9_6_1_0_3
        assert tokenizer.pad_token_id == 0
        assert tokenizer.eos_token_id == 1
        assert tokenizer.offset == 1_0_3
        assert tokenizer.unk_token_id == tokenizer.offset + 2 == 1_0_5
        assert tokenizer.unk_token == "<unk>"
        assert tokenizer.model_max_length == 1_0_2_4
        raw_input_str = "To ensure a smooth flow of bank resolutions."
        desired_result = [4_1_3, 6_1_5, 1_1_4, 2_2_9_1, 1_9_7_1, 1_1_3, 1_6_7_9, 1_0_7_1_0, 1_0_7, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)
        assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3]) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
    @require_torch
    def test_large_seq2seq_truncation(self) -> None:
        """simple docstring"""
        src_texts = ["This is going to be way too long." * 1_5_0, "short example"]
        tgt_texts = ["not super long but more than 5 tokens", "tiny"]
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors="pt")
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors="pt")
        assert batch.input_ids.shape == (2, 1_0_2_4)
        assert batch.attention_mask.shape == (2, 1_0_2_4)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.
@slow
def SCREAMING_SNAKE_CASE__( self ) -> List[str]:
"""simple docstring"""
a__ : List[Any] = {"""input_ids""": [[3_8_9_7_9, 1_4_3, 1_8_4_8_5, 6_0_6, 1_3_0, 2_6_6_6_9, 8_7_6_8_6, 1_2_1, 5_4_1_8_9, 1_1_2_9, 1_1_1, 2_6_6_6_9, 8_7_6_8_6, 1_2_1, 9_1_1_4, 1_4_7_8_7, 1_2_1, 1_3_2_4_9, 1_5_8, 5_9_2, 9_5_6, 1_2_1, 1_4_6_2_1, 3_1_5_7_6, 1_4_3, 6_2_6_1_3, 1_0_8, 9_6_8_8, 9_3_0, 4_3_4_3_0, 1_1_5_6_2, 6_2_6_1_3, 3_0_4, 1_0_8, 1_1_4_4_3, 8_9_7, 1_0_8, 9_3_1_4, 1_7_4_1_5, 6_3_3_9_9, 1_0_8, 1_1_4_4_3, 7_6_1_4, 1_8_3_1_6, 1_1_8, 4_2_8_4, 7_1_4_8, 1_2_4_3_0, 1_4_3, 1_4_0_0, 2_5_7_0_3, 1_5_8, 1_1_1, 4_2_8_4, 7_1_4_8, 1_1_7_7_2, 1_4_3, 2_1_2_9_7, 1_0_6_4, 1_5_8, 1_2_2, 2_0_4, 3_5_0_6, 1_7_5_4, 1_1_3_3, 1_4_7_8_7, 1_5_8_1, 1_1_5, 3_3_2_2_4, 4_4_8_2, 1_1_1, 1_3_5_5, 1_1_0, 2_9_1_7_3, 3_1_7, 5_0_8_3_3, 1_0_8, 2_0_1_4_7, 9_4_6_6_5, 1_1_1, 7_7_1_9_8, 1_0_7, 1], [1_1_0, 6_2_6_1_3, 1_1_7, 6_3_8, 1_1_2, 1_1_3_3, 1_2_1, 2_0_0_9_8, 1_3_5_5, 7_9_0_5_0, 1_3_8_7_2, 1_3_5, 1_5_9_6, 5_3_5_4_1, 1_3_5_2, 1_4_1, 1_3_0_3_9, 5_5_4_2, 1_2_4, 3_0_2, 5_1_8, 1_1_1, 2_6_8, 2_9_5_6, 1_1_5, 1_4_9, 4_4_2_7, 1_0_7, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_3_9, 1_2_3_5, 2_7_9_9, 1_8_2_8_9, 1_7_7_8_0, 2_0_4, 1_0_9, 9_4_7_4, 1_2_9_6, 1_0_7, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__lowercase , model_name="""google/bigbird-pegasus-large-arxiv""" , revision="""ba85d0851d708441f91440d509690f1ab6353415""" , )
@require_sentencepiece
@require_tokenizers
class BigBirdPegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """simple docstring"""
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self) -> None:
        """simple docstring"""
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB, offset=0, mask_token_sent=None, mask_token="[MASK]")
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _large_tokenizer(self) -> PegasusTokenizer:
        """simple docstring"""
        return PegasusTokenizer.from_pretrained("google/bigbird-pegasus-large-arxiv")

    def get_tokenizer(self, **kwargs) -> PegasusTokenizer:
        """simple docstring"""
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        """simple docstring"""
        return ("This is a test", "This is a test")
    def test_mask_tokens_rust_pegasus(self) -> None:
        """simple docstring"""
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            "Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"
            " <pad> <pad> <pad>"
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)
    @require_torch
    def test_large_seq2seq_truncation(self) -> None:
        """simple docstring"""
        src_texts = ["This is going to be way too long." * 1_0_0_0, "short example"]
        tgt_texts = ["not super long but more than 5 tokens", "tiny"]
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors="pt")
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors="pt")
        assert batch.input_ids.shape == (2, 4_0_9_6)
        assert batch.attention_mask.shape == (2, 4_0_9_6)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.
    def test_equivalence_to_orig_tokenizer(self) -> None:
        """simple docstring"""
        test_string = (
            "This is an example string that is used to test the original TF implementation against the HF"
            " implementation"
        )
        token_ids = self._large_tokenizer(test_string).input_ids
        self.assertListEqual(
            token_ids, [1_8_2, 1_1_7, 1_4_2, 5_8_7, 4_2_1_1, 1_2_0, 1_1_7, 2_6_3, 1_1_2, 8_0_4, 1_0_9, 8_5_6, 2_5_0_1_6, 3_1_3_7, 4_6_4, 1_0_9, 2_6_9_5_5, 3_1_3_7, 1])
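# --- Hedged usage sketch (added; not part of the original test file) ---
# Outside the test harness, the tokenizer under test is driven like this
# (downloading "google/pegasus-large" from the Hub is an assumption):
#
#   tok = PegasusTokenizer.from_pretrained("google/pegasus-large")
#   batch = tok(["PEGASUS is pre-trained with gap sentences."], return_tensors="pt")
#   batch.input_ids.shape  # (1, sequence_length)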
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
"configuration_longt5": ["LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP", "LongT5Config", "LongT5OnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_longt5"] = [
"LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST",
"LongT5EncoderModel",
"LongT5ForConditionalGeneration",
"LongT5Model",
"LongT5PreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_longt5"] = [
"FlaxLongT5ForConditionalGeneration",
"FlaxLongT5Model",
"FlaxLongT5PreTrainedModel",
]
if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_pegasus_x''': ['''PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''PegasusXConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_pegasus_x'] = [
'''PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PegasusXForConditionalGeneration''',
'''PegasusXModel''',
'''PegasusXPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
"""simple docstring"""
# Logistic Regression from scratch
# In[62]:
# In[63]:
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def sigmoid_function(z):
    return 1 / (1 + np.exp(-z))


def cost_function(h, y):
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()


def log_likelihood(x, y, weights):
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))


def logistic_reg(alpha, x, y, max_iterations=7_00_00):
    theta = np.zeros(x.shape[1])
    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        j = cost_function(h, y)
        if iterations % 1_00 == 0:
            print(f'''loss: {j} \t''')  # printing the loss after every 100 iterations
    return theta
# In[68]:
if __name__ == "__main__":
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1
    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=70000)
    print('theta: ', theta)  # printing the theta i.e our weights vector

    def predict_prob(x):
        return sigmoid_function(
            np.dot(x, theta)
        )  # predicting the value of probability from the logistic regression algorithm

    plt.figure(figsize=(10, 6))
    plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color='b', label='0')
    plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color='r', label='1')
    (x1_min, x1_max) = (x[:, 0].min(), x[:, 0].max())
    (x2_min, x2_max) = (x[:, 1].min(), x[:, 1].max())
    (xx1, xx2) = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max))
    grid = np.c_[xx1.ravel(), xx2.ravel()]
    probs = predict_prob(grid).reshape(xx1.shape)
    plt.contour(xx1, xx2, probs, [0.5], linewidths=1, colors='black')
    plt.legend()
    plt.show()
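    # --- Hedged example (added; not part of the original script) ---
    # One way to sanity-check the fitted weights: threshold the predicted
    # probabilities at 0.5 (an assumed cut-off) and measure training accuracy.
    predictions = (predict_prob(x) >= 0.5).astype(int)
    print('training accuracy:', (predictions == y).mean())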
def merge_sort(collection):
    start, end = [], []
    while len(collection) > 1:
        min_one, max_one = min(collection), max(collection)
        start.append(min_one)
        end.append(max_one)
        collection.remove(min_one)
        collection.remove(max_one)
    end.reverse()
    return start + collection + end
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
    print(*merge_sort(unsorted), sep=',')
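    # --- Hedged example (added; not part of the original script) ---
    # A deterministic check of the min/max "merge" sort above, so the function
    # can be exercised without interactive input.
    assert merge_sort([5, 1, 4, 2, 3]) == [1, 2, 3, 4, 5]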
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

DINAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''shi-labs/dinat-mini-in1k-224''': '''https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json''',
    # See all Dinat models at https://huggingface.co/models?filter=dinat
}
class DinatConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = 'dinat'
    attribute_map = {
        'num_attention_heads': 'num_heads',
        'num_hidden_layers': 'num_layers',
    }
    def __init__(
        self, patch_size=4, num_channels=3, embed_dim=64, depths=[3, 4, 6, 5], num_heads=[2, 4, 8, 16],
        kernel_size=7, dilations=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]],
        mlp_ratio=3.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1, hidden_act="gelu", initializer_range=0.02, layer_norm_eps=1E-5,
        layer_scale_init_value=0.0, out_features=None, out_indices=None, **kwargs,
    ):
        """simple docstring"""
        super().__init__(**kwargs)
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.dilations = dilations
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Dinat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ['stem'] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names)
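# --- Hedged usage sketch (added; not part of the original file) ---
# Derived attributes of the config above (assuming the fixed-up name
# DinatConfig); hidden_size is the channel width after the last stage.
if __name__ == "__main__":
    config = DinatConfig(embed_dim=64, depths=[3, 4, 6, 5])
    print(config.num_layers)   # 4, inferred from len(depths)
    print(config.hidden_size)  # 512 == 64 * 2 ** 3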
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
import os
def solution() -> str:
    file_path = os.path.join(os.path.dirname(__file__), '''num.txt''')
    with open(file_path) as file_hand:
        return str(sum(int(line) for line in file_hand))[:10]
if __name__ == "__main__":
print(solution())
def simplify(current_set: list[list]) -> list[list]:
    # Divide each row by magnitude of first term --> creating 'unit' matrix
    duplicate_set = current_set.copy()
    for row_index, row in enumerate(duplicate_set):
        magnitude = row[0]
        for column_index, column in enumerate(row):
            if magnitude == 0:
                duplicate_set[row_index][column_index] = column
                continue
            duplicate_set[row_index][column_index] = column / magnitude
    # Subtract to cancel term
    first_row = duplicate_set[0]
    final_set = [first_row]
    current_set = duplicate_set[1::]
    for row in current_set:
        temp_row = []
        # If first term is 0, it is already in form we want, so we preserve it
        if row[0] == 0:
            final_set.append(row)
            continue
        for column_index in range(len(row)):
            temp_row.append(first_row[column_index] - row[column_index])
        final_set.append(temp_row)
    # Create next recursion iteration set
    if len(final_set[0]) != 3:
        current_first_row = final_set[0]
        current_first_column = []
        next_iteration = []
        for row in final_set[1::]:
            current_first_column.append(row[0])
            next_iteration.append(row[1::])
        resultant = simplify(next_iteration)
        for i in range(len(resultant)):
            resultant[i].insert(0, current_first_column[i])
        resultant.insert(0, current_first_row)
        final_set = resultant
    return final_set
def solve_simultaneous(equations: list[list]) -> list:
    if len(equations) == 0:
        raise IndexError('solve_simultaneous() requires n lists of length n+1')
    _length = len(equations) + 1
    if any(len(item) != _length for item in equations):
        raise IndexError('solve_simultaneous() requires n lists of length n+1')
    for row in equations:
        if any(not isinstance(column, (int, float)) for column in row):
            raise ValueError('solve_simultaneous() requires lists of integers')
    if len(equations) == 1:
        return [equations[0][-1] / equations[0][0]]
    data_set = equations.copy()
    if any(0 in row for row in data_set):
        temp_data = data_set.copy()
        full_row = []
        for row_index, row in enumerate(temp_data):
            if 0 not in row:
                full_row = data_set.pop(row_index)
                break
        if not full_row:
            raise ValueError('solve_simultaneous() requires at least 1 full equation')
        data_set.insert(0, full_row)
    useable_form = data_set.copy()
    simplified = simplify(useable_form)
    simplified = simplified[::-1]
    solutions: list = []
    for row in simplified:
        current_solution = row[-1]
        if not solutions:
            if row[-2] == 0:
                solutions.append(0)
                continue
            solutions.append(current_solution / row[-2])
            continue
        temp_row = row.copy()[: len(row) - 1 :]
        while temp_row[0] == 0:
            temp_row.pop(0)
        if len(temp_row) == 0:
            solutions.append(0)
            continue
        temp_row = temp_row[1::]
        temp_row = temp_row[::-1]
        for column_index, column in enumerate(temp_row):
            current_solution -= column * solutions[column_index]
        solutions.append(current_solution)
    final = []
    for item in solutions:
        final.append(float(round(item, 5)))
    return final[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
    eq = [
[2, 1, 1, 1, 1, 4],
[1, 2, 1, 1, 1, 5],
[1, 1, 2, 1, 1, 6],
[1, 1, 1, 2, 1, 7],
[1, 1, 1, 1, 2, 8],
]
print(solve_simultaneous(eq))
print(solve_simultaneous([[4, 2]]))
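    # --- Hedged example (added; not part of the original script) ---
    # A small 2x2 system as a worked check: x + y = 3 and x - y = 1
    # (each row is [a, b, constant]) should give x = 2.0, y = 1.0.
    print(solve_simultaneous([[1, 1, 3], [1, -1, 1]]))  # expected [2.0, 1.0]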
from collections import deque
from math import floor
from random import random
from time import time
class DirectedGraph:
    def __init__(self) -> None:
        '''simple docstring'''
        self.graph = {}
    def add_pair(self, u, v, w=1):
        '''simple docstring'''
        if self.graph.get(u):
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            self.graph[u] = [[w, v]]
        if not self.graph.get(v):
            self.graph[v] = []
def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
'''simple docstring'''
return list(self.graph )
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[int]:
'''simple docstring'''
if self.graph.get(lowerCAmelCase__ ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(lowerCAmelCase__ )
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__=-2 , lowerCAmelCase__=-1 ) -> Union[str, Any]:
'''simple docstring'''
if s == d:
return []
lowercase__: Tuple = []
lowercase__: Tuple = []
if s == -2:
lowercase__: Any = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
lowercase__: Dict = s
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
lowercase__: Dict = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(lowerCAmelCase__ )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
lowercase__: Union[str, Any] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(lowerCAmelCase__ ) != 0:
lowercase__: Optional[int] = stack[len(lowerCAmelCase__ ) - 1]
else:
lowercase__: Dict = ss
# check if se have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return visited
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__=-1 ) -> List[str]:
'''simple docstring'''
if c == -1:
lowercase__: int = floor(random() * 10_000 ) + 10
for i in range(lowerCAmelCase__ ):
# every vertex has max 100 edges
for _ in range(floor(random() * 102 ) + 1 ):
lowercase__: str = floor(random() * c ) + 1
if n != i:
self.add_pair(lowerCAmelCase__ , lowerCAmelCase__ , 1 )
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__=-2 ) -> Dict:
'''simple docstring'''
lowercase__: int = deque()
lowercase__: Dict = []
if s == -2:
lowercase__: Optional[int] = list(self.graph )[0]
d.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
while d:
lowercase__: str = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ ) -> Dict:
'''simple docstring'''
lowercase__: Tuple = 0
for x in self.graph:
for y in self.graph[x]:
if y[1] == u:
count += 1
return count
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ ) -> Union[str, Any]:
'''simple docstring'''
return len(self.graph[u] )
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__=-2 ) -> Optional[Any]:
'''simple docstring'''
lowercase__: Tuple = []
lowercase__: str = []
if s == -2:
lowercase__: Dict = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
lowercase__: List[Any] = s
lowercase__: Any = []
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
lowercase__: List[str] = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
lowercase__: Dict = node[1]
break
# check if all the children are visited
if s == ss:
sorted_nodes.append(stack.pop() )
if len(lowerCAmelCase__ ) != 0:
lowercase__: int = stack[len(lowerCAmelCase__ ) - 1]
else:
lowercase__: Optional[int] = ss
# check if se have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return sorted_nodes
def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
'''simple docstring'''
lowercase__: List[Any] = []
lowercase__: int = []
lowercase__: List[str] = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
lowercase__: Dict = -2
lowercase__: Union[str, Any] = []
lowercase__: List[str] = s
lowercase__: Dict = False
lowercase__: Union[str, Any] = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
lowercase__: Any = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
lowercase__: List[Any] = len(lowerCAmelCase__ ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
lowercase__: Union[str, Any] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
lowercase__: Any = True
if len(lowerCAmelCase__ ) != 0:
lowercase__: Optional[Any] = stack[len(lowerCAmelCase__ ) - 1]
else:
lowercase__: Union[str, Any] = False
indirect_parents.append(lowerCAmelCase__ )
lowercase__: int = s
lowercase__: str = ss
# check if se have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return list(lowerCAmelCase__ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase__: Any = []
lowercase__: int = []
lowercase__: Dict = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
lowercase__: Optional[int] = -2
lowercase__: List[Any] = []
lowercase__: List[str] = s
lowercase__: List[Any] = False
lowercase__: str = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
lowercase__: Dict = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
lowercase__: Any = len(lowerCAmelCase__ ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
lowercase__: List[Any] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
lowercase__: Optional[Any] = True
if len(lowerCAmelCase__ ) != 0:
lowercase__: Any = stack[len(lowerCAmelCase__ ) - 1]
else:
lowercase__: Optional[Any] = False
indirect_parents.append(lowerCAmelCase__ )
lowercase__: Dict = s
lowercase__: Dict = ss
# check if se have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return False
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__=-2 , lowerCAmelCase__=-1 ) -> Dict:
'''simple docstring'''
lowercase__: Union[str, Any] = time()
self.dfs(lowerCAmelCase__ , lowerCAmelCase__ )
lowercase__: Optional[Any] = time()
return end - begin
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__=-2 ) -> List[str]:
'''simple docstring'''
lowercase__: str = time()
self.bfs(lowerCAmelCase__ )
lowercase__: List[str] = time()
return end - begin
class Graph:
    def __init__(self) -> None:
        '''simple docstring'''
        self.graph = {}
    def add_pair(self, u, v, w=1):
        '''simple docstring'''
        # check if the u exists
        if self.graph.get(u):
            # if there already is a edge
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            # if u does not exist
            self.graph[u] = [[w, v]]
        # add the other way
        if self.graph.get(v):
            # if there already is a edge
            if self.graph[v].count([w, u]) == 0:
                self.graph[v].append([w, u])
        else:
            # if u does not exist
            self.graph[v] = [[w, u]]
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> str:
'''simple docstring'''
if self.graph.get(lowerCAmelCase__ ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(lowerCAmelCase__ )
# the other way round
if self.graph.get(lowerCAmelCase__ ):
for _ in self.graph[v]:
if _[1] == u:
self.graph[v].remove(lowerCAmelCase__ )
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__=-2 , lowerCAmelCase__=-1 ) -> List[str]:
'''simple docstring'''
if s == d:
return []
lowercase__: str = []
lowercase__: int = []
if s == -2:
lowercase__: Tuple = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
lowercase__: int = s
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
lowercase__: int = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(lowerCAmelCase__ )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
lowercase__: List[Any] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(lowerCAmelCase__ ) != 0:
lowercase__: Union[str, Any] = stack[len(lowerCAmelCase__ ) - 1]
else:
lowercase__: Optional[Any] = ss
# check if se have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return visited
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__=-1 ) -> Optional[Any]:
'''simple docstring'''
if c == -1:
lowercase__: Any = floor(random() * 10_000 ) + 10
for i in range(lowerCAmelCase__ ):
# every vertex has max 100 edges
for _ in range(floor(random() * 102 ) + 1 ):
lowercase__: Optional[Any] = floor(random() * c ) + 1
if n != i:
self.add_pair(lowerCAmelCase__ , lowerCAmelCase__ , 1 )
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__=-2 ) -> List[Any]:
'''simple docstring'''
lowercase__: str = deque()
lowercase__: List[Any] = []
if s == -2:
lowercase__: str = list(self.graph )[0]
d.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
while d:
lowercase__: Union[str, Any] = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ ) -> List[Any]:
'''simple docstring'''
return len(self.graph[u] )
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
'''simple docstring'''
lowercase__: str = []
lowercase__: Dict = []
lowercase__: Optional[int] = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
lowercase__: Dict = -2
lowercase__: Dict = []
lowercase__: List[Any] = s
lowercase__: Union[str, Any] = False
lowercase__: List[str] = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
lowercase__: Optional[int] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
lowercase__: Any = len(lowerCAmelCase__ ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
lowercase__: List[str] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
lowercase__: str = True
if len(lowerCAmelCase__ ) != 0:
lowercase__: Dict = stack[len(lowerCAmelCase__ ) - 1]
else:
lowercase__: int = False
indirect_parents.append(lowerCAmelCase__ )
lowercase__: Tuple = s
lowercase__: List[Any] = ss
# check if se have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return list(lowerCAmelCase__ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
'''simple docstring'''
lowercase__: Tuple = []
lowercase__: Optional[int] = []
lowercase__: Optional[Any] = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
lowercase__: Tuple = -2
lowercase__: Any = []
lowercase__: int = s
lowercase__: Optional[int] = False
lowercase__: List[Any] = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
lowercase__: Optional[int] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
lowercase__: Union[str, Any] = len(lowerCAmelCase__ ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
lowercase__: Any = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
lowercase__: List[str] = True
if len(lowerCAmelCase__ ) != 0:
lowercase__: List[str] = stack[len(lowerCAmelCase__ ) - 1]
else:
lowercase__: Dict = False
indirect_parents.append(lowerCAmelCase__ )
lowercase__: Optional[Any] = s
lowercase__: Optional[int] = ss
# check if se have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return False
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
'''simple docstring'''
return list(self.graph )
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__=-2 , lowerCAmelCase__=-1 ) -> Union[str, Any]:
'''simple docstring'''
lowercase__: Dict = time()
self.dfs(lowerCAmelCase__ , lowerCAmelCase__ )
lowercase__: List[Any] = time()
return end - begin
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__=-2 ) -> List[Any]:
'''simple docstring'''
lowercase__: str = time()
self.bfs(lowerCAmelCase__ )
lowercase__: List[str] = time()
return end - begin
"""simple docstring"""
from math import log
from scipy.constants import Boltzmann, physical_constants
T = 3_0_0  # TEMPERATURE (unit = K)
def builtin_voltage(donor_conc: float, acceptor_conc: float, intrinsic_conc: float) -> float:
if donor_conc <= 0:
raise ValueError('''Donor concentration should be positive''' )
elif acceptor_conc <= 0:
raise ValueError('''Acceptor concentration should be positive''' )
elif intrinsic_conc <= 0:
raise ValueError('''Intrinsic concentration should be positive''' )
elif donor_conc <= intrinsic_conc:
raise ValueError(
'''Donor concentration should be greater than intrinsic concentration''' )
elif acceptor_conc <= intrinsic_conc:
raise ValueError(
'''Acceptor concentration should be greater than intrinsic concentration''' )
else:
return (
Boltzmann
* T
* log((donor_conc * acceptor_conc) / intrinsic_conc**2 )
/ physical_constants["electron volt"][0]
)
if __name__ == "__main__":
import doctest
doctest.testmod()
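    # --- Hedged example (added; not part of the original script) ---
    # Rough silicon-like numbers (assumed, not from the original file):
    # Nd = Na = 1e17 cm^-3 and ni = 1.5e10 cm^-3 give a built-in voltage
    # of roughly 0.81 V at T = 300 K.
    print(builtin_voltage(donor_conc=1e17, acceptor_conc=1e17, intrinsic_conc=1.5e10))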
"""simple docstring"""
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
ERNIE_M_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'susnato/ernie-m-base_pytorch': 'https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json',
'susnato/ernie-m-large_pytorch': 'https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json',
}
class ErnieMConfig(PretrainedConfig):
    model_type = "ernie_m"
    attribute_map: Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}
    def __init__(self, vocab_size: int = 25_00_02, hidden_size: int = 7_68, num_hidden_layers: int = 12, num_attention_heads: int = 12, intermediate_size: int = 30_72, hidden_act: str = "gelu", hidden_dropout_prob: float = 0.1, attention_probs_dropout_prob: float = 0.1, max_position_embeddings: int = 5_14, initializer_range: float = 0.0_2, pad_token_id: int = 1, layer_norm_eps: float = 1E-05, classifier_dropout=None, is_decoder=False, act_dropout=0.0, **kwargs) -> None:
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.is_decoder = is_decoder
        self.act_dropout = act_dropout
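# --- Hedged usage sketch (added; not part of the original file) ---
# The `attribute_map` above aliases config fields, so (assuming the fixed-up
# name ErnieMConfig) `dropout` reads and writes `classifier_dropout`:
if __name__ == "__main__":
    config = ErnieMConfig(classifier_dropout=0.3)
    print(config.dropout)      # 0.3, via the attribute_map alias
    print(config.num_classes)  # aliases num_labels (2 by default)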
def solution() -> int:
    """simple docstring"""
    constant = []
    i = 1
    while len(constant) < 1E6:
        constant.append(str(i))
        i += 1
    constant = "".join(constant)
    return (
        int(constant[0])
        * int(constant[9])
        * int(constant[9_9])
        * int(constant[9_9_9])
        * int(constant[9_9_9_9])
        * int(constant[9_9_9_9_9])
        * int(constant[9_9_9_9_9_9])
    )
if __name__ == "__main__":
print(solution())
def snake_to_camel_case(input_str: str, use_pascal: bool = False) -> str:
    """simple docstring"""
    if not isinstance(input_str, str):
        msg = f"Expected string as input, found {type(input_str)}"
        raise ValueError(msg)
    if not isinstance(use_pascal, bool):
        msg = f"Expected boolean as use_pascal parameter, found {type(use_pascal)}"
        raise ValueError(msg)
    words = input_str.split("_")
    start_index = 0 if use_pascal else 1
    words_to_capitalize = words[start_index:]
    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]
    initial_word = "" if use_pascal else words[0]
    return "".join([initial_word, *capitalized_words])
if __name__ == "__main__":
from doctest import testmod
testmod()
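# Illustrative examples (added, assuming the fixed name above):
#     snake_to_camel_case("some_random_string")                   -> 'someRandomString'
#     snake_to_camel_case("some_random_string", use_pascal=True)  -> 'SomeRandomString'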
| 60
| 1
|
from ..utils import DummyObject, requires_backends
class _lowercase(metaclass=DummyObject):
    '''simple docstring'''
    _backends = ["keras_nlp"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["keras_nlp"])
| 9
|
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class TransfoXLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = TransfoXLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()
        vocab_tokens = [
            "<unk>",
            "[CLS]",
            "[SEP]",
            "want",
            "unwanted",
            "wa",
            "un",
            "running",
            ",",
            "low",
            "l",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
    def get_tokenizer(self, **kwargs):
        kwargs["lower_case"] = True
        return TransfoXLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "<unk> UNwanted , running"
        output_text = "<unk> unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = TransfoXLTokenizer(vocab_file=self.vocab_file, lower_case=True)
        tokens = tokenizer.tokenize("<unk> UNwanted , running")
        self.assertListEqual(tokens, ["<unk>", "unwanted", ",", "running"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [0, 4, 8, 7])

    def test_full_tokenizer_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? "), ["hello", "!", "how", "are", "you", "?"])

    def test_full_tokenizer_no_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? "), ["HeLLo", "!", "how", "Are", "yoU", "?"])

    def test_full_tokenizer_moses_punctuation(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)
        text_in = "Hello (bracket) and side-scrolled [and] Henry's $5,000 with 3.34 m. What's up!?"
        tokens_target = [
            "Hello",
            "(",
            "bracket",
            ")",
            "and",
            "side",
            "@-@",
            "scrolled",
            "[",
            "and",
            "]",
            "Henry",
            "'s",
            "$",
            "5",
            "@,@",
            "000",
            "with",
            "3",
            "@.@",
            "34",
            "m",
            ".",
            "What",
            "'s",
            "up",
            "!",
            "?",
        ]
        self.assertListEqual(tokenizer.tokenize(text_in), tokens_target)
        self.assertEqual(tokenizer.convert_tokens_to_string(tokens_target), text_in)

    def test_move_added_token(self):
        tokenizer = self.get_tokenizer()
        original_len = len(tokenizer)

        tokenizer.add_tokens(["new1", "new2"])
        tokenizer.move_added_token("new1", 1)

        # Check that moved token is not copied (duplicate)
        self.assertEqual(len(tokenizer), original_len + 2)
        # Check that token is moved to specified id
        self.assertEqual(tokenizer.encode("new1"), [1])
        self.assertEqual(tokenizer.decode([1]), "new1")
| 9
| 1
|
"""simple docstring"""
from torch import nn
def get_activation(act_fn: str) -> nn.Module:
    '''simple docstring'''
    if act_fn in ["swish", "silu"]:
        return nn.SiLU()
    elif act_fn == "mish":
        return nn.Mish()
    elif act_fn == "gelu":
        return nn.GELU()
    else:
        raise ValueError(f"Unsupported activation function: {act_fn}")
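# Minimal usage sketch (added): maps a config string to a torch activation module.
#
#     import torch
#     act = get_activation("silu")      # returns nn.SiLU()
#     out = act(torch.randn(2, 8))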
| 369
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"],
"tokenization_roformer": ["RoFormerTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_roformer_fast"] = ["RoFormerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roformer"] = [
"ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"RoFormerForCausalLM",
"RoFormerForMaskedLM",
"RoFormerForMultipleChoice",
"RoFormerForQuestionAnswering",
"RoFormerForSequenceClassification",
"RoFormerForTokenClassification",
"RoFormerLayer",
"RoFormerModel",
"RoFormerPreTrainedModel",
"load_tf_weights_in_roformer",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roformer"] = [
"TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRoFormerForCausalLM",
"TFRoFormerForMaskedLM",
"TFRoFormerForMultipleChoice",
"TFRoFormerForQuestionAnswering",
"TFRoFormerForSequenceClassification",
"TFRoFormerForTokenClassification",
"TFRoFormerLayer",
"TFRoFormerModel",
"TFRoFormerPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roformer"] = [
"FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"FlaxRoFormerForMaskedLM",
"FlaxRoFormerForMultipleChoice",
"FlaxRoFormerForQuestionAnswering",
"FlaxRoFormerForSequenceClassification",
"FlaxRoFormerForTokenClassification",
"FlaxRoFormerModel",
"FlaxRoFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
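# Illustrative note (added): with the _LazyModule registration above,
# `from transformers.models.roformer import RoFormerModel` only triggers the
# import of `modeling_roformer` (and therefore torch) on first attribute access,
# so a bare `import transformers` stays cheap when heavy backends are not needed.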
| 202
| 0
|
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
TRANSFORMERS_PATH = "src/transformers"
PATH_TO_DOCS = "docs/source/en"
REPO_PATH = "."
def _find_text_in_file(filename, start_prompt, end_prompt):
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
ALLOWED_MODEL_SUFFIXES = "Model|Encoder|Decoder|ForConditionalGeneration"
# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
_re_flax_models = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
def camel_case_split(identifier):
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]
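# Example (added): camel_case_split("TFRoFormerForCausalLM")
# -> ['TF', 'Ro', 'Former', 'For', 'Causal', 'LM']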
def _center_text(text, width):
    text_length = 2 if text == "✅" or text == "❌" else len(text)
    left_indent = (width - text_length) // 2
    right_indent = width - text_length - left_indent
    return " " * left_indent + text + " " * right_indent
def get_model_table_from_auto_modules():
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_name_to_config = {
        name: config_maping_names[code]
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if code in config_maping_names
    }
    model_name_to_prefix = {name: config.replace("Config", "") for name, config in model_name_to_config.items()}

    # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
    slow_tokenizers = collections.defaultdict(bool)
    fast_tokenizers = collections.defaultdict(bool)
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)

    # Let's lookup through all transformers object (once).
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if attr_name.endswith("Tokenizer"):
            lookup_dict = slow_tokenizers
            attr_name = attr_name[:-9]
        elif attr_name.endswith("TokenizerFast"):
            lookup_dict = fast_tokenizers
            attr_name = attr_name[:-13]
        elif _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]

        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_name_to_prefix.values():
                    lookup_dict[attr_name] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name)[:-1])

    # Let's build that table!
    model_names = list(model_name_to_config.keys())
    model_names.sort(key=str.lower)
    columns = ["Model", "Tokenizer slow", "Tokenizer fast", "PyTorch support", "TensorFlow support", "Flax Support"]
    # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
    widths = [len(c) + 2 for c in columns]
    widths[0] = max([len(name) for name in model_names]) + 2

    # Build the table per se
    table = "|" + "|".join([_center_text(c, w) for c, w in zip(columns, widths)]) + "|\n"
    # Use ":-----:" format to center-aligned table cell texts
    table += "|" + "|".join([":" + "-" * (w - 2) + ":" for w in widths]) + "|\n"

    check = {True: "✅", False: "❌"}
    for name in model_names:
        prefix = model_name_to_prefix[name]
        line = [
            name,
            check[slow_tokenizers[prefix]],
            check[fast_tokenizers[prefix]],
            check[pt_models[prefix]],
            check[tf_models[prefix]],
            check[flax_models[prefix]],
        ]
        table += "|" + "|".join([_center_text(l, w) for l, w in zip(line, widths)]) + "|\n"
    return table
def check_model_table(overwrite=False):
    current_table, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_DOCS, "index.md"),
        start_prompt="<!--This table is updated automatically from the auto modules",
        end_prompt="<!-- End table-->",
    )
    new_table = get_model_table_from_auto_modules()

    if current_table != new_table:
        if overwrite:
            with open(os.path.join(PATH_TO_DOCS, "index.md"), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_table] + lines[end_index:])
        else:
            raise ValueError(
                "The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this."
            )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_model_table(args.fix_and_overwrite)
| 262
|
from math import sqrt
def is_prime(number: int) -> bool:
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and positive"
    status = True
    # 0 and 1 are none primes.
    if number <= 1:
        status = False
    for divisor in range(2, int(round(sqrt(number))) + 1):
        # if 'number' divisible by 'divisor' then sets 'status'
        # of false and break up the loop.
        if number % divisor == 0:
            status = False
            break
    # precondition
    assert isinstance(status, bool), "'status' must be from type bool"
    return status
def sieve_er(n):
    assert isinstance(n, int) and (n > 2), "'N' must be an int and > 2"
    # begin_list: contains all natural numbers from 2 up to N
    begin_list = list(range(2, n + 1))
    ans = []  # this list will be returned.
    # actual sieve of erathostenes
    for i in range(len(begin_list)):
        for j in range(i + 1, len(begin_list)):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0
    # filters actual prime numbers.
    ans = [x for x in begin_list if x != 0]
    # precondition
    assert isinstance(ans, list), "'ans' must be from type list"
    return ans


def get_prime_numbers(n):
    assert isinstance(n, int) and (n > 2), "'N' must be an int and > 2"
    ans = []
    # iterates over all numbers between 2 up to N+1
    # if a number is prime then appends to list 'ans'
    for number in range(2, n + 1):
        if is_prime(number):
            ans.append(number)
    # precondition
    assert isinstance(ans, list), "'ans' must be from type list"
    return ans
def prime_factorization(number):
    assert isinstance(number, int) and number >= 0, "'number' must be an int and >= 0"
    ans = []  # this list will be returned by the function.
    # potential prime number factors.
    factor = 2
    quotient = number
    if number == 0 or number == 1:
        ans.append(number)
    # if 'number' not prime then builds the prime factorization of 'number'
    elif not is_prime(number):
        while quotient != 1:
            if is_prime(factor) and (quotient % factor == 0):
                ans.append(factor)
                quotient /= factor
            else:
                factor += 1
    else:
        ans.append(number)
    # precondition
    assert isinstance(ans, list), "'ans' must be from type list"
    return ans


def greatest_prime_factor(number):
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and >= 0"
    ans = 0
    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = max(prime_factors)
    # precondition
    assert isinstance(ans, int), "'ans' must be from type int"
    return ans


def smallest_prime_factor(number):
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and >= 0"
    ans = 0
    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = min(prime_factors)
    # precondition
    assert isinstance(ans, int), "'ans' must be from type int"
    return ans


def is_even(number):
    assert isinstance(number, int), "'number' must be an int"
    assert isinstance(number % 2 == 0, bool), "compare must be from type bool"
    return number % 2 == 0


def is_odd(number):
    assert isinstance(number, int), "'number' must be an int"
    assert isinstance(number % 2 != 0, bool), "compare must be from type bool"
    return number % 2 != 0
def goldbach(number):
    assert (
        isinstance(number, int) and (number > 2) and is_even(number)
    ), "'number' must be an int, even and > 2"
    ans = []  # this list will be returned
    # creates a list of prime numbers between 2 up to 'number'
    prime_numbers = get_prime_numbers(number)
    len_pn = len(prime_numbers)
    # run variable for while-loops.
    i = 0
    j = None
    # exit variable. for break up the loops
    loop = True
    while i < len_pn and loop:
        j = i + 1
        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                loop = False
                ans.append(prime_numbers[i])
                ans.append(prime_numbers[j])
            j += 1
        i += 1
    # precondition
    assert (
        isinstance(ans, list)
        and (len(ans) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0])
        and is_prime(ans[1])
    ), "'ans' must contain two primes whose sum equals 'number'"
    return ans


def gcd(number1, number2):
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 0)
        and (number2 >= 0)
    ), "'number1' and 'number2' must be positive integers."
    rest = 0
    while number2 != 0:
        rest = number1 % number2
        number1 = number2
        number2 = rest
    # precondition
    assert isinstance(number1, int) and (
        number1 >= 0
    ), "'number' must be from type int and positive"
    return number1
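# Worked example (added): gcd(24, 40) iterates (24, 40) -> (40, 24) -> (24, 16)
# -> (16, 8) -> (8, 0) and returns 8; goldbach(28) returns [5, 23].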
def kg_v(number1, number2):
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 1)
        and (number2 >= 1)
    ), "'number1' and 'number2' must be positive integers."
    ans = 1  # actual answer that will be returned.
    # for kgV (x,1)
    if number1 > 1 and number2 > 1:
        # builds the prime factorization of 'number1' and 'number2'
        prime_fac_1 = prime_factorization(number1)
        prime_fac_2 = prime_factorization(number2)
    elif number1 == 1 or number2 == 1:
        prime_fac_1 = []
        prime_fac_2 = []
        ans = max(number1, number2)
    count1 = 0
    count2 = 0
    done = []  # captured numbers in both 'prime_fac_1' and 'prime_fac_2'
    # iterates through prime_fac_1
    for n in prime_fac_1:
        if n not in done:
            if n in prime_fac_2:
                count1 = prime_fac_1.count(n)
                count2 = prime_fac_2.count(n)
                for _ in range(max(count1, count2)):
                    ans *= n
            else:
                count1 = prime_fac_1.count(n)
                for _ in range(count1):
                    ans *= n
            done.append(n)
    # iterates through prime_fac_2
    for n in prime_fac_2:
        if n not in done:
            count2 = prime_fac_2.count(n)
            for _ in range(count2):
                ans *= n
            done.append(n)
    # precondition
    assert isinstance(ans, int) and (
        ans >= 0
    ), "'ans' must be from type int and positive"
    return ans
def get_prime(n):
    assert isinstance(n, int) and (n >= 0), "'number' must be a positive int"
    index = 0
    ans = 2  # this variable holds the answer
    while index < n:
        index += 1
        ans += 1  # counts to the next number
        # if ans not prime then
        # runs to the next prime number.
        while not is_prime(ans):
            ans += 1
    # precondition
    assert isinstance(ans, int) and is_prime(
        ans
    ), "'ans' must be a prime number and from type int"
    return ans


def get_primes_between(p_number_1, p_number_2):
    assert (
        is_prime(p_number_1) and is_prime(p_number_2) and (p_number_1 < p_number_2)
    ), "The arguments must be prime numbers and 'pNumber1' < 'pNumber2'"
    number = p_number_1 + 1  # jump to the next number
    ans = []  # this list will be returned.
    # if number is not prime then
    # fetch the next prime number.
    while not is_prime(number):
        number += 1
    while number < p_number_2:
        ans.append(number)
        number += 1
        # fetch the next prime number.
        while not is_prime(number):
            number += 1
    # precondition
    assert (
        isinstance(ans, list)
        and ans[0] != p_number_1
        and ans[len(ans) - 1] != p_number_2
    ), "'ans' must be a list without the arguments"
    # 'ans' contains not 'pNumber1' and 'pNumber2' !
    return ans
def get_divisors(n):
    assert isinstance(n, int) and (n >= 1), "'n' must be int and >= 1"
    ans = []  # will be returned.
    for divisor in range(1, n + 1):
        if n % divisor == 0:
            ans.append(divisor)
    # precondition
    assert ans[0] == 1 and ans[len(ans) - 1] == n, "Error in function get_divisors(...)"
    return ans


def is_perfect_number(number):
    assert isinstance(number, int) and (
        number > 1
    ), "'number' must be an int and >= 1"
    divisors = get_divisors(number)
    # precondition
    assert (
        isinstance(divisors, list)
        and (divisors[0] == 1)
        and (divisors[len(divisors) - 1] == number)
    ), "Error in help-function get_divisors(...)"
    # summed all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1]) == number


def simplify_fraction(numerator, denominator):
    assert (
        isinstance(numerator, int)
        and isinstance(denominator, int)
        and (denominator != 0)
    ), "The arguments must be from type int and 'denominator' != 0"
    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator), abs(denominator))
    # precondition
    assert (
        isinstance(gcd_of_fraction, int)
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"
    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)


def factorial(n):
    assert isinstance(n, int) and (n >= 0), "'n' must be an int and >= 0"
    ans = 1  # this will be returned.
    for factor in range(1, n + 1):
        ans *= factor
    return ans


def fib(n):
    assert isinstance(n, int) and (n >= 0), "'n' must be an int and >= 0"
    tmp = 0
    fib1 = 1
    ans = 1  # this will be returned
    for _ in range(n - 1):
        tmp = ans
        ans += fib1
        fib1 = tmp
    return ans
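# Illustrative checks (added): is_prime(97) -> True, prime_factorization(60) -> [2, 2, 3, 5],
# kg_v(8, 10) -> 40, get_prime(8) -> 23, fib(5) -> 8.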
| 262
| 1
|
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
_A = get_tests_dir('''fixtures/dummy_feature_extractor_config.json''')
_A = get_tests_dir('''fixtures/vocab.json''')
_A = get_tests_dir('''fixtures''')
class A ( unittest.TestCase ):
__snake_case = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'bla', 'blou']
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = 0
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = AutoProcessor.from_pretrained('''facebook/wav2vec2-base-960h''' )
self.assertIsInstance(lowercase_, lowercase_ )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase_ = WavaVecaConfig()
lowerCAmelCase_ = AutoProcessor.from_pretrained('''facebook/wav2vec2-base-960h''' )
# save in new folder
model_config.save_pretrained(lowercase_ )
processor.save_pretrained(lowercase_ )
lowerCAmelCase_ = AutoProcessor.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_, lowercase_ )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
# copy relevant files
copyfile(lowercase_, os.path.join(lowercase_, lowercase_ ) )
copyfile(lowercase_, os.path.join(lowercase_, '''vocab.json''' ) )
lowerCAmelCase_ = AutoProcessor.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_, lowercase_ )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase_ = WavaVecaFeatureExtractor()
lowerCAmelCase_ = AutoTokenizer.from_pretrained('''facebook/wav2vec2-base-960h''' )
lowerCAmelCase_ = WavaVecaProcessor(lowercase_, lowercase_ )
# save in new folder
processor.save_pretrained(lowercase_ )
# drop `processor_class` in tokenizer
with open(os.path.join(lowercase_, lowercase_ ), '''r''' ) as f:
lowerCAmelCase_ = json.load(lowercase_ )
config_dict.pop('''processor_class''' )
with open(os.path.join(lowercase_, lowercase_ ), '''w''' ) as f:
f.write(json.dumps(lowercase_ ) )
lowerCAmelCase_ = AutoProcessor.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_, lowercase_ )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase_ = WavaVecaFeatureExtractor()
lowerCAmelCase_ = AutoTokenizer.from_pretrained('''facebook/wav2vec2-base-960h''' )
lowerCAmelCase_ = WavaVecaProcessor(lowercase_, lowercase_ )
# save in new folder
processor.save_pretrained(lowercase_ )
# drop `processor_class` in feature extractor
with open(os.path.join(lowercase_, lowercase_ ), '''r''' ) as f:
lowerCAmelCase_ = json.load(lowercase_ )
config_dict.pop('''processor_class''' )
with open(os.path.join(lowercase_, lowercase_ ), '''w''' ) as f:
f.write(json.dumps(lowercase_ ) )
lowerCAmelCase_ = AutoProcessor.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_, lowercase_ )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase_ = WavaVecaConfig(processor_class='''Wav2Vec2Processor''' )
model_config.save_pretrained(lowercase_ )
# copy relevant files
copyfile(lowercase_, os.path.join(lowercase_, '''vocab.json''' ) )
# create emtpy sample processor
with open(os.path.join(lowercase_, lowercase_ ), '''w''' ) as f:
f.write('''{}''' )
lowerCAmelCase_ = AutoProcessor.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_, lowercase_ )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
with self.assertRaises(lowercase_ ):
lowerCAmelCase_ = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(lowercase_ ):
lowerCAmelCase_ = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''', trust_remote_code=lowercase_ )
lowerCAmelCase_ = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''', trust_remote_code=lowercase_ )
self.assertTrue(processor.special_attribute_present )
self.assertEqual(processor.__class__.__name__, '''NewProcessor''' )
lowerCAmelCase_ = processor.feature_extractor
self.assertTrue(feature_extractor.special_attribute_present )
self.assertEqual(feature_extractor.__class__.__name__, '''NewFeatureExtractor''' )
lowerCAmelCase_ = processor.tokenizer
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__, '''NewTokenizerFast''' )
# Test we can also load the slow version
lowerCAmelCase_ = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''', trust_remote_code=lowercase_, use_fast=lowercase_ )
lowerCAmelCase_ = new_processor.tokenizer
self.assertTrue(new_tokenizer.special_attribute_present )
self.assertEqual(new_tokenizer.__class__.__name__, '''NewTokenizer''' )
else:
self.assertEqual(tokenizer.__class__.__name__, '''NewTokenizer''' )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
try:
AutoConfig.register('''custom''', lowercase_ )
AutoFeatureExtractor.register(lowercase_, lowercase_ )
AutoTokenizer.register(lowercase_, slow_tokenizer_class=lowercase_ )
AutoProcessor.register(lowercase_, lowercase_ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowercase_ ):
AutoProcessor.register(lowercase_, lowercase_ )
# Now that the config is registered, it can be used as any other config with the auto-API
lowerCAmelCase_ = CustomFeatureExtractor.from_pretrained(lowercase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCAmelCase_ = os.path.join(lowercase_, '''vocab.txt''' )
with open(lowercase_, '''w''', encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
lowerCAmelCase_ = CustomTokenizer(lowercase_ )
lowerCAmelCase_ = CustomProcessor(lowercase_, lowercase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(lowercase_ )
lowerCAmelCase_ = AutoProcessor.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_, lowercase_ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
class A ( a_ ):
__snake_case = False
class A ( a_ ):
__snake_case = False
class A ( a_ ):
__snake_case = 'AutoFeatureExtractor'
__snake_case = 'AutoTokenizer'
__snake_case = False
try:
AutoConfig.register('''custom''', lowercase_ )
AutoFeatureExtractor.register(lowercase_, lowercase_ )
AutoTokenizer.register(lowercase_, slow_tokenizer_class=lowercase_ )
AutoProcessor.register(lowercase_, lowercase_ )
# If remote code is not set, the default is to use local classes.
lowerCAmelCase_ = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' )
self.assertEqual(processor.__class__.__name__, '''NewProcessor''' )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote code is disabled, we load the local ones.
lowerCAmelCase_ = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''', trust_remote_code=lowercase_ )
self.assertEqual(processor.__class__.__name__, '''NewProcessor''' )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub.
lowerCAmelCase_ = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''', trust_remote_code=lowercase_ )
self.assertEqual(processor.__class__.__name__, '''NewProcessor''' )
self.assertTrue(processor.special_attribute_present )
self.assertTrue(processor.feature_extractor.special_attribute_present )
self.assertTrue(processor.tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = AutoProcessor.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
self.assertEqual(processor.__class__.__name__, '''BertTokenizerFast''' )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = AutoProcessor.from_pretrained('''hf-internal-testing/tiny-random-convnext''' )
self.assertEqual(processor.__class__.__name__, '''ConvNextImageProcessor''' )
@is_staging_test
class A ( unittest.TestCase ):
__snake_case = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'bla', 'blou']
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls ):
"""simple docstring"""
lowerCAmelCase_ = TOKEN
HfFolder.save_token(lowercase_ )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls ):
"""simple docstring"""
try:
delete_repo(token=cls._token, repo_id='''test-processor''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id='''valid_org/test-processor-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id='''test-dynamic-processor''' )
except HTTPError:
pass
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = WavaVecaProcessor.from_pretrained(lowercase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(lowercase_, '''test-processor''' ), push_to_hub=lowercase_, use_auth_token=self._token )
lowerCAmelCase_ = WavaVecaProcessor.from_pretrained(f"{USER}/test-processor" )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(lowercase_, getattr(new_processor.feature_extractor, lowercase_ ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab(), processor.tokenizer.get_vocab() )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = WavaVecaProcessor.from_pretrained(lowercase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(lowercase_, '''test-processor-org''' ), push_to_hub=lowercase_, use_auth_token=self._token, organization='''valid_org''', )
lowerCAmelCase_ = WavaVecaProcessor.from_pretrained('''valid_org/test-processor-org''' )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(lowercase_, getattr(new_processor.feature_extractor, lowercase_ ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab(), processor.tokenizer.get_vocab() )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
CustomFeatureExtractor.register_for_auto_class()
CustomTokenizer.register_for_auto_class()
CustomProcessor.register_for_auto_class()
lowerCAmelCase_ = CustomFeatureExtractor.from_pretrained(lowercase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCAmelCase_ = os.path.join(lowercase_, '''vocab.txt''' )
with open(lowercase_, '''w''', encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
lowerCAmelCase_ = CustomTokenizer(lowercase_ )
lowerCAmelCase_ = CustomProcessor(lowercase_, lowercase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
create_repo(f"{USER}/test-dynamic-processor", token=self._token )
lowerCAmelCase_ = Repository(lowercase_, clone_from=f"{USER}/test-dynamic-processor", token=self._token )
processor.save_pretrained(lowercase_ )
# This has added the proper auto_map field to the feature extractor config
self.assertDictEqual(
processor.feature_extractor.auto_map, {
'''AutoFeatureExtractor''': '''custom_feature_extraction.CustomFeatureExtractor''',
'''AutoProcessor''': '''custom_processing.CustomProcessor''',
}, )
# This has added the proper auto_map field to the tokenizer config
with open(os.path.join(lowercase_, '''tokenizer_config.json''' ) ) as f:
lowerCAmelCase_ = json.load(lowercase_ )
self.assertDictEqual(
tokenizer_config['''auto_map'''], {
'''AutoTokenizer''': ['''custom_tokenization.CustomTokenizer''', None],
'''AutoProcessor''': '''custom_processing.CustomProcessor''',
}, )
# The code has been copied from fixtures
self.assertTrue(os.path.isfile(os.path.join(lowercase_, '''custom_feature_extraction.py''' ) ) )
self.assertTrue(os.path.isfile(os.path.join(lowercase_, '''custom_tokenization.py''' ) ) )
self.assertTrue(os.path.isfile(os.path.join(lowercase_, '''custom_processing.py''' ) ) )
repo.push_to_hub()
lowerCAmelCase_ = AutoProcessor.from_pretrained(f"{USER}/test-dynamic-processor", trust_remote_code=lowercase_ )
# Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
self.assertEqual(new_processor.__class__.__name__, '''CustomProcessor''' )
| 358
|
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class DeformableDetrImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        """simple docstring"""
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict(self):
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values(self, image_inputs, batched=False):
        """simple docstring"""
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
@require_torch
@require_vision
class DeformableDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DeformableDetrImageProcessor if is_vision_available() else None

    def setUp(self):
        """simple docstring"""
        self.image_processor_tester = DeformableDetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        """simple docstring"""
        return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCamelCase__, '''image_mean''' ) )
self.assertTrue(hasattr(UpperCamelCase__, '''image_std''' ) )
self.assertTrue(hasattr(UpperCamelCase__, '''do_normalize''' ) )
self.assertTrue(hasattr(UpperCamelCase__, '''do_resize''' ) )
self.assertTrue(hasattr(UpperCamelCase__, '''do_rescale''' ) )
self.assertTrue(hasattr(UpperCamelCase__, '''do_pad''' ) )
self.assertTrue(hasattr(UpperCamelCase__, '''size''' ) )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size, {'''shortest_edge''': 18, '''longest_edge''': 1333} )
self.assertEqual(image_processor.do_pad, UpperCamelCase__ )
lowerCAmelCase_ = self.image_processing_class.from_dict(
self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=UpperCamelCase__ )
self.assertEqual(image_processor.size, {'''shortest_edge''': 42, '''longest_edge''': 84} )
self.assertEqual(image_processor.do_pad, UpperCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCAmelCase_ = prepare_image_inputs(self.image_processor_tester, equal_resolution=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__, Image.Image )
# Test not batched input
lowerCAmelCase_ = image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values
lowerCAmelCase_ , lowerCAmelCase_ = self.image_processor_tester.get_expected_values(UpperCamelCase__ )
self.assertEqual(
encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
# Test batched
lowerCAmelCase_ , lowerCAmelCase_ = self.image_processor_tester.get_expected_values(UpperCamelCase__, batched=UpperCamelCase__ )
lowerCAmelCase_ = image_processing(UpperCamelCase__, return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
), )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCAmelCase_ = prepare_image_inputs(self.image_processor_tester, equal_resolution=UpperCamelCase__, numpify=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__, np.ndarray )
# Test not batched input
lowerCAmelCase_ = image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values
lowerCAmelCase_ , lowerCAmelCase_ = self.image_processor_tester.get_expected_values(UpperCamelCase__ )
self.assertEqual(
encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
# Test batched
lowerCAmelCase_ = image_processing(UpperCamelCase__, return_tensors='''pt''' ).pixel_values
lowerCAmelCase_ , lowerCAmelCase_ = self.image_processor_tester.get_expected_values(UpperCamelCase__, batched=UpperCamelCase__ )
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
), )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCAmelCase_ = prepare_image_inputs(self.image_processor_tester, equal_resolution=UpperCamelCase__, torchify=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__, torch.Tensor )
# Test not batched input
lowerCAmelCase_ = image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values
lowerCAmelCase_ , lowerCAmelCase_ = self.image_processor_tester.get_expected_values(UpperCamelCase__ )
self.assertEqual(
encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
# Test batched
lowerCAmelCase_ = image_processing(UpperCamelCase__, return_tensors='''pt''' ).pixel_values
lowerCAmelCase_ , lowerCAmelCase_ = self.image_processor_tester.get_expected_values(UpperCamelCase__, batched=UpperCamelCase__ )
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
), )
@slow
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''', '''r''' ) as f:
lowerCAmelCase_ = json.loads(f.read() )
lowerCAmelCase_ = {'''image_id''': 3_9769, '''annotations''': target}
# encode them
lowerCAmelCase_ = DeformableDetrImageProcessor()
lowerCAmelCase_ = image_processing(images=UpperCamelCase__, annotations=UpperCamelCase__, return_tensors='''pt''' )
# verify pixel values
lowerCAmelCase_ = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['''pixel_values'''].shape, UpperCamelCase__ )
lowerCAmelCase_ = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3], UpperCamelCase__, atol=1E-4 ) )
# verify area
lowerCAmelCase_ = torch.tensor([5_887.9_600, 11_250.2_061, 489_353.8_438, 837_122.7_500, 147_967.5_156, 165_732.3_438] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''], UpperCamelCase__ ) )
# verify boxes
lowerCAmelCase_ = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape, UpperCamelCase__ )
lowerCAmelCase_ = torch.tensor([0.5_503, 0.2_765, 0.0_604, 0.2_215] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0], UpperCamelCase__, atol=1E-3 ) )
# verify image_id
lowerCAmelCase_ = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''], UpperCamelCase__ ) )
# verify is_crowd
lowerCAmelCase_ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''], UpperCamelCase__ ) )
# verify class_labels
lowerCAmelCase_ = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''], UpperCamelCase__ ) )
# verify orig_size
lowerCAmelCase_ = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''], UpperCamelCase__ ) )
# verify size
lowerCAmelCase_ = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''], UpperCamelCase__ ) )
@slow
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''', '''r''' ) as f:
lowerCAmelCase_ = json.loads(f.read() )
lowerCAmelCase_ = {'''file_name''': '''000000039769.png''', '''image_id''': 3_9769, '''segments_info''': target}
lowerCAmelCase_ = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
# encode them
lowerCAmelCase_ = DeformableDetrImageProcessor(format='''coco_panoptic''' )
lowerCAmelCase_ = image_processing(images=UpperCamelCase__, annotations=UpperCamelCase__, masks_path=UpperCamelCase__, return_tensors='''pt''' )
# verify pixel values
lowerCAmelCase_ = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['''pixel_values'''].shape, UpperCamelCase__ )
lowerCAmelCase_ = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3], UpperCamelCase__, atol=1E-4 ) )
# verify area
lowerCAmelCase_ = torch.tensor([147_979.6_875, 165_527.0_469, 484_638.5_938, 11_292.9_375, 5_879.6_562, 7_634.1_147] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''], UpperCamelCase__ ) )
# verify boxes
lowerCAmelCase_ = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape, UpperCamelCase__ )
lowerCAmelCase_ = torch.tensor([0.2_625, 0.5_437, 0.4_688, 0.8_625] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0], UpperCamelCase__, atol=1E-3 ) )
# verify image_id
lowerCAmelCase_ = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''], UpperCamelCase__ ) )
# verify is_crowd
lowerCAmelCase_ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''], UpperCamelCase__ ) )
# verify class_labels
lowerCAmelCase_ = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''], UpperCamelCase__ ) )
# verify masks
lowerCAmelCase_ = 82_2873
self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item(), UpperCamelCase__ )
# verify orig_size
lowerCAmelCase_ = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''], UpperCamelCase__ ) )
# verify size
lowerCAmelCase_ = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''], UpperCamelCase__ ) )
| 167
| 0
|
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def parse_unknown_args(unknown_args):
    """simple docstring"""
    return {key.lstrip("-"): value for key, value in zip(unknown_args[::2], unknown_args[1::2])}


def main():
    """simple docstring"""
    parser = ArgumentParser(
        "HuggingFace Datasets CLI tool", usage="datasets-cli <command> [<args>]", allow_abbrev=False
    )
    commands_parser = parser.add_subparsers(help="datasets-cli command helpers")
    set_verbosity_info()
    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    TestCommand.register_subcommand(commands_parser)
    RunBeamCommand.register_subcommand(commands_parser)
    DummyDataCommand.register_subcommand(commands_parser)
    # Parse args
    args, unknown_args = parser.parse_known_args()
    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)
    kwargs = parse_unknown_args(unknown_args)
    # Run
    service = args.func(args, **kwargs)
    service.run()
if __name__ == "__main__":
main()
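# Illustrative examples (added):
#     parse_unknown_args(["--num_proc", "8", "--name", "v1"])
#         -> {"num_proc": "8", "name": "v1"}   (pairing is positional; values stay strings)
# Example invocations (assuming the subcommands registered in main()):
#     datasets-cli env
#     datasets-cli test ./path/to/dataset --save_infos --all_configs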
| 94
|
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class HfFileSystem(AbstractFileSystem):
    """Interface to files in a Hugging Face dataset repository."""

    root_marker = ""
    protocol = "hf-legacy"  # "hf://"" is reserved for hffs

    def __init__(self, repo_info: Optional[DatasetInfo] = None, token: Optional[str] = None, **kwargs):
        super().__init__(self, **kwargs)
        self.repo_info = repo_info
        self.token = token
        self.dir_cache = None

    def _get_dirs(self):
        if self.dir_cache is None:
            self.dir_cache = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                self.dir_cache[hf_file.rfilename] = {
                    "name": hf_file.rfilename,
                    "size": None,
                    "type": "file",
                }
                self.dir_cache.update(
                    {
                        str(d): {"name": str(d), "size": None, "type": "directory"}
                        for d in list(PurePosixPath(hf_file.rfilename).parents)[:-1]
                    }
                )

    def _open(self, path, mode="rb", **kwargs):
        if not isinstance(self.repo_info, DatasetInfo):
            raise NotImplementedError(f"Open is only implemented for dataset repositories, but got {self.repo_info}")
        url = hf_hub_url(self.repo_info.id, path, revision=self.repo_info.sha)
        return fsspec.open(
            url,
            mode=mode,
            headers=get_authentication_headers_for_url(url, use_auth_token=self.token),
            client_kwargs={"trust_env": True},
        ).open()

    def info(self, path, **kwargs):
        self._get_dirs()
        path = self._strip_protocol(path)
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(path)

    def ls(self, path, detail=False, **kwargs):
        self._get_dirs()
        path = PurePosixPath(path.strip("/"))
        paths = {}
        for p, f in self.dir_cache.items():
            p = PurePosixPath(p.strip("/"))
            root = p.parent
            if root == path:
                paths[str(p)] = f
        out = list(paths.values())
        if detail:
            return out
        else:
            return sorted(f["name"] for f in out)
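
# Minimal usage sketch (illustrative; assumes a `DatasetInfo` obtained via `HfApi`):
#   from huggingface_hub import HfApi
#   fs = HfFileSystem(repo_info=HfApi().dataset_info("squad"))
#   fs.ls("")                        # list top-level entries of the dataset repo
#   with fs.open("README.md") as f:  # streamed over HTTP via fsspec
#       head = f.read(100)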
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/config.json',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/config.json',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/config.json',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/config.json',
'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json',
'roberta-large-openai-detector': 'https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json',
}
class RobertaConfig(PretrainedConfig):
    model_type = "roberta"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class RobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
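
# Quick sanity check (illustrative): the defaults above reproduce a roberta-base-style
# configuration:
#   config = RobertaConfig()
#   assert config.vocab_size == 50265 and config.hidden_size == 768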
# HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainers args.
#
# It then prints a report once in github format with all the information that needs to be shared
# with others and second time in a console-friendly format, so it's easier to use for tuning things up.
#
# The main idea is:
#
# ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \
# --target-metric-key train_samples_per_second
#
# The variations can be any command line argument that you want to compare and not just dtype as in
# the example.
#
# --variations allows you to compare variations in multiple dimensions.
#
# as the first dimension has 2 options and the second 3 in our example, this will run the trainer 6
# times adding one of:
#
# 1. --tf32 0 --fp16 0
# 2. --tf32 0 --fp16 1
# 3. --tf32 0 --bf16 1
# 4. --tf32 1 --fp16 0
# 5. --tf32 1 --fp16 1
# 6. --tf32 1 --bf16 1
#
# and print the results. This is just a cartesian product - and more than 2 dimensions can be used.
#
# If you want to rely on defaults, this:
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1'
# is identical to this:
# --variations '--tf32 0|--tf32 1' '|--fp16|--bf16'
#
# the leading empty variation in the 2nd dimension is a valid variation.
#
# So here we get the following 6 variations:
#
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
#
# In this particular case we don't know what the default tf32 setting is (as it's normally
# pytorch-version dependent). That's why it's best to do an explicit setting of each variation:
# `--tf32 0|--tf32 1`
#
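# For reference, the cartesian product the tool builds internally can be sketched
# with itertools (illustrative snippet, not part of the tool's CLI):
#
#   import itertools
#   dims = [["--tf32 0", "--tf32 1"], ["", "--fp16", "--bf16"]]
#   variations = [" ".join(v).strip() for v in itertools.product(*dims)]
#   # ['--tf32 0', '--tf32 0 --fp16', '--tf32 0 --bf16',
#   #  '--tf32 1', '--tf32 1 --fp16', '--tf32 1 --bf16']
#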
# Here is a full example of a train:
#
# CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
# --base-cmd \
# ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \
# --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \
# --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \
# --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \
# --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \
# --source_prefix "translate English to Romanian: " --warmup_steps 50 \
# --max_train_samples 20000 --dataloader_num_workers 2 ' \
# --target-metric-key train_samples_per_second --repeat-times 1 --variations \
# '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \
# --repeat-times 1 --base-variation '--tf32 0'
#
# and here is a possible output:
#
#
# | Variation | Train | Diff | Train |
# | | samples | % | loss |
# | | per | | |
# | | second | | |
# |:----------------|----------:|-------:|--------:|
# | --tf32 0 | 285.11 | 0 | 2.51 |
# | --tf32 1 | 342.09 | 20 | 2.51 |
# | --fp16 --tf32 0 | 423.49 | 49 | 2.51 |
# | --fp16 --tf32 1 | 423.13 | 48 | 2.51 |
# | --bf16 --tf32 0 | 416.80 | 46 | 2.52 |
# | --bf16 --tf32 1 | 415.87 | 46 | 2.52 |
#
#
# So you can quickly compare the different outcomes.
#
# Typically running each experiment once is enough, but if the environment is unstable you can
# re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results.
#
# By default it'll use the lowest result as the base line to use as 100% and then compare the rest to
# it as can be seen from the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
nan = float("nan")
class Tee:
    """A helper class to tee print's output into a file.

    Usage: sys.stdout = Tee(filename)
    """

    def __init__(self, filename):
        self.stdout = sys.stdout
        self.file = open(filename, "a")

    def __getattr__(self, attr):
        return getattr(self.stdout, attr)

    def write(self, msg):
        self.stdout.write(msg)
        # strip tqdm codes
        self.file.write(re.sub(r"^.*\r", "", msg, 0, re.M))
def get_original_command(max_width=80, full_python_path=False):
    """
    Return the original command line string that can be replayed nicely and wrapped for 80-char width.

    Args:
        max_width (`int`, defaults to 80): the width to wrap for.
        full_python_path (`bool`, defaults to `False`): whether to replicate the full path or just the last segment.
    """
    cmd = []

    # deal with critical env vars
    env_keys = ["CUDA_VISIBLE_DEVICES"]
    for key in env_keys:
        val = os.environ.get(key, None)
        if val is not None:
            cmd.append(f"{key}={val}")

    # python executable (not always needed if the script is executable)
    python = sys.executable if full_python_path else sys.executable.split("/")[-1]
    cmd.append(python)

    # now the normal args
    cmd += list(map(shlex.quote, sys.argv))

    # split up into up to MAX_WIDTH lines with shell multi-line escapes
    lines = []
    current_line = ""
    while len(cmd) > 0:
        current_line += f"{cmd.pop(0)} "
        if len(cmd) == 0 or len(current_line) + len(cmd[0]) + 1 > max_width - 1:
            lines.append(current_line)
            current_line = ""
    return "\\\n".join(lines)
def get_base_command(args, output_dir):
    # unwrap multi-line input
    args.base_cmd = re.sub(r"[\\\n]+", " ", args.base_cmd)

    # remove --output_dir if any and set our own
    args.base_cmd = re.sub(r"--output_dir\s+[^\s]+", "", args.base_cmd)
    args.base_cmd += f" --output_dir {output_dir}"

    # ensure we have --overwrite_output_dir
    args.base_cmd = re.sub(r"--overwrite_output_dir\s+", "", args.base_cmd)
    args.base_cmd += " --overwrite_output_dir"

    return [sys.executable] + shlex.split(args.base_cmd)
def process_run_single(id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose):
    # enable the next block to debug everything but the run itself, quickly and with fake metrics
    if 0:
        import random
        from time import sleep

        sleep(0)
        return dict(
            {k: random.uniform(0, 100) for k in metric_keys},
            **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.22222222])},
        )

    result = subprocess.run(cmd, capture_output=True, text=True)

    if verbose:
        print("STDOUT", result.stdout)
        print("STDERR", result.stderr)

    # save the streams
    prefix = variation.replace(" ", "-")
    with open(Path(output_dir) / f"log.{prefix}.stdout.txt", "w") as f:
        f.write(result.stdout)
    with open(Path(output_dir) / f"log.{prefix}.stderr.txt", "w") as f:
        f.write(result.stderr)

    if result.returncode != 0:
        if verbose:
            print("failed")
        return {target_metric_key: nan}

    with io.open(f"{output_dir}/all_results.json", "r", encoding="utf-8") as f:
        metrics = json.load(f)

    # filter out just the keys we want
    return {k: v for k, v in metrics.items() if k in metric_keys}
def process_run(
    id,
    cmd,
    variation_key,
    variation,
    longest_variation_len,
    target_metric_key,
    report_metric_keys,
    repeat_times,
    output_dir,
    verbose,
):
    results = []
    metrics = []
    preamble = f"{id}: {variation:<{longest_variation_len}}"
    outcome = f"{preamble}: "
    metric_keys = set(report_metric_keys + [target_metric_key])
    for i in tqdm(range(repeat_times), desc=preamble, leave=False):
        single_run_metrics = process_run_single(
            id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose
        )
        result = single_run_metrics[target_metric_key]
        if not math.isnan(result):
            metrics.append(single_run_metrics)
            results.append(result)
            outcome += "✓"
        else:
            outcome += "✘"
    outcome = f"\33[2K\r{outcome}"
    if len(metrics) > 0:
        mean_metrics = {k: fmean([x[k] for x in metrics]) for k in metrics[0].keys()}
        mean_target = round(mean_metrics[target_metric_key], 2)
        results_str = f"{outcome} {mean_target}"
        if len(results) > 1:
            results_str += f" {tuple(round(x, 2) for x in results)}"
        print(results_str)
        mean_metrics[variation_key] = variation
        return mean_metrics
    else:
        print(outcome)
        return {variation_key: variation, target_metric_key: nan}
def get_versions():
    properties = torch.cuda.get_device_properties(torch.device("cuda"))
    return f"""
Datetime    : {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")}

Software:
transformers: {transformers.__version__}
torch       : {torch.__version__}
cuda        : {torch.version.cuda}
python      : {platform.python_version()}

Hardware:
{torch.cuda.device_count()} GPUs      : {properties.name}, {properties.total_memory/2**30:0.2f}GB
"""
def process_results(results, target_metric_key, report_metric_keys, base_variation, output_dir):
    df = pd.DataFrame(results)
    variation_key = "variation"
    diff_key = "diff_%"

    sentinel_value = nan
    if base_variation is not None and len(df[df[variation_key] == base_variation]):
        # this may still return nan
        sentinel_value = df.loc[df[variation_key] == base_variation][target_metric_key].item()
    if math.isnan(sentinel_value):
        # as a fallback, use the minimal value as the sentinel
        sentinel_value = df.loc[df[target_metric_key] != nan][target_metric_key].min()

    # create diff column if possible
    if not math.isnan(sentinel_value):
        df[diff_key] = df.apply(
            lambda r: round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value)
            if not math.isnan(r[target_metric_key])
            else 0,
            axis="columns",
        )

    # re-order columns
    cols = [variation_key, target_metric_key, diff_key, *report_metric_keys]
    df = df.reindex(cols, axis="columns")  # reorder cols

    # capitalize
    df = df.rename(str.capitalize, axis="columns")

    # make the cols as narrow as possible
    df_github = df.rename(lambda c: c.replace("_", "<br>"), axis="columns")
    df_console = df.rename(lambda c: c.replace("_", "\n"), axis="columns")

    report = ["", "Copy between the cut-here-lines and paste as is to github or a forum"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results:", df_github.to_markdown(index=False, floatfmt=".2f")]
    report += ["```"]
    report += ["*** Setup:", get_versions()]
    report += ["*** The benchmark command line was:", get_original_command()]
    report += ["```"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results (console):", df_console.to_markdown(index=False, floatfmt=".2f")]

    print("\n\n".join(report))
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--base-cmd", default=None, type=str, required=True, help="Base cmd",
    )
    parser.add_argument(
        "--variations", default=None, type=str, nargs="+", required=True,
        help="Multi-dimensional variations, example: '|--fp16|--bf16' '|--tf32'",
    )
    parser.add_argument(
        "--base-variation", default=None, type=str,
        help="Baseline variation to compare to. if None the minimal target value will be used to compare against",
    )
    parser.add_argument(
        "--target-metric-key", default=None, type=str, required=True,
        help="Target metric key in output_dir/all_results.json, e.g., train_samples_per_second",
    )
    parser.add_argument(
        "--report-metric-keys", default="", type=str,
        help="Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., 'train_loss train_samples'",
    )
    parser.add_argument(
        "--repeat-times", default=1, type=int,
        help="How many times to re-run each variation - an average will be reported",
    )
    parser.add_argument(
        "--output_dir", default="output_benchmark", type=str,
        help="The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked",
    )
    parser.add_argument(
        "--verbose", default=False, action="store_true",
        help="Whether to show the outputs of each run or just the benchmark progress",
    )
    args = parser.parse_args()

    output_dir = args.output_dir
    Path(output_dir).mkdir(exist_ok=True)
    base_cmd = get_base_command(args, output_dir)

    # split each dimension into its --foo variations
    dims = [list(map(str.strip, re.split(r"\|", x))) for x in args.variations]
    # build a cartesian product of dimensions and convert those back into cmd-line arg strings,
    # while stripping white space for inputs that were empty
    variations = list(map(str.strip, map(" ".join, itertools.product(*dims))))
    longest_variation_len = max(len(x) for x in variations)

    # split wanted keys
    report_metric_keys = args.report_metric_keys.split()

    # capture prints into a log file for convenience
    report_fn = f"benchmark-report-{datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')}.txt"
    print(f"\nNote: each run's output is also logged under {output_dir}/log.*.std*.txt")
    print(f"and this script's output is also piped into {report_fn}")

    sys.stdout = Tee(report_fn)

    print(f"\n*** Running {len(variations)} benchmarks:")
    print(f"Base command: {' '.join(base_cmd)}")

    variation_key = "variation"
    results = []
    for id, variation in enumerate(tqdm(variations, desc="Total completion: ", leave=False)):
        cmd = base_cmd + variation.split()
        results.append(
            process_run(
                id + 1,
                cmd,
                variation_key,
                variation,
                longest_variation_len,
                args.target_metric_key,
                report_metric_keys,
                args.repeat_times,
                output_dir,
                args.verbose,
            )
        )

    process_results(results, args.target_metric_key, report_metric_keys, args.base_variation, output_dir)
if __name__ == "__main__":
main()
'''simple docstring'''
from __future__ import annotations
import math
from collections.abc import Callable
def line_length(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    """Approximates the arc length of `fnc` on [x_start, x_end] by summing the
    lengths of `steps` straight-line segments along the curve."""
    x1 = x_start
    fx1 = fnc(x_start)
    length = 0.0

    for _ in range(steps):
        # Approximates curve as a sequence of linear lines and sums their length
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        length += math.hypot(x2 - x1, fx2 - fx1)

        # Increment step
        x1 = x2
        fx1 = fx2

    return length


if __name__ == "__main__":

    def f(x):
        return math.sin(10 * x)

    print("f(x) = sin(10 * x)")
    print("The length of the curve from x = -10 to x = 10 is:")
    i = 10
    while i <= 100000:
        print(f"With {i} steps: {line_length(f, -10, 10, i)}")
        i *= 10
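
# Sanity check (illustrative): for f(x) = x the curve from 0 to 1 is a straight line
# of length sqrt(2), and the piecewise-linear approximation is exact for any step count:
#   line_length(lambda x: x, 0, 1, 10)  # -> 1.4142135623730951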
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByT5Tokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
class ByT5TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ByT5Tokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = ByT5Tokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def t5_base_tokenizer(self):
        return ByT5Tokenizer.from_pretrained("google/byt5-small")

    def get_tokenizer(self, **kwargs) -> ByT5Tokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                pass
            toks.append((i, tok))

        toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids
    def test_eos_treatment(self):
        tokenizer = self.t5_base_tokenizer
        batch_with_eos_added = tokenizer(["hi</s>", "I went to the gym</s>", "</s>"])
        batch_without_eos_added = tokenizer(["hi", "I went to the gym", ""])
        self.assertListEqual(batch_with_eos_added["input_ids"], batch_without_eos_added["input_ids"])

    def test_multibytes_char(self):
        tokenizer = self.t5_base_tokenizer
        src_text = "Unicode €."
        encoded = tokenizer(src_text)
        encoded_ids = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
        self.assertEqual(encoded["input_ids"], encoded_ids)

        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "Unicode €.</s>")

        encoded = tokenizer("e è é ê ë")
        encoded_ids = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
        self.assertEqual(encoded["input_ids"], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "e è é ê ë</s>")

        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë")), "e è é ê ë</s>")

    def test_prepare_batch_integration(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        # fmt: off
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)

        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0])
        else:
            result = list(batch.input_ids.tolist()[0])

        self.assertListEqual(expected_src_tokens, result)

        self.assertEqual((2, 37), batch.input_ids.shape)
        self.assertEqual((2, 37), batch.attention_mask.shape)

    def test_empty_target_text(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertNotIn("decoder_input_ids", batch)
        self.assertNotIn("decoder_attention_mask", batch)

    def test_max_length_integration(self):
        tokenizer = self.t5_base_tokenizer
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors=FRAMEWORK
        )
        self.assertEqual(32, targets["input_ids"].shape[1])

    def test_eos_in_input(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ["A long paragraph for summarization. </s>"]
        tgt_text = ["Summary of the text. </s>"]
        # fmt: off
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
        expected_tgt_tokens = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
        # fmt: on
        batch = tokenizer(src_text, text_target=tgt_text)
        self.assertEqual(expected_src_tokens, batch["input_ids"][0])
        self.assertEqual(expected_tgt_tokens, batch["labels"][0])
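    # Background note (illustrative): ByT5 tokenizes raw UTF-8 bytes with a 3-id offset
    # for the special tokens (pad=0, eos=1, unk=2), so byte b maps to id b + 3. That is
    # why "A" (0x41 = 65) appears as 68, a space (32) as 35, and every expected sequence
    # above ends with the EOS id 1.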
    def test_save_and_load_tokenizer(self):
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)

        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)

                shutil.rmtree(tmpdirname)

        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                tokenizer.add_tokens(["bim", "bambam"])
                additional_special_tokens = tokenizer.additional_special_tokens
                additional_special_tokens.append("new_additional_special_token")
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)

                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)

                shutil.rmtree(tmpdirname)
    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))

        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                    special_tokens_map = json.load(json_file)

                with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                    tokenizer_config = json.load(json_file)

                added_tokens_extra_ids = [f"<extra_id_{i}>" for i in range(125)]

                special_tokens_map["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                tokenizer_config["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                    json.dump(tokenizer_config, outfile)

                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer
                # takes into account the new value of additional_special_tokens given in the "tokenizer_config.json"
                # and "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(
                    tmp_dir,
                )
                self.assertIn(
                    "an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens
                )
                # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    ["an_additional_special_token"],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"])
                    ),
                )

                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir,
                    additional_special_tokens=new_added_tokens,
                )

                self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens)
                self.assertEqual(
                    ["a_new_additional_special_token"],
                    tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"])
                    ),
                )
    def test_decode_single_bytes(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))

        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                tokenizer = tokenizer_class.from_pretrained(tmp_dir)

                self.assertTrue(tokenizer.decode([255]) == "")

    # tokenizer can be instantiated without any pretrained files, so no need for a pretrained tokenizer list
    def test_pretrained_model_lists(self):
        pass

    # tokenizer does not have a vocabulary
    def test_get_vocab(self):
        pass

    # inputs cannot be pretokenized since ids depend on the whole input string
    def test_pretokenized_inputs(self):
        pass

    # tests all ids in vocab => vocab doesn't exist so it is unnecessary to test
    def test_conversion_reversible(self):
        pass

    def test_convert_tokens_to_string_format(self):
        # The default common tokenizer tests use invalid tokens for ByT5, which can only accept
        # one-character strings and special added tokens as tokens
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                tokens = ["t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "x", "t", "</s>"]
                string = tokenizer.convert_tokens_to_string(tokens)
                self.assertIsInstance(string, str)

    # We need a different implementation of the test of the same name defined in TokenizerTesterMixin
    # because this tokenizer doesn't have a vocab
    def test_tokenizers_common_ids_setters(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                attributes_list = [
                    "bos_token",
                    "eos_token",
                    "unk_token",
                    "sep_token",
                    "pad_token",
                    "cls_token",
                    "mask_token",
                ]

                token_id_to_test_setters = 0
                token_to_test_setters = tokenizer.convert_ids_to_tokens(
                    token_id_to_test_setters, skip_special_tokens=False
                )

                for attr in attributes_list:
                    setattr(tokenizer, attr + "_id", None)
                    self.assertEqual(getattr(tokenizer, attr), None)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), None)

                    setattr(tokenizer, attr + "_id", token_id_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr), token_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), token_id_to_test_setters)

                setattr(tokenizer, "additional_special_tokens_ids", [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [])

                setattr(tokenizer, "additional_special_tokens_ids", [token_id_to_test_setters])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [token_to_test_setters])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [token_id_to_test_setters])
'''simple docstring'''
def is_ip_v4_address_valid(ip_v4_address: str) -> bool:
    octets = [int(i) for i in ip_v4_address.split(".") if i.isdigit()]
    return len(octets) == 4 and all(0 <= int(octet) <= 254 for octet in octets)


if __name__ == "__main__":
    ip = input().strip()
    valid_or_invalid = "valid" if is_ip_v4_address_valid(ip) else "invalid"
    print(f"""{ip} is a {valid_or_invalid} IP v4 address.""")
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
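
# Pattern note (illustrative): the try/except above substitutes dummy placeholder
# objects when `torch` or a recent-enough `transformers` is unavailable, so that
# importing these pipelines fails lazily with a helpful error message instead of
# failing at package import time.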
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_2d_blocks import UNetMidBlock2D, get_down_block, get_up_block
@dataclass
class DecoderOutput(BaseOutput):
    """
    Output of decoding method.

    Args:
        sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Decoded output sample of the model.
    """

    sample: torch.FloatTensor
class Encoder(nn.Module):
    def __init__(
        self,
        in_channels=3,
        out_channels=3,
        down_block_types=("DownEncoderBlock2D",),
        block_out_channels=(64,),
        layers_per_block=2,
        norm_num_groups=32,
        act_fn="silu",
        double_z=True,
    ):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = torch.nn.Conv2d(
            in_channels,
            block_out_channels[0],
            kernel_size=3,
            stride=1,
            padding=1,
        )

        self.mid_block = None
        self.down_blocks = nn.ModuleList([])

        # down
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(
                down_block_type,
                num_layers=self.layers_per_block,
                in_channels=input_channel,
                out_channels=output_channel,
                add_downsample=not is_final_block,
                resnet_eps=1e-6,
                downsample_padding=0,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                attention_head_dim=output_channel,
                temb_channels=None,
            )
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1],
            resnet_eps=1e-6,
            resnet_act_fn=act_fn,
            output_scale_factor=1,
            resnet_time_scale_shift="default",
            attention_head_dim=block_out_channels[-1],
            resnet_groups=norm_num_groups,
            temb_channels=None,
        )

        # out
        self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[-1], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()

        conv_out_channels = 2 * out_channels if double_z else out_channels
        self.conv_out = nn.Conv2d(block_out_channels[-1], conv_out_channels, 3, padding=1)

        self.gradient_checkpointing = False

    def forward(self, x):
        sample = x
        sample = self.conv_in(sample)

        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            # down
            if is_torch_version(">=", "1.11.0"):
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(down_block), sample, use_reentrant=False
                    )
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, use_reentrant=False
                )
            else:
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(down_block), sample)
                # middle
                sample = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block), sample)

        else:
            # down
            for down_block in self.down_blocks:
                sample = down_block(sample)

            # middle
            sample = self.mid_block(sample)

        # post-process
        sample = self.conv_norm_out(sample)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)

        return sample
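
# Shape sketch (illustrative, under the default arguments above): the Encoder maps
# (B, in_channels, H, W) to (B, 2 * out_channels, H / f, W / f) when double_z=True,
# where f = 2 ** (len(block_out_channels) - 1), e.g.:
#   enc = Encoder(in_channels=3, out_channels=4, block_out_channels=(64,))
#   enc(torch.randn(1, 3, 32, 32)).shape  # -> torch.Size([1, 8, 32, 32])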
class Decoder(nn.Module):
    def __init__(
        self,
        in_channels=3,
        out_channels=3,
        up_block_types=("UpDecoderBlock2D",),
        block_out_channels=(64,),
        layers_per_block=2,
        norm_num_groups=32,
        act_fn="silu",
        norm_type="group",  # group, spatial
    ):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = nn.Conv2d(
            in_channels,
            block_out_channels[-1],
            kernel_size=3,
            stride=1,
            padding=1,
        )

        self.mid_block = None
        self.up_blocks = nn.ModuleList([])

        temb_channels = in_channels if norm_type == "spatial" else None

        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1],
            resnet_eps=1e-6,
            resnet_act_fn=act_fn,
            output_scale_factor=1,
            resnet_time_scale_shift="default" if norm_type == "group" else norm_type,
            attention_head_dim=block_out_channels[-1],
            resnet_groups=norm_num_groups,
            temb_channels=temb_channels,
        )

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]

            is_final_block = i == len(block_out_channels) - 1

            up_block = get_up_block(
                up_block_type,
                num_layers=self.layers_per_block + 1,
                in_channels=prev_output_channel,
                out_channels=output_channel,
                prev_output_channel=None,
                add_upsample=not is_final_block,
                resnet_eps=1e-6,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                attention_head_dim=output_channel,
                temb_channels=temb_channels,
                resnet_time_scale_shift=norm_type,
            )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        if norm_type == "spatial":
            self.conv_norm_out = SpatialNorm(block_out_channels[0], temb_channels)
        else:
            self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()
        self.conv_out = nn.Conv2d(block_out_channels[0], out_channels, 3, padding=1)

        self.gradient_checkpointing = False

    def forward(self, z, latent_embeds=None):
        sample = z
        sample = self.conv_in(sample)

        upscale_dtype = next(iter(self.up_blocks.parameters())).dtype
        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            if is_torch_version(">=", "1.11.0"):
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds, use_reentrant=False
                )
                sample = sample.to(upscale_dtype)

                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(up_block), sample, latent_embeds, use_reentrant=False
                    )
            else:
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds
                )
                sample = sample.to(upscale_dtype)

                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(up_block), sample, latent_embeds)
        else:
            # middle
            sample = self.mid_block(sample, latent_embeds)
            sample = sample.to(upscale_dtype)

            # up
            for up_block in self.up_blocks:
                sample = up_block(sample, latent_embeds)

        # post-process
        if latent_embeds is None:
            sample = self.conv_norm_out(sample)
        else:
            sample = self.conv_norm_out(sample, latent_embeds)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)

        return sample
class VectorQuantizer(nn.Module):
    """
    Discretization bottleneck of a VQ-VAE: maps each spatial encoder feature to its
    nearest codebook entry, with optional post-hoc remapping of indices.
    """

    # NOTE: due to a bug the beta term was applied to the wrong term; for backwards
    # compatibility the buggy version is used by default (legacy=True).
    def __init__(
        self, n_e, vq_embed_dim, beta, remap=None, unknown_index="random", sane_index_shape=False, legacy=True
    ):
        super().__init__()
        self.n_e = n_e
        self.vq_embed_dim = vq_embed_dim
        self.beta = beta
        self.legacy = legacy

        self.embedding = nn.Embedding(self.n_e, self.vq_embed_dim)
        self.embedding.weight.data.uniform_(-1.0 / self.n_e, 1.0 / self.n_e)

        self.remap = remap
        if self.remap is not None:
            self.register_buffer("used", torch.tensor(np.load(self.remap)))
            self.re_embed = self.used.shape[0]
            self.unknown_index = unknown_index  # "random" or "extra" or integer
            if self.unknown_index == "extra":
                self.unknown_index = self.re_embed
                self.re_embed = self.re_embed + 1
            print(
                f"Remapping {self.n_e} indices to {self.re_embed} indices. "
                f"Using {self.unknown_index} for unknown indices."
            )
        else:
            self.re_embed = n_e

        self.sane_index_shape = sane_index_shape

    def remap_to_used(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        match = (inds[:, :, None] == used[None, None, ...]).long()
        new = match.argmax(-1)
        unknown = match.sum(2) < 1
        if self.unknown_index == "random":
            new[unknown] = torch.randint(0, self.re_embed, size=new[unknown].shape).to(device=new.device)
        else:
            new[unknown] = self.unknown_index
        return new.reshape(ishape)

    def unmap_to_all(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        if self.re_embed > self.used.shape[0]:  # extra token
            inds[inds >= self.used.shape[0]] = 0  # simply set to zero
        back = torch.gather(used[None, :][inds.shape[0] * [0], :], 1, inds)
        return back.reshape(ishape)

    def forward(self, z):
        # reshape z -> (batch, height, width, channel) and flatten
        z = z.permute(0, 2, 3, 1).contiguous()
        z_flattened = z.view(-1, self.vq_embed_dim)

        # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
        min_encoding_indices = torch.argmin(torch.cdist(z_flattened, self.embedding.weight), dim=1)

        z_q = self.embedding(min_encoding_indices).view(z.shape)
        perplexity = None
        min_encodings = None

        # compute loss for embedding
        if not self.legacy:
            loss = self.beta * torch.mean((z_q.detach() - z) ** 2) + torch.mean((z_q - z.detach()) ** 2)
        else:
            loss = torch.mean((z_q.detach() - z) ** 2) + self.beta * torch.mean((z_q - z.detach()) ** 2)

        # preserve gradients
        z_q = z + (z_q - z).detach()

        # reshape back to match original input shape
        z_q = z_q.permute(0, 3, 1, 2).contiguous()

        if self.remap is not None:
            min_encoding_indices = min_encoding_indices.reshape(z.shape[0], -1)  # add batch axis
            min_encoding_indices = self.remap_to_used(min_encoding_indices)
            min_encoding_indices = min_encoding_indices.reshape(-1, 1)  # flatten

        if self.sane_index_shape:
            min_encoding_indices = min_encoding_indices.reshape(z_q.shape[0], z_q.shape[2], z_q.shape[3])

        return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
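    # Note (illustrative): `z_q = z + (z_q - z).detach()` above is the straight-through
    # estimator: the forward pass returns the quantized codes, while gradients flow
    # unchanged to the encoder output `z`, bypassing the non-differentiable argmin.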
    def get_codebook_entry(self, indices, shape):
        # shape specifying (batch, height, width, channel)
        if self.remap is not None:
            indices = indices.reshape(shape[0], -1)  # add batch axis
            indices = self.unmap_to_all(indices)
            indices = indices.reshape(-1)  # flatten again

        # get quantized latent vectors
        z_q = self.embedding(indices)

        if shape is not None:
            z_q = z_q.view(shape)
            # reshape back to match original input shape
            z_q = z_q.permute(0, 3, 1, 2).contiguous()

        return z_q
class DiagonalGaussianDistribution(object):
    def __init__(self, parameters, deterministic=False):
        self.parameters = parameters
        self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)
        self.logvar = torch.clamp(self.logvar, -30.0, 20.0)
        self.deterministic = deterministic
        self.std = torch.exp(0.5 * self.logvar)
        self.var = torch.exp(self.logvar)
        if self.deterministic:
            self.var = self.std = torch.zeros_like(
                self.mean, device=self.parameters.device, dtype=self.parameters.dtype
            )

    def sample(self, generator: Optional[torch.Generator] = None) -> torch.FloatTensor:
        # make sure sample is on the same device as the parameters and has same dtype
        sample = randn_tensor(
            self.mean.shape, generator=generator, device=self.parameters.device, dtype=self.parameters.dtype
        )
        x = self.mean + self.std * sample
        return x

    def kl(self, other=None):
        if self.deterministic:
            return torch.Tensor([0.0])
        else:
            if other is None:
                return 0.5 * torch.sum(torch.pow(self.mean, 2) + self.var - 1.0 - self.logvar, dim=[1, 2, 3])
            else:
                return 0.5 * torch.sum(
                    torch.pow(self.mean - other.mean, 2) / other.var
                    + self.var / other.var
                    - 1.0
                    - self.logvar
                    + other.logvar,
                    dim=[1, 2, 3],
                )

    def nll(self, sample, dims=[1, 2, 3]):
        if self.deterministic:
            return torch.Tensor([0.0])
        logtwopi = np.log(2.0 * np.pi)
        return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var, dim=dims)

    def mode(self):
        return self.mean
"""simple docstring"""
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
_CITATION = '\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n'
_DESCRIPTION = '\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metric is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n'
_KWARGS_DESCRIPTION = '\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n    predictions: list of generated text to score. Each prediction\n        should be a string with tokens separated by spaces.\n    references: list of reference for each prediction. Each\n        reference should be a string with tokens separated by spaces.\nOptional Args:\n    num_buckets: the size of the histogram to quantize P and Q. Options: \'auto\' (default) or an integer\n    pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n    kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n    kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n    kmeans_max_iter: maximum number of k-means iterations. Default 500\n    featurize_model_name: name of the model from which features are obtained. Default \'gpt2-large\' Use one of [\'gpt2\', \'gpt2-medium\', \'gpt2-large\', \'gpt2-xl\'].\n    device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n    max_text_length: maximum number of tokens to consider. Default 1024\n    divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n    mauve_scaling_factor: "c" from the paper. Default 5.\n    verbose: If True (default), print running time updates\n    seed: random seed to initialize k-means cluster assignments.\nReturns:\n    mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n    frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,\n    divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n    p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n    q_hist: same as above, but with q_text.\nExamples:\n\n    >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n    >>> import datasets\n    >>> mauve = datasets.load_metric(\'mauve\')\n    >>> predictions = ["hello there", "general kenobi"]\n    >>> references = ["hello there", "general kenobi"]\n    >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n    >>> print(out.mauve) # doctest: +SKIP\n    1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Mauve(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://github.com/krishnap25/mauve",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/krishnap25/mauve"],
            reference_urls=[
                "https://arxiv.org/abs/2102.01454",
                "https://github.com/krishnap25/mauve",
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        p_features=None,
        q_features=None,
        p_tokens=None,
        q_tokens=None,
        num_buckets="auto",
        pca_max_data=-1,
        kmeans_explained_var=0.9,
        kmeans_num_redo=5,
        kmeans_max_iter=500,
        featurize_model_name="gpt2-large",
        device_id=-1,
        max_text_length=1024,
        divergence_curve_discretization_size=25,
        mauve_scaling_factor=5,
        verbose=True,
        seed=25,
    ):
        out = compute_mauve(
            p_text=predictions,
            q_text=references,
            p_features=p_features,
            q_features=q_features,
            p_tokens=p_tokens,
            q_tokens=q_tokens,
            num_buckets=num_buckets,
            pca_max_data=pca_max_data,
            kmeans_explained_var=kmeans_explained_var,
            kmeans_num_redo=kmeans_num_redo,
            kmeans_max_iter=kmeans_max_iter,
            featurize_model_name=featurize_model_name,
            device_id=device_id,
            max_text_length=max_text_length,
            divergence_curve_discretization_size=divergence_curve_discretization_size,
            mauve_scaling_factor=mauve_scaling_factor,
            verbose=verbose,
            seed=seed,
        )
        return out
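
# Minimal usage sketch (mirrors the docstring above; requires the `mauve-text` package):
#   import datasets
#   mauve = datasets.load_metric("mauve")
#   out = mauve.compute(predictions=["hello there"], references=["hello there"])
#   print(out.mauve)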
"""simple docstring"""
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeq2SeqConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)

T5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''t5-small''': '''https://huggingface.co/t5-small/resolve/main/config.json''',
'''t5-base''': '''https://huggingface.co/t5-base/resolve/main/config.json''',
'''t5-large''': '''https://huggingface.co/t5-large/resolve/main/config.json''',
'''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/config.json''',
'''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/config.json''',
}
class T5Config(PretrainedConfig):
    model_type = "t5"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=32128,
        d_model=512,
        d_kv=64,
        d_ff=2048,
        num_layers=6,
        num_decoder_layers=None,
        num_heads=8,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            **kwargs,
        )


class T5OnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
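
# Illustrative check of the activation parsing above: "gated-gelu" is split into
# is_gated_act=True plus the backward-compat dense_act_fn="gelu_new":
#   config = T5Config(feed_forward_proj="gated-gelu")
#   assert config.is_gated_act and config.dense_act_fn == "gelu_new"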
"""simple docstring"""
import pickle
import numpy as np
from matplotlib import pyplot as plt
class CNN:
    def __init__(self, conv1_get, size_p1, bp_num1, bp_num2, bp_num3, rate_w=0.2, rate_t=0.2):
        """
        :param conv1_get: [size, number, step] of the convolution kernel
        :param size_p1: pooling size
        :param bp_num1: units number of the flatten layer
        :param bp_num2: units number of the hidden layer
        :param bp_num3: units number of the output layer
        :param rate_w: learning rate of the weights
        :param rate_t: learning rate of the thresholds
        """
        self.num_bp1 = bp_num1
        self.num_bp2 = bp_num2
        self.num_bp3 = bp_num3
        self.conv1 = conv1_get[:2]
        self.step_conv1 = conv1_get[2]
        self.size_pooling1 = size_p1
        self.rate_weight = rate_w
        self.rate_thre = rate_t
        self.w_conv1 = [
            np.mat(-1 * np.random.rand(self.conv1[0], self.conv1[0]) + 0.5) for i in range(self.conv1[1])
        ]
        self.wkj = np.mat(-1 * np.random.rand(self.num_bp3, self.num_bp2) + 0.5)
        self.vji = np.mat(-1 * np.random.rand(self.num_bp2, self.num_bp1) + 0.5)
        self.thre_conv1 = -2 * np.random.rand(self.conv1[1]) + 1
        self.thre_bp2 = -2 * np.random.rand(self.num_bp2) + 1
        self.thre_bp3 = -2 * np.random.rand(self.num_bp3) + 1
    def save_model(self, save_path):
        # save all model parameters as a pickle file
        model_dic = {
            "num_bp1": self.num_bp1,
            "num_bp2": self.num_bp2,
            "num_bp3": self.num_bp3,
            "conv1": self.conv1,
            "step_conv1": self.step_conv1,
            "size_pooling1": self.size_pooling1,
            "rate_weight": self.rate_weight,
            "rate_thre": self.rate_thre,
            "w_conv1": self.w_conv1,
            "wkj": self.wkj,
            "vji": self.vji,
            "thre_conv1": self.thre_conv1,
            "thre_bp2": self.thre_bp2,
            "thre_bp3": self.thre_bp3,
        }
        with open(save_path, "wb") as f:
            pickle.dump(model_dic, f)

        print(f"Model saved: {save_path}")
    @classmethod
    def ReadModel(cls, model_path):
        # restore a model previously written by save_model
        with open(model_path, "rb") as f:
            model_dic = pickle.load(f)  # noqa: S301

        conv_get = model_dic.get("conv1")
        conv_get.append(model_dic.get("step_conv1"))
        size_p1 = model_dic.get("size_pooling1")
        bp1 = model_dic.get("num_bp1")
        bp2 = model_dic.get("num_bp2")
        bp3 = model_dic.get("num_bp3")
        r_w = model_dic.get("rate_weight")
        r_t = model_dic.get("rate_thre")
        # create model instance
        conv_ins = CNN(conv_get, size_p1, bp1, bp2, bp3, r_w, r_t)
        # modify model parameter
        conv_ins.w_conv1 = model_dic.get("w_conv1")
        conv_ins.wkj = model_dic.get("wkj")
        conv_ins.vji = model_dic.get("vji")
        conv_ins.thre_conv1 = model_dic.get("thre_conv1")
        conv_ins.thre_bp2 = model_dic.get("thre_bp2")
        conv_ins.thre_bp3 = model_dic.get("thre_bp3")
        return conv_ins
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Any:
'''simple docstring'''
        return 1 / (1 + np.exp(-1 * __UpperCamelCase ))
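    # The logistic sigmoid above squashes activations into (0, 1); the rounding
    # helper below is only used to format predicted outputs to three decimals.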
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Optional[int]:
'''simple docstring'''
return round(__UpperCamelCase ,3 )
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Tuple:
'''simple docstring'''
lowercase_ : Dict = convs[0]
lowercase_ : Any = convs[1]
lowercase_ : Optional[Any] = np.shape(__UpperCamelCase )[0]
# get the data slice of original image data, data_focus
lowercase_ : Tuple = []
for i_focus in range(0 ,size_data - size_conv + 1 ,__UpperCamelCase ):
for j_focus in range(0 ,size_data - size_conv + 1 ,__UpperCamelCase ):
lowercase_ : List[Any] = data[
i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
]
data_focus.append(__UpperCamelCase )
# calculate the feature map of every single kernel, and saved as list of matrix
lowercase_ : Dict = []
lowercase_ : Dict = int((size_data - size_conv) / conv_step + 1 )
for i_map in range(__UpperCamelCase ):
lowercase_ : Tuple = []
for i_focus in range(len(__UpperCamelCase ) ):
lowercase_ : Optional[int] = (
np.sum(np.multiply(data_focus[i_focus] ,w_convs[i_map] ) )
- thre_convs[i_map]
)
featuremap.append(self.sig(__UpperCamelCase ) )
lowercase_ : Optional[int] = np.asmatrix(__UpperCamelCase ).reshape(
__UpperCamelCase ,__UpperCamelCase )
data_featuremap.append(__UpperCamelCase )
        # expand each data slice to one dimension
        lowercase_ : Optional[int] = []
        for each_focus in data_focus:
            focusa_list.extend(self.Expand_Mat(each_focus ) )
        lowercase_ : str = np.asarray(focusa_list )
        return focus_list, data_featuremap
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase="average_pool" ) -> Tuple:
'''simple docstring'''
lowercase_ : Union[str, Any] = len(featuremaps[0] )
lowercase_ : str = int(size_map / size_pooling )
lowercase_ : Optional[int] = []
for i_map in range(len(__UpperCamelCase ) ):
lowercase_ : int = featuremaps[i_map]
lowercase_ : List[str] = []
for i_focus in range(0 ,__UpperCamelCase ,__UpperCamelCase ):
for j_focus in range(0 ,__UpperCamelCase ,__UpperCamelCase ):
lowercase_ : List[str] = feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(__UpperCamelCase ) )
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(__UpperCamelCase ) )
lowercase_ : Dict = np.asmatrix(__UpperCamelCase ).reshape(__UpperCamelCase ,__UpperCamelCase )
featuremap_pooled.append(__UpperCamelCase )
return featuremap_pooled
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Any:
'''simple docstring'''
lowercase_ : Tuple = []
for i in range(len(__UpperCamelCase ) ):
lowercase_ : Optional[Any] = np.shape(data[i] )
lowercase_ : List[str] = data[i].reshape(1 ,shapes[0] * shapes[1] )
lowercase_ : List[str] = data_listed.getA().tolist()[0]
data_expanded.extend(__UpperCamelCase )
lowercase_ : int = np.asarray(__UpperCamelCase )
return data_expanded
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> int:
'''simple docstring'''
lowercase_ : Any = np.asarray(__UpperCamelCase )
lowercase_ : Any = np.shape(__UpperCamelCase )
lowercase_ : Optional[Any] = data_mat.reshape(1 ,shapes[0] * shapes[1] )
return data_expanded
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> str:
'''simple docstring'''
lowercase_ : Any = []
lowercase_ : List[Any] = 0
for i_map in range(__UpperCamelCase ):
lowercase_ : List[str] = np.ones((size_map, size_map) )
for i in range(0 ,__UpperCamelCase ,__UpperCamelCase ):
for j in range(0 ,__UpperCamelCase ,__UpperCamelCase ):
lowercase_ : List[Any] = pd_pool[
i_pool
]
lowercase_ : Any = i_pool + 1
lowercase_ : Optional[int] = np.multiply(
__UpperCamelCase ,np.multiply(out_map[i_map] ,(1 - out_map[i_map]) ) )
pd_all.append(__UpperCamelCase )
return pd_all
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase=bool ) -> Optional[int]:
'''simple docstring'''
print('----------------------Start Training-------------------------' )
print((' - - Shape: Train_Data ', np.shape(__UpperCamelCase )) )
print((' - - Shape: Teach_Data ', np.shape(__UpperCamelCase )) )
lowercase_ : int = 0
lowercase_ : Tuple = []
lowercase_ : Tuple = 1_0000
while rp < n_repeat and mse >= error_accuracy:
lowercase_ : List[str] = 0
print(f'''-------------Learning Time {rp}--------------''' )
for p in range(len(__UpperCamelCase ) ):
# print('------------Learning Image: %d--------------'%p)
lowercase_ : int = np.asmatrix(datas_train[p] )
lowercase_ : Any = np.asarray(datas_teach[p] )
lowercase_ , lowercase_ : Tuple = self.convolute(
__UpperCamelCase ,self.conva ,self.w_conva ,self.thre_conva ,conv_step=self.step_conva ,)
lowercase_ : Any = self.pooling(__UpperCamelCase ,self.size_poolinga )
lowercase_ : Optional[int] = np.shape(__UpperCamelCase )
lowercase_ : Optional[int] = self._expand(__UpperCamelCase )
lowercase_ : int = data_bp_input
lowercase_ : Tuple = np.dot(__UpperCamelCase ,self.vji.T ) - self.thre_bpa
lowercase_ : Dict = self.sig(__UpperCamelCase )
lowercase_ : int = np.dot(__UpperCamelCase ,self.wkj.T ) - self.thre_bpa
lowercase_ : int = self.sig(__UpperCamelCase )
# --------------Model Leaning ------------------------
# calculate error and gradient---------------
lowercase_ : str = np.multiply(
(data_teach - bp_outa) ,np.multiply(__UpperCamelCase ,(1 - bp_outa) ) )
lowercase_ : Optional[int] = np.multiply(
np.dot(__UpperCamelCase ,self.wkj ) ,np.multiply(__UpperCamelCase ,(1 - bp_outa) ) )
lowercase_ : Any = np.dot(__UpperCamelCase ,self.vji )
lowercase_ : str = pd_i_all / (self.size_poolinga * self.size_poolinga)
lowercase_ : Dict = pd_conva_pooled.T.getA().tolist()
lowercase_ : List[Any] = self._calculate_gradient_from_pool(
__UpperCamelCase ,__UpperCamelCase ,shape_featuremapa[0] ,shape_featuremapa[1] ,self.size_poolinga ,)
# weight and threshold learning process---------
# convolution layer
for k_conv in range(self.conva[1] ):
lowercase_ : Optional[Any] = self._expand_mat(pd_conva_all[k_conv] )
lowercase_ : Dict = self.rate_weight * np.dot(__UpperCamelCase ,__UpperCamelCase )
lowercase_ : List[Any] = self.w_conva[k_conv] + delta_w.reshape(
(self.conva[0], self.conva[0]) )
lowercase_ : Dict = (
self.thre_conva[k_conv]
- np.sum(pd_conva_all[k_conv] ) * self.rate_thre
)
# all connected layer
lowercase_ : Optional[int] = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
lowercase_ : Any = self.vji + pd_j_all.T * bp_outa * self.rate_weight
lowercase_ : str = self.thre_bpa - pd_k_all * self.rate_thre
lowercase_ : Any = self.thre_bpa - pd_j_all * self.rate_thre
# calculate the sum error of all single image
lowercase_ : List[Any] = np.sum(abs(data_teach - bp_outa ) )
error_count += errors
# print(' ----Teach ',data_teach)
# print(' ----BP_output ',bp_out3)
lowercase_ : int = rp + 1
lowercase_ : Union[str, Any] = error_count / patterns
all_mse.append(__UpperCamelCase )
def draw_error():
lowercase_ : str = [error_accuracy for i in range(int(n_repeat * 1.2 ) )]
plt.plot(__UpperCamelCase ,'+-' )
plt.plot(__UpperCamelCase ,'r--' )
plt.xlabel('Learning Times' )
plt.ylabel('All_mse' )
plt.grid(__UpperCamelCase ,alpha=0.5 )
plt.show()
        print('------------------Training Completed---------------------' )
print((' - - Training epoch: ', rp, f''' - - Mse: {mse:.6f}''') )
if draw_e:
draw_error()
return mse
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Optional[int]:
'''simple docstring'''
lowercase_ : Union[str, Any] = []
print('-------------------Start Testing-------------------------' )
print((' - - Shape: Test_Data ', np.shape(__UpperCamelCase )) )
for p in range(len(__UpperCamelCase ) ):
lowercase_ : List[Any] = np.asmatrix(datas_test[p] )
lowercase_ , lowercase_ : Optional[Any] = self.convolute(
__UpperCamelCase ,self.conva ,self.w_conva ,self.thre_conva ,conv_step=self.step_conva ,)
lowercase_ : List[Any] = self.pooling(__UpperCamelCase ,self.size_poolinga )
lowercase_ : List[str] = self._expand(__UpperCamelCase )
lowercase_ : Any = data_bp_input
lowercase_ : Optional[Any] = bp_outa * self.vji.T - self.thre_bpa
lowercase_ : str = self.sig(__UpperCamelCase )
lowercase_ : List[str] = bp_outa * self.wkj.T - self.thre_bpa
lowercase_ : Optional[int] = self.sig(__UpperCamelCase )
produce_out.extend(bp_outa.getA().tolist() )
lowercase_ : List[str] = [list(map(self.do_round ,__UpperCamelCase ) ) for each in produce_out]
return np.asarray(__UpperCamelCase )
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Optional[Any]:
'''simple docstring'''
lowercase_ : Optional[int] = np.asmatrix(__UpperCamelCase )
lowercase_ , lowercase_ : Union[str, Any] = self.convolute(
__UpperCamelCase ,self.conva ,self.w_conva ,self.thre_conva ,conv_step=self.step_conva ,)
lowercase_ : Optional[int] = self.pooling(__UpperCamelCase ,self.size_poolinga )
return data_conveda, data_pooleda
if __name__ == "__main__":
pass
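# Hypothetical usage sketch (argument and method names are assumptions, since
# the identifiers in this file are mangled): instantiate the network with a
# conv spec (kernel size, kernel count, stride), a pooling size and the three
# BP layer widths; train it on paired image/teacher arrays until the mean
# squared error drops below `error_accuracy`; then persist it through the
# pickle-based save method and restore it via the classmethod loader above.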
"""simple docstring"""
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnet1D
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
MODELS_MAP = {
'gwf-440k': {
'url': 'https://model-server.zqevans2.workers.dev/gwf-440k.ckpt',
'sample_rate': 48000,
'sample_size': 65536,
},
'jmann-small-190k': {
'url': 'https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt',
'sample_rate': 48000,
'sample_size': 65536,
},
'jmann-large-580k': {
'url': 'https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt',
'sample_rate': 48000,
'sample_size': 131072,
},
'maestro-uncond-150k': {
'url': 'https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt',
'sample_rate': 16000,
'sample_size': 65536,
},
'unlocked-uncond-250k': {
'url': 'https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt',
'sample_rate': 16000,
'sample_size': 65536,
},
'honk-140k': {
'url': 'https://model-server.zqevans2.workers.dev/honk-140k.ckpt',
'sample_rate': 16000,
'sample_size': 65536,
},
}
def alpha_sigma_to_t( alpha , sigma ):
    """simple docstring"""
    return torch.atan2(sigma , alpha ) / math.pi * 2
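# (alpha, sigma) parametrizes the noise level of a diffusion step;
# t = atan2(sigma, alpha) * 2 / pi maps the pair back to a scalar timestep
# in [0, 1], which get_crash_schedule below uses to warp a linear schedule.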
def get_crash_schedule( t ):
    """simple docstring"""
    sigma = torch.sin(t * math.pi / 2 ) ** 2
    alpha = (1 - sigma**2) ** 0.5
    return alpha_sigma_to_t(alpha , sigma )
class Object(object):
    pass
class DiffusionUncond(nn.Module):
    def __init__( self , global_args ):
        """simple docstring"""
        super().__init__()
        self.diffusion = DiffusionAttnUnet1D(global_args , n_attn_layers=4 )
        self.diffusion_ema = deepcopy(self.diffusion )
        self.rng = torch.quasirandom.SobolEngine(1 , scramble=True )
def download( model_name ):
    """simple docstring"""
    url = MODELS_MAP[model_name]['''url''']
    os.system(f"""wget {url} ./""" )
    return f"""./{model_name}.ckpt"""
DOWN_NUM_TO_LAYER = {
'1': 'resnets.0',
'2': 'attentions.0',
'3': 'resnets.1',
'4': 'attentions.1',
'5': 'resnets.2',
'6': 'attentions.2',
}
UP_NUM_TO_LAYER = {
'8': 'resnets.0',
'9': 'attentions.0',
'10': 'resnets.1',
'11': 'attentions.1',
'12': 'resnets.2',
'13': 'attentions.2',
}
MID_NUM_TO_LAYER = {
'1': 'resnets.0',
'2': 'attentions.0',
'3': 'resnets.1',
'4': 'attentions.1',
'5': 'resnets.2',
'6': 'attentions.2',
'8': 'resnets.3',
'9': 'attentions.3',
'10': 'resnets.4',
'11': 'attentions.4',
'12': 'resnets.5',
'13': 'attentions.5',
}
DEPTH_0_TO_LAYER = {
'0': 'resnets.0',
'1': 'resnets.1',
'2': 'resnets.2',
'4': 'resnets.0',
'5': 'resnets.1',
'6': 'resnets.2',
}
RES_CONV_MAP = {
'skip': 'conv_skip',
'main.0': 'conv_1',
'main.1': 'group_norm_1',
'main.3': 'conv_2',
'main.4': 'group_norm_2',
}
ATTN_MAP = {
'norm': 'group_norm',
'qkv_proj': ['query', 'key', 'value'],
'out_proj': ['proj_attn'],
}
def convert_resconv_naming( name ):
"""simple docstring"""
if name.startswith('''skip''' ):
return name.replace('''skip''' , RES_CONV_MAP['''skip'''] )
# name has to be of format main.{digit}
if not name.startswith('''main.''' ):
raise ValueError(f"""ResConvBlock error with {name}""" )
return name.replace(name[:6] , RES_CONV_MAP[name[:6]] )
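# e.g. "skip.weight" -> "conv_skip.weight" and "main.0.weight" -> "conv_1.weight",
# per RES_CONV_MAP above.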
def convert_attn_naming( name ):
    """simple docstring"""
    for key, value in ATTN_MAP.items():
        if name.startswith(key ) and not isinstance(value , list ):
            return name.replace(key , value )
        elif name.startswith(key ):
            return [name.replace(key , v ) for v in value]
    raise ValueError(f"""Attn error with {name}""" )
def rename( input_string , max_depth=13 ):
    """simple docstring"""
    string = input_string
    if string.split('''.''' )[0] == "timestep_embed":
        return string.replace('''timestep_embed''' , '''time_proj''' )
    depth = 0
    if string.startswith('''net.3.''' ):
        depth += 1
        string = string[6:]
    elif string.startswith('''net.''' ):
        string = string[4:]
    while string.startswith('''main.7.''' ):
        depth += 1
        string = string[7:]
    if string.startswith('''main.''' ):
        string = string[5:]
    # mid block
    if string[:2].isdigit():
        layer_num = string[:2]
        string_left = string[2:]
    else:
        layer_num = string[0]
        string_left = string[1:]
    if depth == max_depth:
        new_layer = MID_NUM_TO_LAYER[layer_num]
        prefix = '''mid_block'''
    elif depth > 0 and int(layer_num ) < 7:
        new_layer = DOWN_NUM_TO_LAYER[layer_num]
        prefix = f"""down_blocks.{depth}"""
    elif depth > 0 and int(layer_num ) > 7:
        new_layer = UP_NUM_TO_LAYER[layer_num]
        prefix = f"""up_blocks.{max_depth - depth - 1}"""
    elif depth == 0:
        new_layer = DEPTH_0_TO_LAYER[layer_num]
        prefix = f"""up_blocks.{max_depth - 1}""" if int(layer_num ) > 3 else '''down_blocks.0'''
    if not string_left.startswith('''.''' ):
        raise ValueError(f"""Naming error with {input_string} and string_left: {string_left}.""" )
    string_left = string_left[1:]
    if "resnets" in new_layer:
        new_string_left = convert_resconv_naming(string_left )
    elif "attentions" in new_layer:
        new_string_left = convert_attn_naming(string_left )
    string_left = new_string_left
    if not isinstance(string_left , list ):
        new_string = prefix + '''.''' + new_layer + '''.''' + string_left
    else:
        new_string = [prefix + '''.''' + new_layer + '''.''' + s for s in string_left]
    return new_string
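# Example: "timestep_embed.weight" short-circuits to "time_proj.weight"; all
# other keys are routed into "down_blocks.*", "mid_block" or "up_blocks.*"
# depending on how many "net.3." / "main.7." wrappers were peeled off (depth).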
def rename_orig_weights( state_dict ):
    """simple docstring"""
    new_state_dict = {}
    for k, v in state_dict.items():
        if k.endswith('''kernel''' ):
            # up- and downsample layers, don't have trainable weights
            continue
        new_k = rename(k )
        # check if we need to transform from Conv => Linear for attention
        if isinstance(new_k , list ):
            new_state_dict = transform_conv_attns(new_state_dict , new_k , v )
        else:
            new_state_dict[new_k] = v
    return new_state_dict
def transform_conv_attns( new_state_dict , new_k , v ):
    """simple docstring"""
    if len(new_k ) == 1:
        if len(v.shape ) == 3:
            # weight
            new_state_dict[new_k[0]] = v[:, :, 0]
        else:
            # bias
            new_state_dict[new_k[0]] = v
    else:
        # qkv matrices
        trippled_shape = v.shape[0]
        single_shape = trippled_shape // 3
        for i in range(3 ):
            if len(v.shape ) == 3:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape, :, 0]
            else:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape]
    return new_state_dict
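# The original checkpoint stores attention as 1x1 convolutions with a fused
# qkv matrix; dropping the trailing conv axis and slicing into three equal
# chunks yields the separate Linear query/key/value weights diffusers expects.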
def main( args ):
    """simple docstring"""
    device = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
    model_name = args.model_path.split('''/''' )[-1].split('''.''' )[0]
    if not os.path.isfile(args.model_path ):
        assert (
            model_name == args.model_path
        ), f"""Make sure to provide one of the official model names {MODELS_MAP.keys()}"""
        args.model_path = download(model_name )
    sample_rate = MODELS_MAP[model_name]['''sample_rate''']
    sample_size = MODELS_MAP[model_name]['''sample_size''']
    config = Object()
    config.sample_size = sample_size
    config.sample_rate = sample_rate
    config.latent_dim = 0
    diffusers_model = UNet1DModel(sample_size=sample_size , sample_rate=sample_rate )
    diffusers_state_dict = diffusers_model.state_dict()
    orig_model = DiffusionUncond(config )
    orig_model.load_state_dict(torch.load(args.model_path , map_location=device )['''state_dict'''] )
    orig_model = orig_model.diffusion_ema.eval()
    orig_model_state_dict = orig_model.state_dict()
    renamed_state_dict = rename_orig_weights(orig_model_state_dict )
    renamed_minus_diffusers = set(renamed_state_dict.keys() ) - set(diffusers_state_dict.keys() )
    diffusers_minus_renamed = set(diffusers_state_dict.keys() ) - set(renamed_state_dict.keys() )
    assert len(renamed_minus_diffusers ) == 0, f"""Problem with {renamed_minus_diffusers}"""
    assert all(k.endswith('''kernel''' ) for k in list(diffusers_minus_renamed ) ), f"""Problem with {diffusers_minus_renamed}"""
    for key, value in renamed_state_dict.items():
        assert (
            diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
        ), f"""Shape for {key} doesn't match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}"""
        if key == "time_proj.weight":
            value = value.squeeze()
        diffusers_state_dict[key] = value
    diffusers_model.load_state_dict(diffusers_state_dict )
    steps = 100
    seed = 33
    scheduler = IPNDMScheduler(num_train_timesteps=steps )
    generator = torch.manual_seed(seed )
    noise = torch.randn([1, 2, config.sample_size] , generator=generator ).to(device )
    t = torch.linspace(1 , 0 , steps + 1 , device=device )[:-1]
    step_list = get_crash_schedule(t )
    pipe = DanceDiffusionPipeline(unet=diffusers_model , scheduler=scheduler )
    generator = torch.manual_seed(33 )
    audio = pipe(num_inference_steps=steps , generator=generator ).audios
    generated = sampling.iplms_sample(orig_model , noise , step_list , {} )
    generated = generated.clamp(-1 , 1 )
    diff_sum = (generated - audio).abs().sum()
    diff_max = (generated - audio).abs().max()
    if args.save:
        pipe.save_pretrained(args.checkpoint_path )
    print('''Diff sum''' , diff_sum )
    print('''Diff max''' , diff_max )
    assert diff_max < 1E-3, f"""Diff max: {diff_max} is too much :-/"""
    print(f"""Conversion for {model_name} successful!""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--model_path', default=None, type=str, required=True, help='Path to the model to convert.')
parser.add_argument(
'--save', default=True, type=bool, required=False, help='Whether to save the converted model or not.'
)
parser.add_argument('--checkpoint_path', default=None, type=str, required=True, help='Path to the output model.')
    args = parser.parse_args()
main(args)
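# Typical invocation (script name and paths are hypothetical):
#   python convert_dance_diffusion_to_diffusers.py --model_path gwf-440k --checkpoint_path ./gwf-440k-diffusers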
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)
CONVNEXTV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/convnextv2-tiny-1k-224': 'https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json',
}
class ConvNextV2Config(BackboneConfigMixin , PretrainedConfig):
    model_type = """convnextv2"""
    def __init__( self , num_channels=3 , patch_size=4 , num_stages=4 , hidden_sizes=None , depths=None , hidden_act="gelu" , initializer_range=0.02 , layer_norm_eps=1E-12 , drop_path_rate=0.0 , image_size=2_24 , out_features=None , out_indices=None , **kwargs , ):
        """simple docstring"""
        super().__init__(**kwargs )
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_stages = num_stages
        self.hidden_sizes = [96, 1_92, 3_84, 7_68] if hidden_sizes is None else hidden_sizes
        self.depths = [3, 3, 9, 3] if depths is None else depths
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.drop_path_rate = drop_path_rate
        self.image_size = image_size
        self.stage_names = ['''stem'''] + [F"""stage{idx}""" for idx in range(1 , len(self.depths ) + 1 )]
        self._out_features , self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names )
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_convbert': ['CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvBertConfig', 'ConvBertOnnxConfig'],
'tokenization_convbert': ['ConvBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_convbert_fast'] = ['ConvBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_convbert'] = [
'CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConvBertForMaskedLM',
'ConvBertForMultipleChoice',
'ConvBertForQuestionAnswering',
'ConvBertForSequenceClassification',
'ConvBertForTokenClassification',
'ConvBertLayer',
'ConvBertModel',
'ConvBertPreTrainedModel',
'load_tf_weights_in_convbert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_convbert'] = [
'TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFConvBertForMaskedLM',
'TFConvBertForMultipleChoice',
'TFConvBertForQuestionAnswering',
'TFConvBertForSequenceClassification',
'TFConvBertForTokenClassification',
'TFConvBertLayer',
'TFConvBertModel',
'TFConvBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
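# Standard transformers lazy-import pattern: under TYPE_CHECKING the real
# symbols are imported for static analysis; at runtime the module object is
# swapped for a _LazyModule that resolves names from _import_structure on
# first attribute access.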
from typing import List
from .keymap import KEYMAP, get_character
def mark( key ):
    def decorator( func ):
        handle = getattr(func , '''handle_key''' , [] )
        handle += [key]
        setattr(func , '''handle_key''' , handle )
        return func
    return decorator
def mark_multiple( *keys ):
    def decorator( func ):
        handle = getattr(func , '''handle_key''' , [] )
        handle += keys
        setattr(func , '''handle_key''' , handle )
        return func
    return decorator
class KeyHandler(type):
    def __new__( cls , name , bases , attrs ):
        '''simple docstring'''
        new_cls = super().__new__(cls , name , bases , attrs )
        if not hasattr(new_cls , '''key_handler''' ):
            setattr(new_cls , '''key_handler''' , {} )
        setattr(new_cls , '''handle_input''' , KeyHandler.handle_input )
        for value in attrs.values():
            handled_keys = getattr(value , '''handle_key''' , [] )
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls
    @staticmethod
    def handle_input( cls ):
        '''simple docstring'''
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char )
        handler = cls.key_handler.get(char )
        if handler:
            cls.current_selection = char
            return handler(cls )
        else:
            return None
def register( cls ):
    return KeyHandler(cls.__name__ , cls.__bases__ , cls.__dict__.copy() )
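# Sketch of how these pieces compose (illustrative only): methods decorated
# with mark("x") or mark_multiple("x", "y") carry a "handle_key" list; wrapping
# a class with register(...) applies the KeyHandler metaclass, which gathers
# those methods into key_handler so handle_input can dispatch the character
# returned by get_character() to the matching method.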
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config , base_model=False ):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''blocks.{i}.norm1.weight''', F'''deit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''blocks.{i}.norm1.bias''', F'''deit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((F'''blocks.{i}.attn.proj.weight''', F'''deit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.attn.proj.bias''', F'''deit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''blocks.{i}.norm2.weight''', F'''deit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''blocks.{i}.norm2.bias''', F'''deit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.weight''', F'''deit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.bias''', F'''deit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.weight''', F'''deit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.bias''', F'''deit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
("""cls_token""", """deit.embeddings.cls_token"""),
("""dist_token""", """deit.embeddings.distillation_token"""),
("""patch_embed.proj.weight""", """deit.embeddings.patch_embeddings.projection.weight"""),
("""patch_embed.proj.bias""", """deit.embeddings.patch_embeddings.projection.bias"""),
("""pos_embed""", """deit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
("""pre_logits.fc.weight""", """pooler.dense.weight"""),
("""pre_logits.fc.bias""", """pooler.dense.bias"""),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("""deit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
("""norm.weight""", """deit.layernorm.weight"""),
("""norm.bias""", """deit.layernorm.bias"""),
("""head.weight""", """cls_classifier.weight"""),
("""head.bias""", """cls_classifier.bias"""),
("""head_dist.weight""", """distillation_classifier.weight"""),
("""head_dist.bias""", """distillation_classifier.bias"""),
] )
return rename_keys
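# e.g. ("blocks.0.norm1.weight", "deit.encoder.layer.0.layernorm_before.weight")
# is one of the (timm key, HF key) pairs produced for layer 0.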
def read_in_q_k_v(state_dict , config , base_model=False ):
    for i in range(config.num_hidden_layers ):
        if base_model:
            prefix = """"""
        else:
            prefix = """deit."""
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F'''blocks.{i}.attn.qkv.weight''' )
        in_proj_bias = state_dict.pop(F'''blocks.{i}.attn.qkv.bias''' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.query.weight'''] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.query.bias'''] = in_proj_bias[: config.hidden_size]
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.key.weight'''] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.key.bias'''] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.value.weight'''] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.value.bias'''] = in_proj_bias[-config.hidden_size :]
def rename_key(dct , old , new ):
    val = dct.pop(old )
    dct[new] = val
def prepare_img():
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_deit_checkpoint(deit_name , pytorch_dump_folder_path ):
    config = DeiTConfig()
    # all deit models have fine-tuned heads
    base_model = False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = """huggingface/label-files"""
    filename = """imagenet-1k-id2label.json"""
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="""dataset""" ) , """r""" ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    config.patch_size = int(deit_name[-6:-4] )
    config.image_size = int(deit_name[-3:] )
    # size of the architecture
    if deit_name[9:].startswith("""tiny""" ):
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
    elif deit_name[9:].startswith("""small""" ):
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    if deit_name[9:].startswith("""base""" ):
        pass
    elif deit_name[4:].startswith("""large""" ):
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    # load original model from timm
    timm_model = timm.create_model(deit_name , pretrained=True )
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    rename_keys = create_rename_keys(config , base_model )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config , base_model )
    # load HuggingFace model
    model = DeiTForImageClassificationWithTeacher(config ).eval()
    model.load_state_dict(state_dict )
    # Check outputs on an image, prepared by DeiTImageProcessor
    size = int(
        (256 / 224) * config.image_size )  # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    image_processor = DeiTImageProcessor(size=size , crop_size=config.image_size )
    encoding = image_processor(images=prepare_img() , return_tensors="""pt""" )
    pixel_values = encoding["""pixel_values"""]
    outputs = model(pixel_values )
    timm_logits = timm_model(pixel_values )
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits , outputs.logits , atol=1E-3 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(F'''Saving model {deit_name} to {pytorch_dump_folder_path}''' )
    model.save_pretrained(pytorch_dump_folder_path )
    print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--deit_name",
default="vit_deit_base_distilled_patch16_224",
type=str,
help="Name of the DeiT timm model you\'d like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
    args = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.model'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/sentencepiece.model',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'google/rembert': 256,
}
class RemBertTokenizer(PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__( self , vocab_file , do_lower_case=False , remove_space=True , keep_accents=True , bos_token="[CLS]" , eos_token="[SEP]" , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , **kwargs , ):
        super().__init__(
            do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , **kwargs , )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(vocab_file )
    @property
    def vocab_size( self ):
        return len(self.sp_model )
    def get_vocab( self ):
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def __getstate__( self ):
        state = self.__dict__.copy()
        state['''sp_model'''] = None
        return state
    def __setstate__( self , d ):
        self.__dict__ = d
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(self.vocab_file )
    def _tokenize( self , text , sample=False ):
        pieces = self.sp_model.EncodeAsPieces(text )
        return pieces
    def _convert_token_to_id( self , token ):
        return self.sp_model.PieceToId(token )
    def _convert_id_to_token( self , index ):
        return self.sp_model.IdToPiece(index )
    def convert_tokens_to_string( self , tokens ):
        out_string = self.sp_model.decode_pieces(tokens )
        return out_string
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def get_special_tokens_mask( self , token_ids_0 , token_ids_1=None , already_has_special_tokens=False ):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    '''You should not supply a second sequence if the provided sequence of '''
                    '''ids is already formatted with special tokens for the model.''' )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1]
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1=None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self , save_directory , filename_prefix=None ):
        if not os.path.isdir(save_directory ):
            logger.error('''Vocabulary path ({}) should be a directory'''.format(save_directory ) )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        return (out_vocab_file,)
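# Hypothetical usage sketch (checkpoint name taken from the map above):
#   tokenizer = RemBertTokenizer.from_pretrained("google/rembert")
#   encoding = tokenizer("hello world")
# which wraps the piece ids as [CLS] ... [SEP] via
# build_inputs_with_special_tokens above.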
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
model_classes = {
    "b0": efficientnet.EfficientNetB0,
    "b1": efficientnet.EfficientNetB1,
    "b2": efficientnet.EfficientNetB2,
    "b3": efficientnet.EfficientNetB3,
    "b4": efficientnet.EfficientNetB4,
    "b5": efficientnet.EfficientNetB5,
    "b6": efficientnet.EfficientNetB6,
    "b7": efficientnet.EfficientNetB7,
}
CONFIG_MAP = {
"b0": {
"hidden_dim": 1_280,
"width_coef": 1.0,
"depth_coef": 1.0,
"image_size": 224,
"dropout_rate": 0.2,
"dw_padding": [],
},
"b1": {
"hidden_dim": 1_280,
"width_coef": 1.0,
"depth_coef": 1.1,
"image_size": 240,
"dropout_rate": 0.2,
"dw_padding": [16],
},
"b2": {
"hidden_dim": 1_408,
"width_coef": 1.1,
"depth_coef": 1.2,
"image_size": 260,
"dropout_rate": 0.3,
"dw_padding": [5, 8, 16],
},
"b3": {
"hidden_dim": 1_536,
"width_coef": 1.2,
"depth_coef": 1.4,
"image_size": 300,
"dropout_rate": 0.3,
"dw_padding": [5, 18],
},
"b4": {
"hidden_dim": 1_792,
"width_coef": 1.4,
"depth_coef": 1.8,
"image_size": 380,
"dropout_rate": 0.4,
"dw_padding": [6],
},
"b5": {
"hidden_dim": 2_048,
"width_coef": 1.6,
"depth_coef": 2.2,
"image_size": 456,
"dropout_rate": 0.4,
"dw_padding": [13, 27],
},
"b6": {
"hidden_dim": 2_304,
"width_coef": 1.8,
"depth_coef": 2.6,
"image_size": 528,
"dropout_rate": 0.5,
"dw_padding": [31],
},
"b7": {
"hidden_dim": 2_560,
"width_coef": 2.0,
"depth_coef": 3.1,
"image_size": 600,
"dropout_rate": 0.5,
"dw_padding": [18],
},
}
def get_efficientnet_config(model_name):
    config = EfficientNetConfig()
    config.hidden_dim = CONFIG_MAP[model_name]["hidden_dim"]
    config.width_coefficient = CONFIG_MAP[model_name]["width_coef"]
    config.depth_coefficient = CONFIG_MAP[model_name]["depth_coef"]
    config.image_size = CONFIG_MAP[model_name]["image_size"]
    config.dropout_rate = CONFIG_MAP[model_name]["dropout_rate"]
    config.depthwise_padding = CONFIG_MAP[model_name]["dw_padding"]
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    config.num_labels = 10_00
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def convert_image_processor(model_name):
    size = CONFIG_MAP[model_name]["image_size"]
    preprocessor = EfficientNetImageProcessor(
        size={"height": size, "width": size} , image_mean=[0.485, 0.456, 0.406] , image_std=[0.47_853_944, 0.4_732_864, 0.47_434_163] , do_center_crop=False , )
    return preprocessor
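# The image_mean above is the standard ImageNet mean; the image_std values are
# nonstandard and appear to be carried over from the original TF EfficientNet
# preprocessing rather than the usual ImageNet std.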
def rename_keys(original_param_names):
    block_names = [v.split("_")[0].split("block")[1] for v in original_param_names if v.startswith("block")]
    block_names = sorted(set(block_names))
    num_blocks = len(block_names)
    block_name_mapping = {b: str(i) for b, i in zip(block_names , range(num_blocks))}
    rename_keys = []
rename_keys.append(("stem_conv/kernel:0", "embeddings.convolution.weight"))
rename_keys.append(("stem_bn/gamma:0", "embeddings.batchnorm.weight"))
rename_keys.append(("stem_bn/beta:0", "embeddings.batchnorm.bias"))
rename_keys.append(("stem_bn/moving_mean:0", "embeddings.batchnorm.running_mean"))
rename_keys.append(("stem_bn/moving_variance:0", "embeddings.batchnorm.running_var"))
for b in block_names:
a = block_name_mapping[b]
rename_keys.append((f'''block{b}_expand_conv/kernel:0''', f'''encoder.blocks.{hf_b}.expansion.expand_conv.weight'''))
rename_keys.append((f'''block{b}_expand_bn/gamma:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.weight'''))
rename_keys.append((f'''block{b}_expand_bn/beta:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.bias'''))
rename_keys.append(
(f'''block{b}_expand_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_mean'''))
rename_keys.append(
(f'''block{b}_expand_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_var'''))
rename_keys.append(
(f'''block{b}_dwconv/depthwise_kernel:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight'''))
rename_keys.append((f'''block{b}_bn/gamma:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight'''))
rename_keys.append((f'''block{b}_bn/beta:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias'''))
rename_keys.append(
(f'''block{b}_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean'''))
rename_keys.append(
(f'''block{b}_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var'''))
rename_keys.append((f'''block{b}_se_reduce/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.weight'''))
rename_keys.append((f'''block{b}_se_reduce/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.bias'''))
rename_keys.append((f'''block{b}_se_expand/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.weight'''))
rename_keys.append((f'''block{b}_se_expand/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.bias'''))
rename_keys.append(
(f'''block{b}_project_conv/kernel:0''', f'''encoder.blocks.{hf_b}.projection.project_conv.weight'''))
rename_keys.append((f'''block{b}_project_bn/gamma:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.weight'''))
rename_keys.append((f'''block{b}_project_bn/beta:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.bias'''))
rename_keys.append(
(f'''block{b}_project_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_mean'''))
rename_keys.append(
(f'''block{b}_project_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_var'''))
rename_keys.append(("top_conv/kernel:0", "encoder.top_conv.weight"))
rename_keys.append(("top_bn/gamma:0", "encoder.top_bn.weight"))
rename_keys.append(("top_bn/beta:0", "encoder.top_bn.bias"))
rename_keys.append(("top_bn/moving_mean:0", "encoder.top_bn.running_mean"))
rename_keys.append(("top_bn/moving_variance:0", "encoder.top_bn.running_var"))
    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = "efficientnet." + item[1]
    # classification head (TF variable names assumed from the original Keras model)
    key_mapping["predictions/kernel:0"] = "classifier.weight"
    key_mapping["predictions/bias:0"] = "classifier.bias"
    return key_mapping
def replace_params(hf_params , tf_params , key_mapping):
    for key, value in tf_params.items():
        if "normalization" in key:
            continue
        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(3 , 2 , 0 , 1)
        elif "depthwise_kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(2 , 3 , 0 , 1)
        elif "kernel" in key:
            new_hf_value = torch.from_numpy(np.transpose(value))
        else:
            new_hf_value = torch.from_numpy(value)
        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value)
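# TF stores conv kernels as (H, W, in, out) and depthwise kernels as
# (H, W, in, multiplier); the permutes above reorder them to PyTorch's
# (out, in, H, W) layout, while dense kernels only need a transpose.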
@torch.no_grad()
def convert_efficientnet_checkpoint(model_name , pytorch_dump_folder_path , save_model , push_to_hub):
    original_model = model_classes[model_name](
        include_top=True , weights="imagenet" , input_tensor=None , input_shape=None , pooling=None , classes=10_00 , classifier_activation="softmax" , )
    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys())
    # Load HuggingFace model
    config = get_efficientnet_config(model_name)
    hf_model = EfficientNetForImageClassification(config).eval()
    hf_params = hf_model.state_dict()
    # Create src-to-dst parameter name mapping dictionary
    print("Converting parameters...")
    key_mapping = rename_keys(tf_param_names)
    replace_params(hf_params , tf_params , key_mapping)
    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name)
    inputs = preprocessor(images=prepare_img() , return_tensors="pt")
    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs)
    hf_logits = outputs.logits.detach().numpy()
    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]["image_size"]
    img = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST)
    x = image.img_to_array(img)
    x = np.expand_dims(x , axis=0)
    original_logits = original_model.predict(x)
    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits , hf_logits , atol=1e-3), "The predicted logits are not the same."
    print("Model outputs match!")
    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path):
            os.mkdir(pytorch_dump_folder_path)
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path)
        preprocessor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        # Push model and image processor to hub
        print(f'''Pushing converted {model_name} to the hub...''')
        model_name = f'''efficientnet-{model_name}'''
        preprocessor.push_to_hub(model_name)
        hf_model.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="b0",
type=str,
help="Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="hf_model",
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument("--save_model", action="store_true", help="Save model to local")
parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
    args = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class LightningModel(pl.LightningModule ):
    def __init__( self , model ):
        '''simple docstring'''
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size , self.num_labels )
    # implemented only because pytorch-lightning requires it; unused for conversion
    def forward( self ):
        '''simple docstring'''
        pass
def convert_longformer_qa_checkpoint_to_pytorch(longformer_model , longformer_question_answering_ckpt_path , pytorch_dump_folder_path):
    # load longformer model from model identifier
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)
    ckpt = torch.load(longformer_question_answering_ckpt_path , map_location=torch.device("cpu"))
    lightning_model.load_state_dict(ckpt["state_dict"])
    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)
    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()
    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)
    print(f'''Conversion successful. Model saved under {pytorch_dump_folder_path}''')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--longformer_model",
default=None,
type=str,
required=True,
help="model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.",
)
parser.add_argument(
"--longformer_question_answering_ckpt_path",
default=None,
type=str,
required=True,
help="Path the official PyTorch Lightning Checkpoint.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
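# Typical invocation (script name and paths are hypothetical):
#   python convert_longformer_original_pytorch_lightning_to_pytorch.py \
#       --longformer_model longformer-base-4096 \
#       --longformer_question_answering_ckpt_path ./qa.ckpt \
#       --pytorch_dump_folder_path ./longformer-qa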
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
logger = logging.get_logger('''transformers.models.encodec''')
MAPPING_QUANTIZER = {
'''quantizer.vq.layers.*._codebook.inited''': '''quantizer.layers.*.codebook.inited''',
'''quantizer.vq.layers.*._codebook.cluster_size''': '''quantizer.layers.*.codebook.cluster_size''',
'''quantizer.vq.layers.*._codebook.embed''': '''quantizer.layers.*.codebook.embed''',
'''quantizer.vq.layers.*._codebook.embed_avg''': '''quantizer.layers.*.codebook.embed_avg''',
}
MAPPING_ENCODER = {
'''encoder.model.0.conv.conv''': '''encoder.layers.0.conv''',
'''encoder.model.1.block.1.conv.conv''': '''encoder.layers.1.block.1.conv''',
'''encoder.model.1.block.3.conv.conv''': '''encoder.layers.1.block.3.conv''',
'''encoder.model.1.shortcut.conv.conv''': '''encoder.layers.1.shortcut.conv''',
'''encoder.model.3.conv.conv''': '''encoder.layers.3.conv''',
'''encoder.model.4.block.1.conv.conv''': '''encoder.layers.4.block.1.conv''',
'''encoder.model.4.block.3.conv.conv''': '''encoder.layers.4.block.3.conv''',
'''encoder.model.4.shortcut.conv.conv''': '''encoder.layers.4.shortcut.conv''',
'''encoder.model.6.conv.conv''': '''encoder.layers.6.conv''',
'''encoder.model.7.block.1.conv.conv''': '''encoder.layers.7.block.1.conv''',
'''encoder.model.7.block.3.conv.conv''': '''encoder.layers.7.block.3.conv''',
'''encoder.model.7.shortcut.conv.conv''': '''encoder.layers.7.shortcut.conv''',
'''encoder.model.9.conv.conv''': '''encoder.layers.9.conv''',
'''encoder.model.10.block.1.conv.conv''': '''encoder.layers.10.block.1.conv''',
'''encoder.model.10.block.3.conv.conv''': '''encoder.layers.10.block.3.conv''',
'''encoder.model.10.shortcut.conv.conv''': '''encoder.layers.10.shortcut.conv''',
'''encoder.model.12.conv.conv''': '''encoder.layers.12.conv''',
'''encoder.model.13.lstm''': '''encoder.layers.13.lstm''',
'''encoder.model.15.conv.conv''': '''encoder.layers.15.conv''',
}
MAPPING_ENCODER_48K = {
'''encoder.model.0.conv.norm''': '''encoder.layers.0.norm''',
'''encoder.model.1.block.1.conv.norm''': '''encoder.layers.1.block.1.norm''',
'''encoder.model.1.block.3.conv.norm''': '''encoder.layers.1.block.3.norm''',
'''encoder.model.1.shortcut.conv.norm''': '''encoder.layers.1.shortcut.norm''',
'''encoder.model.3.conv.norm''': '''encoder.layers.3.norm''',
'''encoder.model.4.block.1.conv.norm''': '''encoder.layers.4.block.1.norm''',
'''encoder.model.4.block.3.conv.norm''': '''encoder.layers.4.block.3.norm''',
'''encoder.model.4.shortcut.conv.norm''': '''encoder.layers.4.shortcut.norm''',
'''encoder.model.6.conv.norm''': '''encoder.layers.6.norm''',
'''encoder.model.7.block.1.conv.norm''': '''encoder.layers.7.block.1.norm''',
'''encoder.model.7.block.3.conv.norm''': '''encoder.layers.7.block.3.norm''',
'''encoder.model.7.shortcut.conv.norm''': '''encoder.layers.7.shortcut.norm''',
'''encoder.model.9.conv.norm''': '''encoder.layers.9.norm''',
'''encoder.model.10.block.1.conv.norm''': '''encoder.layers.10.block.1.norm''',
'''encoder.model.10.block.3.conv.norm''': '''encoder.layers.10.block.3.norm''',
'''encoder.model.10.shortcut.conv.norm''': '''encoder.layers.10.shortcut.norm''',
'''encoder.model.12.conv.norm''': '''encoder.layers.12.norm''',
'''encoder.model.15.conv.norm''': '''encoder.layers.15.norm''',
}
MAPPING_DECODER = {
'''decoder.model.0.conv.conv''': '''decoder.layers.0.conv''',
'''decoder.model.1.lstm''': '''decoder.layers.1.lstm''',
'''decoder.model.3.convtr.convtr''': '''decoder.layers.3.conv''',
'''decoder.model.4.block.1.conv.conv''': '''decoder.layers.4.block.1.conv''',
'''decoder.model.4.block.3.conv.conv''': '''decoder.layers.4.block.3.conv''',
'''decoder.model.4.shortcut.conv.conv''': '''decoder.layers.4.shortcut.conv''',
'''decoder.model.6.convtr.convtr''': '''decoder.layers.6.conv''',
'''decoder.model.7.block.1.conv.conv''': '''decoder.layers.7.block.1.conv''',
'''decoder.model.7.block.3.conv.conv''': '''decoder.layers.7.block.3.conv''',
'''decoder.model.7.shortcut.conv.conv''': '''decoder.layers.7.shortcut.conv''',
'''decoder.model.9.convtr.convtr''': '''decoder.layers.9.conv''',
'''decoder.model.10.block.1.conv.conv''': '''decoder.layers.10.block.1.conv''',
'''decoder.model.10.block.3.conv.conv''': '''decoder.layers.10.block.3.conv''',
'''decoder.model.10.shortcut.conv.conv''': '''decoder.layers.10.shortcut.conv''',
'''decoder.model.12.convtr.convtr''': '''decoder.layers.12.conv''',
'''decoder.model.13.block.1.conv.conv''': '''decoder.layers.13.block.1.conv''',
'''decoder.model.13.block.3.conv.conv''': '''decoder.layers.13.block.3.conv''',
'''decoder.model.13.shortcut.conv.conv''': '''decoder.layers.13.shortcut.conv''',
'''decoder.model.15.conv.conv''': '''decoder.layers.15.conv''',
}
MAPPING_DECODER_48K = {
'''decoder.model.0.conv.norm''': '''decoder.layers.0.norm''',
'''decoder.model.3.convtr.norm''': '''decoder.layers.3.norm''',
'''decoder.model.4.block.1.conv.norm''': '''decoder.layers.4.block.1.norm''',
'''decoder.model.4.block.3.conv.norm''': '''decoder.layers.4.block.3.norm''',
'''decoder.model.4.shortcut.conv.norm''': '''decoder.layers.4.shortcut.norm''',
'''decoder.model.6.convtr.norm''': '''decoder.layers.6.norm''',
'''decoder.model.7.block.1.conv.norm''': '''decoder.layers.7.block.1.norm''',
'''decoder.model.7.block.3.conv.norm''': '''decoder.layers.7.block.3.norm''',
'''decoder.model.7.shortcut.conv.norm''': '''decoder.layers.7.shortcut.norm''',
'''decoder.model.9.convtr.norm''': '''decoder.layers.9.norm''',
'''decoder.model.10.block.1.conv.norm''': '''decoder.layers.10.block.1.norm''',
'''decoder.model.10.block.3.conv.norm''': '''decoder.layers.10.block.3.norm''',
'''decoder.model.10.shortcut.conv.norm''': '''decoder.layers.10.shortcut.norm''',
'''decoder.model.12.convtr.norm''': '''decoder.layers.12.norm''',
'''decoder.model.13.block.1.conv.norm''': '''decoder.layers.13.block.1.norm''',
'''decoder.model.13.block.3.conv.norm''': '''decoder.layers.13.block.3.norm''',
'''decoder.model.13.shortcut.conv.norm''': '''decoder.layers.13.shortcut.norm''',
'''decoder.model.15.conv.norm''': '''decoder.layers.15.norm''',
}
MAPPING_24K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
MAPPING_48K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = []
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Copy `value` into the attribute of `hf_pointer` addressed by `key` (and `weight_type`)."""
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should"
            f" be {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "weight_ih_l0":
        hf_pointer.weight_ih_l0.data = value
    elif weight_type == "weight_hh_l0":
        hf_pointer.weight_hh_l0.data = value
    elif weight_type == "bias_ih_l0":
        hf_pointer.bias_ih_l0.data = value
    elif weight_type == "bias_hh_l0":
        hf_pointer.bias_hh_l0.data = value
    elif weight_type == "weight_ih_l1":
        hf_pointer.weight_ih_l1.data = value
    elif weight_type == "weight_hh_l1":
        hf_pointer.weight_hh_l1.data = value
    elif weight_type == "bias_ih_l1":
        hf_pointer.bias_ih_l1.data = value
    elif weight_type == "bias_hh_l1":
        hf_pointer.bias_hh_l1.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.")
def should_ignore(name, ignore_keys):
    """Return True if `name` matches one of the (possibly wildcarded) ignore patterns."""
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
def recursively_load_weights(orig_dict, hf_model, model_name):
    """Map every weight of the original checkpoint onto the HF model."""
    unused_weights = []

    if model_name in ("encodec_24khz", "encodec_32khz"):
        MAPPING = MAPPING_24K
    elif model_name == "encodec_48khz":
        MAPPING = MAPPING_48K
    else:
        raise ValueError(f"Unsupported model: {model_name}")

    for name, value in orig_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f"{name} was ignored")
            continue

        is_used = False
        for key, mapped_key in MAPPING.items():
            if "*" in key:
                prefix, suffix = key.split(".*.")
                if prefix in name and suffix in name:
                    key = suffix

            if key in name:
                # HACK otherwise .embed gets initialized with .embed_avg too
                if key.endswith("embed") and name.endswith("embed_avg"):
                    continue

                is_used = True
                if "*" in mapped_key:
                    layer_index = name.split(key)[0].split(".")[-2]
                    mapped_key = mapped_key.replace("*", layer_index)
                if "weight_g" in name:
                    weight_type = "weight_g"
                elif "weight_v" in name:
                    weight_type = "weight_v"
                elif "weight_ih_l0" in name:
                    weight_type = "weight_ih_l0"
                elif "weight_hh_l0" in name:
                    weight_type = "weight_hh_l0"
                elif "bias_ih_l0" in name:
                    weight_type = "bias_ih_l0"
                elif "bias_hh_l0" in name:
                    weight_type = "bias_hh_l0"
                elif "weight_ih_l1" in name:
                    weight_type = "weight_ih_l1"
                elif "weight_hh_l1" in name:
                    weight_type = "weight_hh_l1"
                elif "bias_ih_l1" in name:
                    weight_type = "bias_ih_l1"
                elif "bias_hh_l1" in name:
                    weight_type = "bias_hh_l1"
                elif "bias" in name:
                    weight_type = "bias"
                elif "weight" in name:
                    weight_type = "weight"
                elif "running_mean" in name:
                    weight_type = "running_mean"
                elif "running_var" in name:
                    weight_type = "running_var"
                elif "num_batches_tracked" in name:
                    weight_type = "num_batches_tracked"
                else:
                    weight_type = None
                set_recursively(hf_model, mapped_key, value, name, weight_type)
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
@torch.no_grad()
def convert_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, config_path=None, repo_id=None):
    """Copy/paste/tweak the original EnCodec weights into the transformers design."""
    if config_path is not None:
        config = EncodecConfig.from_pretrained(config_path)
    else:
        config = EncodecConfig()

    if model_name == "encodec_24khz":
        pass  # config is already correct
    elif model_name == "encodec_32khz":
        config.upsampling_ratios = [8, 5, 4, 4]
        config.target_bandwidths = [2.2]
        config.num_filters = 64
        config.sampling_rate = 32_000
        config.codebook_size = 2048
        config.use_causal_conv = False
        config.normalize = False
        config.use_conv_shortcut = False
    elif model_name == "encodec_48khz":
        config.upsampling_ratios = [8, 5, 4, 2]
        config.target_bandwidths = [3.0, 6.0, 12.0, 24.0]
        config.sampling_rate = 48_000
        config.audio_channels = 2
        config.use_causal_conv = False
        config.norm_type = "time_group_norm"
        config.normalize = True
        config.chunk_length_s = 1.0
        config.overlap = 0.01
    else:
        raise ValueError(f"Unknown model name: {model_name}")

    model = EncodecModel(config)

    feature_extractor = EncodecFeatureExtractor(
        feature_size=config.audio_channels,
        sampling_rate=config.sampling_rate,
        chunk_length_s=config.chunk_length_s,
        overlap=config.overlap,
    )
    feature_extractor.save_pretrained(pytorch_dump_folder_path)

    original_checkpoint = torch.load(checkpoint_path)
    if "best_state" in original_checkpoint:
        # we might have a training state saved, in which case discard the yaml results and just retain the weights
        original_checkpoint = original_checkpoint["best_state"]
    recursively_load_weights(original_checkpoint, model, model_name)
    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        feature_extractor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model",
        default="encodec_24khz",
        type=str,
        help="The model to convert. Should be one of 'encodec_24khz', 'encodec_32khz', 'encodec_48khz'.",
    )
    parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
    )

    args = parser.parse_args()
    convert_checkpoint(
        args.model,
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.push_to_hub,
    )
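
# A sketch of how the converter might be invoked from the command line
# (the script file name and paths are hypothetical; the checkpoint must be
# an original EnCodec checkpoint obtained separately):
#
#   python convert_encodec_checkpoint_to_pytorch.py \
#       --model encodec_24khz \
#       --checkpoint_path ./encodec_24khz.th \
#       --pytorch_dump_folder_path ./encodec_24khz_converted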
| 130
|
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
logger = logging.getLogger(__name__)
def accuracy(out, labels):
    outputs = np.argmax(out, axis=1)
    return np.sum(outputs == labels)
def load_rocstories_dataset(dataset_path):
    """Output a list of tuples (story, 1st continuation, 2nd continuation, label)."""
    with open(dataset_path, encoding="utf_8") as f:
        f = csv.reader(f)
        output = []
        next(f)  # skip the first line
        for line in tqdm(f):
            output.append((" ".join(line[1:5]), line[5], line[6], int(line[-1]) - 1))
    return output
def pre_process_datasets(encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token):
    """Pre-process datasets of tuples (story, cont1, cont2, label) into Transformer inputs."""
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset)
        input_ids = np.zeros((n_batch, 2, input_len), dtype=np.int64)
        mc_token_ids = np.zeros((n_batch, 2), dtype=np.int64)
        lm_labels = np.full((n_batch, 2, input_len), fill_value=-100, dtype=np.int64)
        mc_labels = np.zeros((n_batch,), dtype=np.int64)
        for (
            i,
            (story, cont1, cont2, mc_label),
        ) in enumerate(dataset):
            with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
            with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
            input_ids[i, 0, : len(with_cont1)] = with_cont1
            input_ids[i, 1, : len(with_cont2)] = with_cont2
            mc_token_ids[i, 0] = len(with_cont1) - 1
            mc_token_ids[i, 1] = len(with_cont2) - 1
            lm_labels[i, 0, : len(with_cont1)] = with_cont1
            lm_labels[i, 1, : len(with_cont2)] = with_cont2
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t) for t in all_inputs))
    return tensor_datasets
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, default="openai-gpt", help="pretrained model name")
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.")
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument("--train_dataset", type=str, default="")
    parser.add_argument("--eval_dataset", type=str, default="")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--num_train_epochs", type=int, default=3)
    parser.add_argument("--train_batch_size", type=int, default=8)
    parser.add_argument("--eval_batch_size", type=int, default=16)
    parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", type=int, default=1)
    parser.add_argument(
        "--max_steps",
        default=-1,
        type=int,
        help="If > 0: set total number of training steps to perform. Override num_train_epochs.",
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument("--learning_rate", type=float, default=6.25e-5)
    parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
    parser.add_argument("--lr_schedule", type=str, default="warmup_linear")
    parser.add_argument("--weight_decay", type=float, default=0.01)
    parser.add_argument("--lm_coef", type=float, default=0.9)
    parser.add_argument("--n_valid", type=int, default=374)
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()
    print(args)

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    n_gpu = torch.cuda.device_count()
    logger.info("device: {}, n_gpu {}".format(device, n_gpu))

    if not args.do_train and not args.do_eval:
        raise ValueError("At least one of `do_train` or `do_eval` must be True.")

    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)

    # Load tokenizer and model
    # This loading functions also add new tokens and embeddings called `special tokens`
    # These new embeddings will be fine-tuned on the RocStories dataset
    special_tokens = ["_start_", "_delimiter_", "_classify_"]
    tokenizer = OpenAIGPTTokenizer.from_pretrained(args.model_name)
    tokenizer.add_tokens(special_tokens)
    special_tokens_ids = tokenizer.convert_tokens_to_ids(special_tokens)
    model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name)
    model.resize_token_embeddings(len(tokenizer))
    model.to(device)

    # Load and encode the datasets
    def tokenize_and_encode(obj):
        """Tokenize and encode a nested object."""
        if isinstance(obj, str):
            return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj))
        elif isinstance(obj, int):
            return obj
        return [tokenize_and_encode(o) for o in obj]

    logger.info("Encoding dataset...")
    train_dataset = load_rocstories_dataset(args.train_dataset)
    eval_dataset = load_rocstories_dataset(args.eval_dataset)
    datasets = (train_dataset, eval_dataset)
    encoded_datasets = tokenize_and_encode(datasets)

    # Compute the max input length for the Transformer
    max_length = model.config.n_positions // 2 - 2
    input_length = max(
        len(story[:max_length]) + max(len(cont1[:max_length]), len(cont2[:max_length])) + 3
        for dataset in encoded_datasets
        for story, cont1, cont2, _ in dataset
    )
    input_length = min(input_length, model.config.n_positions)  # Max size of input for the pre-trained model

    # Prepare inputs tensors and dataloaders
    tensor_datasets = pre_process_datasets(encoded_datasets, input_length, max_length, *special_tokens_ids)
    train_tensor_dataset, eval_tensor_dataset = tensor_datasets[0], tensor_datasets[1]

    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)

    eval_data = TensorDataset(*eval_tensor_dataset)
    eval_sampler = SequentialSampler(eval_data)
    eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)

    # Prepare optimizer
    if args.do_train:
        if args.max_steps > 0:
            t_total = args.max_steps
            args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
        else:
            t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs

        param_optimizer = list(model.named_parameters())
        no_decay = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
                "weight_decay": args.weight_decay,
            },
            {"params": [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
        ]
        optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
        scheduler = get_linear_schedule_with_warmup(
            optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
        )

    if args.do_train:
        nb_tr_steps, tr_loss, exp_average_loss = 0, 0, None
        model.train()
        for _ in trange(int(args.num_train_epochs), desc="Epoch"):
            tr_loss = 0
            nb_tr_steps = 0
            tqdm_bar = tqdm(train_dataloader, desc="Training")
            for step, batch in enumerate(tqdm_bar):
                batch = tuple(t.to(device) for t in batch)
                input_ids, mc_token_ids, lm_labels, mc_labels = batch
                losses = model(input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels)
                loss = args.lm_coef * losses[0] + losses[1]
                loss.backward()
                optimizer.step()
                scheduler.step()
                optimizer.zero_grad()
                tr_loss += loss.item()
                exp_average_loss = (
                    loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
                )
                nb_tr_steps += 1
                tqdm_bar.desc = "Training loss: {:.2e} lr: {:.2e}".format(exp_average_loss, scheduler.get_lr()[0])

    # Save a trained model
    if args.do_train:
        # Save a trained model, configuration and tokenizer
        model_to_save = model.module if hasattr(model, "module") else model  # Only save the model itself

        # If we save using the predefined names, we can load using `from_pretrained`
        output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME)
        output_config_file = os.path.join(args.output_dir, CONFIG_NAME)

        torch.save(model_to_save.state_dict(), output_model_file)
        model_to_save.config.to_json_file(output_config_file)
        tokenizer.save_vocabulary(args.output_dir)

        # Load a trained model and vocabulary that you have fine-tuned
        model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir)
        tokenizer = OpenAIGPTTokenizer.from_pretrained(args.output_dir)
        model.to(device)

    if args.do_eval:
        model.eval()
        eval_loss, eval_accuracy = 0, 0
        nb_eval_steps, nb_eval_examples = 0, 0
        for batch in tqdm(eval_dataloader, desc="Evaluating"):
            batch = tuple(t.to(device) for t in batch)
            input_ids, mc_token_ids, lm_labels, mc_labels = batch
            with torch.no_grad():
                _, mc_loss, _, mc_logits = model(
                    input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels
                )

            mc_logits = mc_logits.detach().cpu().numpy()
            mc_labels = mc_labels.to("cpu").numpy()
            tmp_eval_accuracy = accuracy(mc_logits, mc_labels)

            eval_loss += mc_loss.mean().item()
            eval_accuracy += tmp_eval_accuracy

            nb_eval_examples += input_ids.size(0)
            nb_eval_steps += 1

        eval_loss = eval_loss / nb_eval_steps
        eval_accuracy = eval_accuracy / nb_eval_examples
        train_loss = tr_loss / nb_tr_steps if args.do_train else None
        result = {"eval_loss": eval_loss, "eval_accuracy": eval_accuracy, "train_loss": train_loss}

        output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key in sorted(result.keys()):
                logger.info("  %s = %s", key, str(result[key]))
                writer.write("%s = %s\n" % (key, str(result[key])))
if __name__ == "__main__":
main()
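
# A sketch of how this fine-tuning script might be launched (the script name,
# data paths and hyper-parameter values below are hypothetical; the CSV files
# are expected in the ROCStories cloze-test format read by
# load_rocstories_dataset):
#
#   python run_openai_gpt.py \
#       --do_train --do_eval \
#       --train_dataset ./cloze_test_val__spring2016.csv \
#       --eval_dataset ./cloze_test_test__spring2016.csv \
#       --output_dir ./gpt_rocstories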
| 62
| 0
|
"""simple docstring"""
def _snake_case ( lowercase__ : int = 5_0 ) -> int:
'''simple docstring'''
lowerCAmelCase_ :int = [1] * (length + 1)
for row_length in range(3 , length + 1 ):
for block_length in range(3 , row_length + 1 ):
for block_start in range(row_length - block_length ):
ways_number[row_length] += ways_number[
row_length - block_start - block_length - 1
]
ways_number[row_length] += 1
return ways_number[length]
if __name__ == "__main__":
print(F"""{solution() = }""")
| 364
| 0
|
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
    import sqlite3
import sqlalchemy
class SqlDatasetReader(AbstractDatasetInputStream):
    def __init__(
        self,
        sql: Union[str, "sqlalchemy.sql.Selectable"],
        con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
        features: Optional[Features] = None,
        cache_dir: Optional[str] = None,
        keep_in_memory: bool = False,
        **kwargs,
    ):
        super().__init__(features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs)
        self.builder = Sql(
            cache_dir=cache_dir,
            features=features,
            sql=sql,
            con=con,
            **kwargs,
        )

    def read(self):
        download_config = None
        download_mode = None
        verification_mode = None
        base_path = None

        self.builder.download_and_prepare(
            download_config=download_config,
            download_mode=download_mode,
            verification_mode=verification_mode,
            base_path=base_path,
        )

        # Build dataset for splits
        dataset = self.builder.as_dataset(
            split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
        )
        return dataset


class SqlDatasetWriter:
    def __init__(
        self,
        dataset: Dataset,
        name: str,
        con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
        batch_size: Optional[int] = None,
        num_proc: Optional[int] = None,
        **to_sql_kwargs,
    ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")

        self.dataset = dataset
        self.name = name
        self.con = con
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.to_sql_kwargs = to_sql_kwargs

    def write(self) -> int:
        _ = self.to_sql_kwargs.pop("sql", None)
        _ = self.to_sql_kwargs.pop("con", None)
        index = self.to_sql_kwargs.pop("index", False)

        written = self._write(index=index, **self.to_sql_kwargs)
        return written

    def _batch_sql(self, args):
        offset, index, to_sql_kwargs = args
        to_sql_kwargs = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs
        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        df = batch.to_pandas()
        num_rows = df.to_sql(self.name, self.con, index=index, **to_sql_kwargs)
        return num_rows or len(df)

    def _write(self, index, **to_sql_kwargs) -> int:
        """Writes the pyarrow table as SQL to a database, in batches (optionally in parallel)."""
        written = 0

        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit="ba",
                disable=not logging.is_progress_bar_enabled(),
                desc="Creating SQL from Arrow format",
            ):
                written += self._batch_sql((offset, index, to_sql_kwargs))
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for num_rows in logging.tqdm(
                    pool.imap(
                        self._batch_sql,
                        [(offset, index, to_sql_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit="ba",
                    disable=not logging.is_progress_bar_enabled(),
                    desc="Creating SQL from Arrow format",
                ):
                    written += num_rows
        return written
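
# Usage sketch (kept as a comment because this module is meant to be imported
# as part of the `datasets` package; the public `Dataset.to_sql` and
# `Dataset.from_sql` helpers delegate to the classes above):
#
#   import sqlite3
#   from datasets import Dataset
#
#   ds = Dataset.from_dict({"id": [1, 2], "text": ["a", "b"]})
#   con = sqlite3.connect("data.db")
#   ds.to_sql("my_table", con)                        # -> SqlDatasetWriter
#   ds_roundtrip = Dataset.from_sql("my_table", con)  # -> SqlDatasetReader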
| 312
|
from functools import lru_cache
@lru_cache
def factorial(num: int) -> int:
    """Return num! computed recursively (memoised via lru_cache)."""
    if num < 0:
        raise ValueError('''Number should not be negative.''')
    return 1 if num in (0, 1) else num * factorial(num - 1)
if __name__ == "__main__":
import doctest
doctest.testmod()
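    # A couple of quick sanity checks that follow directly from the
    # definition of the factorial:
    assert factorial(0) == 1
    assert factorial(5) == 120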
| 101
| 0
|
def counting_sort(collection):
    """Pure implementation of counting sort in Python (stable, supports negatives)."""
    # if the collection is empty, returns empty
    if collection == []:
        return []
    # get some information about the collection
    coll_len = len(collection)
    coll_max = max(collection)
    coll_min = min(collection)
    # create the counting array
    counting_arr_length = coll_max + 1 - coll_min
    counting_arr = [0] * counting_arr_length
    # count how much a number appears in the collection
    for number in collection:
        counting_arr[number - coll_min] += 1
    # sum each position with its predecessors. now, counting_arr[i] tells
    # us how many elements <= i there are in the collection
    for i in range(1, counting_arr_length):
        counting_arr[i] = counting_arr[i] + counting_arr[i - 1]
    # create the output collection
    ordered = [0] * coll_len
    # place the elements in the output, respecting the original order (stable
    # sort) from end to begin, updating counting_arr
    for i in reversed(range(0, coll_len)):
        ordered[counting_arr[collection[i] - coll_min] - 1] = collection[i]
        counting_arr[collection[i] - coll_min] -= 1
    return ordered


def counting_sort_string(string: str) -> str:
    return "".join([chr(i) for i in counting_sort([ord(c) for c in string])])
if __name__ == "__main__":
# Test string sort
assert counting_sort_string('''thisisthestring''') == "eghhiiinrsssttt"
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    unsorted = [int(item) for item in user_input.split(''',''')]
    print(counting_sort(unsorted))
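    # Extra sanity checks (expected outputs worked out by hand); note that the
    # offset by `coll_min` makes negative inputs work as well:
    assert counting_sort([4, 2, 2, 8, 3]) == [2, 2, 3, 4, 8]
    assert counting_sort([-3, 0, -1]) == [-3, -1, 0]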
| 356
|
from functools import lru_cache
@lru_cache
def factorial(num: int) -> int:
    if num < 0:
        raise ValueError('Number should not be negative.')
    return 1 if num in (0, 1) else num * factorial(num - 1)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 130
| 0
|
"""simple docstring"""
import socket
def UpperCAmelCase__ ( ):
'''simple docstring'''
lowerCAmelCase = socket.socket(socket.AF_INET , socket.SOCK_STREAM )
lowerCAmelCase = socket.gethostname()
lowerCAmelCase = 1_23_12
sock.connect((host, port) )
sock.send(B"""Hello server!""" )
with open("""Received_file""" , """wb""" ) as out_file:
print("""File opened""" )
print("""Receiving data...""" )
while True:
lowerCAmelCase = sock.recv(10_24 )
if not data:
break
out_file.write(lowercase__ )
print("""Successfully received the file""" )
sock.close()
print("""Connection closed""" )
if __name__ == "__main__":
main()
| 46
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class MgpstrProcessorTest(unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def setUp(self):
        self.image_size = (3, 32, 128)
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ['''[GO]''', '''[s]''', '''0''', '''1''', '''2''', '''3''', '''4''', '''5''', '''6''', '''7''', '''8''', '''9''', '''a''', '''b''', '''c''', '''d''', '''e''', '''f''', '''g''', '''h''', '''i''', '''j''', '''k''', '''l''', '''m''', '''n''', '''o''', '''p''', '''q''', '''r''', '''s''', '''t''', '''u''', '''v''', '''w''', '''x''', '''y''', '''z''']
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''])
        with open(self.vocab_file, '''w''', encoding='''utf-8''') as fp:
            fp.write(json.dumps(vocab_tokens) + '''\n''')

        image_processor_map = {
            '''do_normalize''': False,
            '''do_resize''': True,
            '''image_processor_type''': '''ViTImageProcessor''',
            '''resample''': 3,
            '''size''': {'''height''': 32, '''width''': 128},
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, '''w''', encoding='''utf-8''') as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a random PIL image."""
        image_input = np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)
        image_input = Image.fromarray(np.moveaxis(image_input, 0, -1))
        return image_input

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = MgpstrProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token='''(BOS)''', eos_token='''(EOS)''')
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = MgpstrProcessor.from_pretrained(
            self.tmpdirname, bos_token='''(BOS)''', eos_token='''(EOS)''', do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        input_image_proc = image_processor(image_input, return_tensors='''np''')
        input_processor = processor(images=image_input, return_tensors='''np''')

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = '''test'''
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = '''test'''
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ['''pixel_values''', '''labels'''])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.char_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        decode_strs = [seq.replace(''' ''', '''''') for seq in decoded_tok]

        self.assertListEqual(decoded_processor, decode_strs)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = None
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)

    def test_char_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        char_input = torch.randn(1, 27, 38)
        bpe_input = torch.randn(1, 27, 50257)
        wp_input = torch.randn(1, 27, 30522)

        results = processor.batch_decode([char_input, bpe_input, wp_input])

        self.assertListEqual(list(results.keys()), ['''generated_text''', '''scores''', '''char_preds''', '''bpe_preds''', '''wp_preds'''])
| 275
| 0
|
'''
Euler's modified method (Heun's method): a predictor-corrector scheme for
solving an ordinary differential equation y' = f(x, y).
'''
from collections.abc import Callable

import numpy as np


def euler_modified(
    ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float
) -> np.array:
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        # predictor: a plain (forward) Euler step
        y_predict = y[k] + step_size * ode_func(x, y[k])
        # corrector: average the slopes at both ends of the interval
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y_predict))
        )
        x += step_size

    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
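    # A small worked example (illustrative): integrate y' = y from x = 0 to
    # x = 1 with y(0) = 1; the result should approach e ~ 2.71828 as the
    # step size shrinks.
    approximation = euler_modified(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)[-1]
    print(f"y(1) ~ {approximation:.5f} (exact value: e ~ 2.71828)")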
| 98
|
'''simple docstring'''
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class BeitImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_reduce_labels=False,
    ):
        size = size if size is not None else {'''height''': 20, '''width''': 20}
        crop_size = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_reduce_labels = do_reduce_labels

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_reduce_labels": self.do_reduce_labels,
        }


def prepare_semantic_single_inputs():
    dataset = load_dataset('''hf-internal-testing/fixtures_ade20k''', split='''test''')

    image = Image.open(dataset[0]['''file'''])
    map = Image.open(dataset[1]['''file'''])

    return image, map


def prepare_semantic_batch_inputs():
    ds = load_dataset('''hf-internal-testing/fixtures_ade20k''', split='''test''')

    image1 = Image.open(ds[0]['''file'''])
    map1 = Image.open(ds[1]['''file'''])
    image2 = Image.open(ds[2]['''file'''])
    map2 = Image.open(ds[3]['''file'''])

    return [image1, image2], [map1, map2]


@require_torch
@require_vision
class BeitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = BeitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = BeitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, '''do_resize'''))
        self.assertTrue(hasattr(image_processing, '''size'''))
        self.assertTrue(hasattr(image_processing, '''do_center_crop'''))
        self.assertTrue(hasattr(image_processing, '''center_crop'''))
        self.assertTrue(hasattr(image_processing, '''do_normalize'''))
        self.assertTrue(hasattr(image_processing, '''image_mean'''))
        self.assertTrue(hasattr(image_processing, '''image_std'''))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {'''height''': 20, '''width''': 20})
        self.assertEqual(image_processor.crop_size, {'''height''': 18, '''width''': 18})
        self.assertEqual(image_processor.do_reduce_labels, False)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, crop_size=84, reduce_labels=True)
        self.assertEqual(image_processor.size, {'''height''': 42, '''width''': 42})
        self.assertEqual(image_processor.crop_size, {'''height''': 84, '''width''': 84})
        self.assertEqual(image_processor.do_reduce_labels, True)

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ), )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ), )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ), )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ), )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ), )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ), )

    def test_call_segmentation_maps(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        maps = []
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
            maps.append(torch.zeros(image.shape[-2:]).long())

        # Test not batched input
        encoding = image_processing(image_inputs[0], maps[0], return_tensors='''pt''')
        self.assertEqual(
            encoding['''pixel_values'''].shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ), )
        self.assertEqual(
            encoding['''labels'''].shape, (
                1,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ), )
        self.assertEqual(encoding['''labels'''].dtype, torch.long)
        self.assertTrue(encoding['''labels'''].min().item() >= 0)
        self.assertTrue(encoding['''labels'''].max().item() <= 255)

        # Test batched
        encoding = image_processing(image_inputs, maps, return_tensors='''pt''')
        self.assertEqual(
            encoding['''pixel_values'''].shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ), )
        self.assertEqual(
            encoding['''labels'''].shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ), )
        self.assertEqual(encoding['''labels'''].dtype, torch.long)
        self.assertTrue(encoding['''labels'''].min().item() >= 0)
        self.assertTrue(encoding['''labels'''].max().item() <= 255)

        # Test not batched input (PIL images)
        image, segmentation_map = prepare_semantic_single_inputs()
        encoding = image_processing(image, segmentation_map, return_tensors='''pt''')
        self.assertEqual(
            encoding['''pixel_values'''].shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ), )
        self.assertEqual(
            encoding['''labels'''].shape, (
                1,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ), )
        self.assertEqual(encoding['''labels'''].dtype, torch.long)
        self.assertTrue(encoding['''labels'''].min().item() >= 0)
        self.assertTrue(encoding['''labels'''].max().item() <= 255)

        # Test batched input (PIL images)
        images, segmentation_maps = prepare_semantic_batch_inputs()
        encoding = image_processing(images, segmentation_maps, return_tensors='''pt''')
        self.assertEqual(
            encoding['''pixel_values'''].shape, (
                2,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ), )
        self.assertEqual(
            encoding['''labels'''].shape, (
                2,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ), )
        self.assertEqual(encoding['''labels'''].dtype, torch.long)
        self.assertTrue(encoding['''labels'''].min().item() >= 0)
        self.assertTrue(encoding['''labels'''].max().item() <= 255)

    def test_reduce_labels(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)

        # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
        image, map = prepare_semantic_single_inputs()
        encoding = image_processing(image, map, return_tensors='''pt''')
        self.assertTrue(encoding['''labels'''].min().item() >= 0)
        self.assertTrue(encoding['''labels'''].max().item() <= 150)

        image_processing.do_reduce_labels = True
        encoding = image_processing(image, map, return_tensors='''pt''')
        self.assertTrue(encoding['''labels'''].min().item() >= 0)
        self.assertTrue(encoding['''labels'''].max().item() <= 255)
| 98
| 1
|
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
logger = logging.get_logger(__name__)


class MobileNetV2ImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits")

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False)
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
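
# Usage sketch (illustrative only; this module is written to live inside the
# transformers package, and the class name used above is an assumption made
# while de-obfuscating, so the example is left as a comment):
#
#   import numpy as np
#   processor = MobileNetV2ImageProcessor()
#   image = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)
#   pixel_values = processor(image, return_tensors="pt").pixel_values
#   # pixel_values.shape -> torch.Size([1, 3, 224, 224])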
| 52
|
'''simple docstring'''
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
logger = logging.get_logger(__name__)

DatasetType = TypeVar('DatasetType', Dataset, IterableDataset)


def interleave_datasets(
    datasets: List[DatasetType],
    probabilities: Optional[List[float]] = None,
    seed: Optional[int] = None,
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted",
) -> DatasetType:
    from .arrow_dataset import Dataset
    from .iterable_dataset import IterableDataset

    if not datasets:
        raise ValueError('''Unable to interleave an empty list of datasets.''')
    for i, dataset in enumerate(datasets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        F"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        '''is an empty dataset dictionary.''')
                raise ValueError(
                    F"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    F"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']")
            raise ValueError(
                F"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}.")
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                F"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.")
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(F"{stopping_strategy} is not supported. Please enter a valid stopping_strategy.")
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy)
    else:
        return _interleave_iterable_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy)


def concatenate_datasets(
    dsets: List[DatasetType],
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    axis: int = 0,
) -> DatasetType:
    if not dsets:
        raise ValueError('''Unable to concatenate an empty list of datasets.''')
    for i, dataset in enumerate(dsets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        F"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        '''is an empty dataset dictionary.''')
                raise ValueError(
                    F"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    F"Please pick one to concatenate with the other datasets, for example: dataset['{next(iter(dataset))}']")
            raise ValueError(
                F"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}.")
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                F"Unable to concatenate a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.")
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(dsets, info=info, split=split, axis=axis)
    else:
        return _concatenate_iterable_datasets(dsets, info=info, split=split, axis=axis)
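
# Usage sketch (illustrative; relies only on the public `datasets` API):
#
#   from datasets import Dataset, interleave_datasets
#
#   d1 = Dataset.from_dict({"a": [0, 1, 2]})
#   d2 = Dataset.from_dict({"a": [10, 11, 12]})
#   mixed = interleave_datasets([d1, d2], probabilities=[0.5, 0.5], seed=42)
#   # rows are drawn from d1 and d2 at random until one dataset is exhausted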
| 34
| 0
|
from __future__ import annotations
from random import random
class _a :
'''simple docstring'''
def __init__( self , A__ = None ):
A__ : Optional[int] = value
A__ : Any = random()
A__ : Node | None = None
A__ : Node | None = None
def __repr__( self ):
from pprint import pformat
if self.left is None and self.right is None:
return F"""'{self.value}: {self.prior:.5}'"""
else:
return pformat(
{F"""{self.value}: {self.prior:.5}""": (self.left, self.right)} , indent=1 )
    def __str__(self):
        value = str(self.value) + " "
        left = str(self.left or "")
        right = str(self.right or "")
        return value + left + right
def split(root: Node | None, value: int) -> tuple[Node | None, Node | None]:
    if root is None:  # None tree is split into 2 Nones
        return None, None
    elif root.value is None:
        return None, None
    else:
        if value < root.value:
            left, root.left = split(root.left, value)
            return left, root
        else:
            root.right, right = split(root.right, value)
            return root, right
def merge(left: Node | None, right: Node | None) -> Node | None:
    if (not left) or (not right):  # If one node is None, return the other
        return left or right
    elif left.prior < right.prior:
        left.right = merge(left.right, right)
        return left
    else:
        right.left = merge(left, right.left)
        return right
def insert(root: Node | None, value: int) -> Node | None:
    node = Node(value)
    left, right = split(root, value)
    return merge(merge(left, node), right)
def erase(root: Node | None, value: int) -> Node | None:
    left, right = split(root, value - 1)
    _, right = split(right, value)
    return merge(left, right)
def inorder(root: Node | None) -> None:
if not root: # None
return
else:
inorder(root.left )
print(root.value , end=""",""" )
inorder(root.right )
def interact_treap(root: Node | None, args: str) -> Node | None:
    for arg in args.split():
        if arg[0] == "+":
            root = insert(root, int(arg[1:]))
        elif arg[0] == "-":
            root = erase(root, int(arg[1:]))
        else:
            print("Unknown command")
    return root
def main() -> None:
    root = None
    print(
        "enter numbers to create a tree, + value to add value into treap, "
        "- value to erase all nodes with value. 'q' to quit. "
    )
    args = input()
    while args != "q":
        root = interact_treap(root, args)
        print(root)
        args = input()
    print("good bye!")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
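

# --- Added usage sketch (not part of the original file) ---
# Building a small treap non-interactively; an inorder traversal of a treap
# visits the values in sorted order.
#
#   root = None
#   for v in [5, 3, 9, 1]:
#       root = insert(root, v)
#   inorder(root)  # prints: 1,3,5,9,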
| 141
|
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {'configuration_gpt_neox': ['GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GPTNeoXConfig']}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_gpt_neox_fast'] = ['GPTNeoXTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_gpt_neox'] = [
'GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST',
'GPTNeoXForCausalLM',
'GPTNeoXForQuestionAnswering',
'GPTNeoXForSequenceClassification',
'GPTNeoXForTokenClassification',
'GPTNeoXLayer',
'GPTNeoXModel',
'GPTNeoXPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
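

# --- Added usage note (not part of the original file) ---
# With the lazy-module pattern above, heavy submodules are only imported on
# first attribute access, e.g. (assuming transformers is installed):
#
#   from transformers import GPTNeoXConfig  # resolved lazily via _LazyModule
#   config = GPTNeoXConfig()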
| 141
| 1
|
from __future__ import annotations
import math
import random
from typing import Any
class MyQueue:  # a simple FIFO queue backed by a list
    def __init__(self):
        self.data = []
        self.head = 0
        self.tail = 0

    def is_empty(self):
        return self.head == self.tail

    def push(self, data):
        self.data.append(data)
        self.tail = self.tail + 1

    def pop(self):
        ret = self.data[self.head]
        self.head = self.head + 1
        return ret

    def count(self):
        return self.tail - self.head

    def print_queue(self):
        print(self.data)
        print("**************")
        print(self.data[self.head : self.tail])
class MyNode:  # an AVL tree node storing its own height
    def __init__(self, data):
        self.data = data
        self.left = None
        self.right = None
        self.height = 1

    def get_data(self):
        return self.data

    def get_left(self):
        return self.left

    def get_right(self):
        return self.right

    def get_height(self):
        return self.height

    def set_data(self, data):
        self.data = data

    def set_left(self, node):
        self.left = node

    def set_right(self, node):
        self.right = node

    def set_height(self, height):
        self.height = height
def get_height(node):
    if node is None:
        return 0
    return node.get_height()


def my_max(a, b):
    if a > b:
        return a
    return b
def right_rotation(node):
    # Rotate the subtree rooted at `node` to the right: the left child rises.
    print("right rotation node:", node.get_data())
    ret = node.get_left()
    assert ret is not None
    node.set_left(ret.get_right())
    ret.set_right(node)
    h1 = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h1)
    h2 = my_max(get_height(ret.get_right()), get_height(ret.get_left())) + 1
    ret.set_height(h2)
    return ret
def left_rotation(node):
    # Rotate the subtree rooted at `node` to the left: the right child rises.
    print("left rotation node:", node.get_data())
    ret = node.get_right()
    assert ret is not None
    node.set_right(ret.get_left())
    ret.set_left(node)
    h1 = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h1)
    h2 = my_max(get_height(ret.get_right()), get_height(ret.get_left())) + 1
    ret.set_height(h2)
    return ret
def lr_rotation(node):
    # Left-rotate the left child, then right-rotate the node (left-right case).
    left_child = node.get_left()
    assert left_child is not None
    node.set_left(left_rotation(left_child))
    return right_rotation(node)


def rl_rotation(node):
    # Right-rotate the right child, then left-rotate the node (right-left case).
    right_child = node.get_right()
    assert right_child is not None
    node.set_right(right_rotation(right_child))
    return left_rotation(node)
def insert_node(node, data):
    if node is None:
        return MyNode(data)
    if data < node.get_data():
        node.set_left(insert_node(node.get_left(), data))
        if (
            get_height(node.get_left()) - get_height(node.get_right()) == 2
        ):  # an unbalance detected
            left_child = node.get_left()
            assert left_child is not None
            if (
                data < left_child.get_data()
            ):  # new node is the left child of the left child
                node = right_rotation(node)
            else:
                node = lr_rotation(node)
    else:
        node.set_right(insert_node(node.get_right(), data))
        if get_height(node.get_right()) - get_height(node.get_left()) == 2:
            right_child = node.get_right()
            assert right_child is not None
            if data < right_child.get_data():
                node = rl_rotation(node)
            else:
                node = left_rotation(node)
    h1 = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h1)
    return node
def get_right_most(root):
    while True:
        right_child = root.get_right()
        if right_child is None:
            break
        root = right_child
    return root.get_data()


def get_left_most(root):
    while True:
        left_child = root.get_left()
        if left_child is None:
            break
        root = left_child
    return root.get_data()
def del_node(root, data):
    left_child = root.get_left()
    right_child = root.get_right()
    if root.get_data() == data:
        if left_child is not None and right_child is not None:
            temp_data = get_left_most(right_child)
            root.set_data(temp_data)
            root.set_right(del_node(right_child, temp_data))
        elif left_child is not None:
            root = left_child
        elif right_child is not None:
            root = right_child
        else:
            return None
    elif root.get_data() > data:
        if left_child is None:
            print("No such data")
            return root
        else:
            root.set_left(del_node(left_child, data))
    else:  # root.get_data() < data
        if right_child is None:
            return root
        else:
            root.set_right(del_node(right_child, data))
    # Rebalance using the (pre-deletion) child heights.
    if get_height(right_child) - get_height(left_child) == 2:
        assert right_child is not None
        if get_height(right_child.get_right()) > get_height(right_child.get_left()):
            root = left_rotation(root)
        else:
            root = rl_rotation(root)
    elif get_height(right_child) - get_height(left_child) == -2:
        assert left_child is not None
        if get_height(left_child.get_left()) > get_height(left_child.get_right()):
            root = right_rotation(root)
        else:
            root = lr_rotation(root)
    height = my_max(get_height(root.get_right()), get_height(root.get_left())) + 1
    root.set_height(height)
    return root
class AVLtree:
    def __init__(self):
        self.root = None

    def get_height(self):
        return get_height(self.root)

    def insert(self, data):
        print("insert:" + str(data))
        self.root = insert_node(self.root, data)

    def del_node(self, data):
        print("delete:" + str(data))
        if self.root is None:
            print("Tree is empty!")
            return
        self.root = del_node(self.root, data)

    def __str__(self):  # a level traversal, gives a more intuitive look on the tree
        output = ""
        q = MyQueue()
        q.push(self.root)
        layer = self.get_height()
        if layer == 0:
            return output
        cnt = 0
        while not q.is_empty():
            node = q.pop()
            space = " " * int(math.pow(2, layer - 1))
            output += space
            if node is None:
                output += "*"
                q.push(None)
                q.push(None)
            else:
                output += str(node.get_data())
                q.push(node.get_left())
                q.push(node.get_right())
            output += space
            cnt = cnt + 1
            for i in range(100):
                if cnt == math.pow(2, i) - 1:
                    layer = layer - 1
                    if layer == 0:
                        output += "\n*************************************"
                        return output
                    output += "\n"
                    break
        output += "\n*************************************"
        return output
def _test():
    import doctest

    doctest.testmod()
if __name__ == "__main__":
_test()
lowercase__ :Union[str, Any] = AVLtree()
lowercase__ :List[str] = list(range(10))
random.shuffle(lst)
for i in lst:
t.insert(i)
print(str(t))
random.shuffle(lst)
for i in lst:
t.del_node(i)
print(str(t))
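

# --- Added sanity-check sketch (not part of the original file) ---
# An AVL tree with n nodes has height O(log n); a quick check could look like:
#
#   import math
#   t2 = AVLtree()
#   for v in range(100):
#       t2.insert(v)
#   assert t2.get_height() <= 1.44 * math.log2(100 + 2)  # classic AVL height bound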
| 101
|
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        num_channels=3,
        image_size=4,
        patch_size=2,
        text_seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=36,
        num_hidden_layers=3,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        coordinate_size=6,
        shape_size=6,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.text_seq_length = text_seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox
        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size)
        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels)
        config = LayoutLMvaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range, coordinate_size=self.coordinate_size,
            shape_size=self.shape_size, input_size=self.image_size, patch_size=self.patch_size,
        )
        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    def create_and_check_model(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LayoutLMvaModel(config=config)
        model.to(torch_device)
        model.eval()
        # text + image
        result = model(input_ids, pixel_values=pixel_values)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids
        )
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        # text only
        result = model(input_ids)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size)
        )
        # image only
        result = model(pixel_values=pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size)
        )
    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LayoutLMvaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask,
            token_type_ids=token_type_ids, labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LayoutLMvaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask,
            token_type_ids=token_type_ids, labels=token_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))
    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LayoutLMvaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask,
            token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class LayoutLMvaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_pruning = False
    test_torchscript = False
    test_mismatched_shapes = False

    all_model_classes = (
        (
            LayoutLMvaModel,
            LayoutLMvaForSequenceClassification,
            LayoutLMvaForTokenClassification,
            LayoutLMvaForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"document-question-answering": LayoutLMvaForQuestionAnswering, "feature-extraction": LayoutLMvaModel}
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        # `DocumentQuestionAnsweringPipeline` is expected to work with this model, but it combines the text and visual
        # embedding along the sequence dimension (dim 1), which causes an error during post-processing as `p_mask` has
        # the sequence dimension of the text embedding only.
        # (see the line `embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1)`)
        return True

    def setUp(self):
        self.model_tester = LayoutLMvaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMvaConfig, hidden_size=37)
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)
        if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: v.unsqueeze(1).expand(-1, self.model_tester.num_choices, -1).contiguous()
                if isinstance(v, torch.Tensor) and v.ndim > 1
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = torch.ones(self.model_tester.batch_size, dtype=torch.long, device=torch_device)
            elif model_class in get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
            elif model_class in [
                *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
            elif model_class in [
                *get_values(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )
        return inputs_dict
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LayoutLMvaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
class LayoutLMvaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LayoutLMvaImageProcessor(apply_ocr=False) if is_vision_available() else None

    @slow
    def test_inference_no_head(self):
        model = LayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="pt").pixel_values.to(torch_device)
        input_ids = torch.tensor([[1, 2]])
        bbox = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]]).unsqueeze(0)
        # forward pass
        outputs = model(
            input_ids=input_ids.to(torch_device), bbox=bbox.to(torch_device), pixel_values=pixel_values.to(torch_device)
        )
        # verify the logits
        expected_shape = torch.Size((1, 199, 768))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
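

# --- Added usage sketch (not part of the original test file) ---
# A hedged end-to-end example of running LayoutLMv3 on a document image (a PIL
# image) with word boxes; the processor is assumed to be used with apply_ocr=False.
#
#   from transformers import AutoProcessor, AutoModel
#   processor = AutoProcessor.from_pretrained("microsoft/layoutlmv3-base", apply_ocr=False)
#   words, boxes = ["hello", "world"], [[1, 2, 3, 4], [5, 6, 7, 8]]
#   encoding = processor(image, words, boxes=boxes, return_tensors="pt")
#   outputs = AutoModel.from_pretrained("microsoft/layoutlmv3-base")(**encoding)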
| 101
| 1
|
import argparse
import os

import jax
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from t5x import checkpoints

from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, T5FilmDecoder
MODEL = 'base_with_context'
def load_notes_encoder(weights, model):
    # NOTE: the attribute paths below are reconstructed to match the upstream
    # conversion script; verify them against the installed diffusers version.
    model.token_embedder.weight = nn.Parameter(torch.FloatTensor(weights["token_embedder"]["embedding"]))
    model.position_encoding.weight = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False
    )
    for lyr_num, lyr in enumerate(model.encoders):
        ly_weight = weights[f"layers_{lyr_num}"]
        lyr.layer[0].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"])
        )
        attention_weights = ly_weight["attention"]
        lyr.layer[0].SelfAttention.q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[0].SelfAttention.k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[0].SelfAttention.v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[0].SelfAttention.o.weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))
        lyr.layer[1].layer_norm.weight = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"]))
        lyr.layer[1].DenseReluDense.wi_0.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wi_1.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wo.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T))
    model.layer_norm.weight = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"]))
    return model
def load_continuous_encoder(weights, model):
    # NOTE: attribute paths reconstructed; see the note in load_notes_encoder.
    model.input_proj.weight = nn.Parameter(torch.FloatTensor(weights["input_proj"]["kernel"].T))
    model.position_encoding.weight = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False
    )
    for lyr_num, lyr in enumerate(model.encoders):
        ly_weight = weights[f"layers_{lyr_num}"]
        attention_weights = ly_weight["attention"]
        lyr.layer[0].SelfAttention.q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[0].SelfAttention.k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[0].SelfAttention.v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[0].SelfAttention.o.weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))
        lyr.layer[0].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"])
        )
        lyr.layer[1].DenseReluDense.wi_0.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wi_1.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wo.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T))
        lyr.layer[1].layer_norm.weight = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"]))
    model.layer_norm.weight = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"]))
    return model
def load_decoder(weights, model):
    # NOTE: attribute paths reconstructed; see the note in load_notes_encoder.
    model.conditioning_emb[0].weight = nn.Parameter(torch.FloatTensor(weights["time_emb_dense0"]["kernel"].T))
    model.conditioning_emb[2].weight = nn.Parameter(torch.FloatTensor(weights["time_emb_dense1"]["kernel"].T))
    model.position_encoding.weight = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False
    )
    model.continuous_inputs_projection.weight = nn.Parameter(
        torch.FloatTensor(weights["continuous_inputs_projection"]["kernel"].T)
    )
    for lyr_num, lyr in enumerate(model.decoders):
        ly_weight = weights[f"layers_{lyr_num}"]
        lyr.layer[0].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_self_attention_layer_norm"]["scale"])
        )
        lyr.layer[0].FiLMLayer.scale_bias.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["FiLMLayer_0"]["DenseGeneral_0"]["kernel"].T)
        )
        attention_weights = ly_weight["self_attention"]
        lyr.layer[0].attention.to_q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[0].attention.to_k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[0].attention.to_v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[0].attention.to_out[0].weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))
        attention_weights = ly_weight["MultiHeadDotProductAttention_0"]
        lyr.layer[1].attention.to_q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[1].attention.to_k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[1].attention.to_v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[1].attention.to_out[0].weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))
        lyr.layer[1].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_cross_attention_layer_norm"]["scale"])
        )
        lyr.layer[2].layer_norm.weight = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"]))
        lyr.layer[2].film.scale_bias.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["FiLMLayer_1"]["DenseGeneral_0"]["kernel"].T)
        )
        lyr.layer[2].DenseReluDense.wi_0.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T))
        lyr.layer[2].DenseReluDense.wi_1.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T))
        lyr.layer[2].DenseReluDense.wo.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T))
    model.decoder_norm.weight = nn.Parameter(torch.FloatTensor(weights["decoder_norm"]["scale"]))
    model.spec_out.weight = nn.Parameter(torch.FloatTensor(weights["spec_out_dense"]["kernel"].T))
    return model
def main(args):
    ta_checkpoint = checkpoints.load_t5x_checkpoint(args.checkpoint_path)
    ta_checkpoint = jax.tree_util.tree_map(onp.array, ta_checkpoint)
    gin_overrides = [
        "from __gin__ import dynamic_registration",
        "from music_spectrogram_diffusion.models.diffusion import diffusion_utils",
        "diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0",
        "diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()",
    ]
    gin_file = os.path.join(args.checkpoint_path, "..", "config.gin")
    gin_config = inference.parse_training_gin_file(gin_file, gin_overrides)
    synth_model = inference.InferenceModel(args.checkpoint_path, gin_config)
    scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2", variance_type="fixed_large")
    notes_encoder = SpectrogramNotesEncoder(
        max_length=synth_model.sequence_length["inputs"], vocab_size=synth_model.model.module.config.vocab_size,
        d_model=synth_model.model.module.config.emb_dim, dropout_rate=synth_model.model.module.config.dropout_rate,
        num_layers=synth_model.model.module.config.num_encoder_layers, num_heads=synth_model.model.module.config.num_heads,
        d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim,
        feed_forward_proj="gated-gelu",
    )
    continuous_encoder = SpectrogramContEncoder(
        input_dims=synth_model.audio_codec.n_dims, targets_context_length=synth_model.sequence_length["targets_context"],
        d_model=synth_model.model.module.config.emb_dim, dropout_rate=synth_model.model.module.config.dropout_rate,
        num_layers=synth_model.model.module.config.num_encoder_layers, num_heads=synth_model.model.module.config.num_heads,
        d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim,
        feed_forward_proj="gated-gelu",
    )
    decoder = T5FilmDecoder(
        input_dims=synth_model.audio_codec.n_dims, targets_length=synth_model.sequence_length["targets_context"],
        max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time,
        d_model=synth_model.model.module.config.emb_dim, num_layers=synth_model.model.module.config.num_decoder_layers,
        num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim,
        d_ff=synth_model.model.module.config.mlp_dim, dropout_rate=synth_model.model.module.config.dropout_rate,
    )
    notes_encoder = load_notes_encoder(ta_checkpoint["target"]["token_encoder"], notes_encoder)
    continuous_encoder = load_continuous_encoder(ta_checkpoint["target"]["continuous_encoder"], continuous_encoder)
    decoder = load_decoder(ta_checkpoint["target"]["decoder"], decoder)
    melgan = OnnxRuntimeModel.from_pretrained("kashif/soundstream_mel_decoder")
    pipe = SpectrogramDiffusionPipeline(
        notes_encoder=notes_encoder, continuous_encoder=continuous_encoder, decoder=decoder,
        scheduler=scheduler, melgan=melgan,
    )
    if args.save:
        pipe.save_pretrained(args.output_path)
if __name__ == "__main__":
a : Dict = argparse.ArgumentParser()
parser.add_argument('--output_path', default=None, type=str, required=True, help='Path to the converted model.')
parser.add_argument(
'--save', default=True, type=bool, required=False, help='Whether to save the converted model or not.'
)
parser.add_argument(
'--checkpoint_path',
default=f'''{MODEL}/checkpoint_500000''',
type=str,
required=False,
help='Path to the original jax model checkpoint.',
)
a : List[str] = parser.parse_args()
main(args)
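

# --- Added usage note (not part of the original script) ---
# A hedged example invocation, assuming this file is saved as
# convert_music_spectrogram_to_diffusers.py and a checkpoint has been downloaded:
#
#   python convert_music_spectrogram_to_diffusers.py \
#       --checkpoint_path base_with_context/checkpoint_500000 \
#       --output_path ./spectrogram_diffusion_pipeline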
| 72
|
import unittest

from diffusers.pipelines.pipeline_utils import is_safetensors_compatible


class IsSafetensorsCompatibleTests(unittest.TestCase):
    def test_all_is_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_compatible(self):
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            # Removed: 'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_transformer_model_is_compatible(self):
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_transformer_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            # Removed: 'text_encoder/model.safetensors',
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_all_is_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant(self):
        filenames = [
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant(self):
        filenames = [
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            # 'text_encoder/model.fp16.safetensors',
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))
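

# --- Added usage note (not part of the original test file) ---
# The rule these tests exercise: a repo is "safetensors compatible" when every
# PyTorch weight file (*.bin) has a matching *.safetensors counterpart, e.g.:
#
#   filenames = ["unet/diffusion_pytorch_model.bin",
#                "unet/diffusion_pytorch_model.safetensors"]
#   assert is_safetensors_compatible(filenames)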
| 72
| 1
|
ROMAN = [
    (1000, "M"),
    (900, "CM"),
    (500, "D"),
    (400, "CD"),
    (100, "C"),
    (90, "XC"),
    (50, "L"),
    (40, "XL"),
    (10, "X"),
    (9, "IX"),
    (5, "V"),
    (4, "IV"),
    (1, "I"),
]


def roman_to_int(roman: str) -> int:
    """Convert a Roman numeral to an integer, e.g. roman_to_int("XIV") == 14."""
    vals = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
    total = 0
    place = 0
    while place < len(roman):
        if (place + 1 < len(roman)) and (vals[roman[place]] < vals[roman[place + 1]]):
            total += vals[roman[place + 1]] - vals[roman[place]]
            place += 2
        else:
            total += vals[roman[place]]
            place += 1
    return total


def int_to_roman(number: int) -> str:
    """Convert an integer to a Roman numeral, e.g. int_to_roman(14) == "XIV"."""
    result = []
    for arabic, roman in ROMAN:
        factor, number = divmod(number, arabic)
        result.append(roman * factor)
        if number == 0:
            break
    return "".join(result)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
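

# --- Added usage sketch (not part of the original file) ---
#   assert roman_to_int("MMXXIV") == 2024
#   assert int_to_roman(2024) == "MMXXIV"
#   assert roman_to_int(int_to_roman(3549)) == 3549  # round-trip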
| 133
|
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
hf_table_format = TableFormat(
    lineabove=None,
    linebelowheader=None,
    linebetweenrows=None,
    linebelow=None,
    headerrow=DataRow('', '|', '|'),
    datarow=DataRow('', '|', '|'),
    padding=1,
    with_header_hide=None,
)

failed = []
group_info = []
no_error_payload = {'type': 'section', 'text': {'type': 'plain_text', 'text': 'No failed tests! 🤗', 'emoji': True}}
payload = [
    {
        'type': 'header',
        'text': {
            'type': 'plain_text',
            'text': f"🤗 Accelerate nightly {os.environ.get('TEST_TYPE', '')} test results",
            'emoji': True,
        },
    }
]

total_num_failed = 0
for log in Path().glob('*.log'):
    section_num_failed = 0
    with open(log, 'r') as f:
        for line in f:
            line = json.loads(line)
            if line.get('nodeid', '') != "":
                test = line['nodeid']
                if line.get('duration', None) is not None:
                    duration = f"{line['duration']:.4f}"
                    if line.get('outcome', '') == "failed":
                        section_num_failed += 1
                        failed.append([test, duration, log.name.split('_')[0]])
                        total_num_failed += 1
    group_info.append([str(log), section_num_failed, failed])
    failed = []
    log.unlink()

message = ''
all_filesafailed = []
if total_num_failed > 0:
    for name, num_failed, failed_tests in group_info:
        if num_failed > 0:
            if num_failed == 1:
                message += f"*{name[1:]}: {num_failed} failed test*\n"
            else:
                message += f"*{name[1:]}: {num_failed} failed tests*\n"
            failed_table = []
            filesafailed = {}
            for test in failed_tests:
                data = test[0].split('::')
                data[0] = data[0].split('/')[-1]
                if data[0] not in filesafailed:
                    filesafailed[data[0]] = [data[1:]]
                else:
                    filesafailed[data[0]] += [data[1:]]
                failed_table.append(data)
            files = [test[0] for test in failed_table]
            individual_files = list(set(files))
            # Count number of instances in failed_tests
            table = []
            for file in individual_files:
                table.append([file, len(filesafailed[file])])
            failed_table = tabulate(
                table,
                headers=['Test Location', 'Num Failed'],
                tablefmt=hf_table_format,
                stralign='right',
            )
            message += f"\n```\n{failed_table}\n```"
            all_filesafailed.append(filesafailed)
    if len(message) > 3000:
        err = 'Too many failed tests, please see the full report in the Action results.'
        offset = len(err) + 10
        message = message[: 3000 - offset] + f"\n...\n```\n{err}"
    print(f"### {message}")
else:
    message = 'No failed tests! 🤗'
    print(f"## {message}")
    payload.append(no_error_payload)

if os.environ.get('TEST_TYPE', '') != "":
    from slack_sdk import WebClient

    client = WebClient(token=os.environ['SLACK_API_TOKEN'])
    if message != "No failed tests! 🤗":
        md_report = {
            'type': 'section',
            'text': {
                'type': 'mrkdwn',
                'text': message,
            },
        }
        payload.append(md_report)
        action_button = {
            'type': 'section',
            'text': {
                'type': 'mrkdwn',
                'text': '*For more details:*',
            },
            'accessory': {
                'type': 'button',
                'text': {
                    'type': 'plain_text',
                    'text': 'Check Action results',
                    'emoji': True,
                },
                'url': f"https://github.com/{os.environ['GITHUB_REPOSITORY']}/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }
        payload.append(action_button)
        date_report = {
            'type': 'context',
            'elements': [
                {
                    'type': 'plain_text',
                    'text': f"Nightly {os.environ.get('TEST_TYPE')} test results for {date.today()}",
                }
            ],
        }
        payload.append(date_report)
        response = client.chat_postMessage(channel='#accelerate-ci-daily', text=message, blocks=payload)
        ts = response.data['ts']
        for failed_file in all_filesafailed:
            for test_location, test_failures in failed_file.items():
                # Keep only the first instance of the test name
                test_class = ''
                for i, row in enumerate(test_failures):
                    if row[0] != test_class:
                        test_class = row[0]
                    else:
                        row[0] = ''
                payload = {
                    'type': 'section',
                    'text': {
                        'type': 'mrkdwn',
                        'text': f"Test location: {test_location}\n```\n{tabulate(test_failures, headers=['Class', 'Test'], tablefmt=hf_table_format, stralign='right')}\n```",
                    },
                }
                client.chat_postMessage(
                    channel='#accelerate-ci-daily',
                    thread_ts=ts,
                    blocks=[payload],
                )
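

# --- Added format note (not part of the original script) ---
# Each *.log file is expected to contain one JSON object per line (as produced
# by a pytest JSON-lines report plugin); only these keys are read above:
#
#   {"nodeid": "tests/test_x.py::TestX::test_y", "duration": 0.1234, "outcome": "failed"}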
| 133
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    """configuration_funnel""": ["""FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FunnelConfig"""],
    """convert_funnel_original_tf_checkpoint_to_pytorch""": [],
    """tokenization_funnel""": ["""FunnelTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = ["""FunnelTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_funnel"""] = [
"""FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FunnelBaseModel""",
"""FunnelForMaskedLM""",
"""FunnelForMultipleChoice""",
"""FunnelForPreTraining""",
"""FunnelForQuestionAnswering""",
"""FunnelForSequenceClassification""",
"""FunnelForTokenClassification""",
"""FunnelModel""",
"""FunnelPreTrainedModel""",
"""load_tf_weights_in_funnel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_funnel"""] = [
"""TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFFunnelBaseModel""",
"""TFFunnelForMaskedLM""",
"""TFFunnelForMultipleChoice""",
"""TFFunnelForPreTraining""",
"""TFFunnelForQuestionAnswering""",
"""TFFunnelForSequenceClassification""",
"""TFFunnelForTokenClassification""",
"""TFFunnelModel""",
"""TFFunnelPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 359
|
from __future__ import annotations


class IIRFilter:
    """N-order IIR filter operating on floating-point samples."""

    def __init__(self, order: int):
        self.order = order
        # a_{0} ... a_{k}
        self.a_coeffs = [1.0] + [0.0] * order
        # b_{0} ... b_{k}
        self.b_coeffs = [1.0] + [0.0] * order
        # x[n-1] ... x[n-k]
        self.input_history = [0.0] * self.order
        # y[n-1] ... y[n-k]
        self.output_history = [0.0] * self.order

    def set_coefficients(self, a_coeffs: list[float], b_coeffs: list[float]) -> None:
        if len(a_coeffs) < self.order:
            a_coeffs = [1.0, *a_coeffs]
        if len(a_coeffs) != self.order + 1:
            error_msg = (
                f"Expected a_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(a_coeffs)}"
            )
            raise ValueError(error_msg)
        if len(b_coeffs) != self.order + 1:
            error_msg = (
                f"Expected b_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(b_coeffs)}"
            )
            raise ValueError(error_msg)
        self.a_coeffs = a_coeffs
        self.b_coeffs = b_coeffs

    def process(self, sample: float) -> float:
        result = 0.0
        # Start at index 1 and do index 0 at the end.
        for i in range(1, self.order + 1):
            result += (
                self.b_coeffs[i] * self.input_history[i - 1]
                - self.a_coeffs[i] * self.output_history[i - 1]
            )
        result = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]
        # Shift the histories and store the newest input/output.
        self.input_history[1:] = self.input_history[:-1]
        self.output_history[1:] = self.output_history[:-1]
        self.input_history[0] = sample
        self.output_history[0] = result
        return result
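

# --- Added usage sketch (not part of the original file) ---
# Illustrative only: the coefficient values below are arbitrary placeholders,
# not a designed filter; real a/b coefficients would come from a filter design.
#
#   filt = IIRFilter(2)
#   filt.set_coefficients([1.0, -0.5, 0.25], [0.3, 0.3, 0.3])
#   out = [filt.process(x) for x in [1.0, 0.0, 0.0, 0.0]]  # impulse response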
| 229
| 0
|