import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNet2DModel, VQModel
def shave_segments(path, n_shave_prefix_segments=1):
    """Removes leading (or, if negative, trailing) dot-separated path segments."""
    if n_shave_prefix_segments >= 0:
        return ".".join(path.split(".")[n_shave_prefix_segments:])
    else:
        return ".".join(path.split(".")[:n_shave_prefix_segments])
def renew_resnet_paths(old_list, n_shave_prefix_segments=0):
    """Updates paths inside resnets to the new naming scheme (local renaming)."""
    mapping = []
    for old_item in old_list:
        new_item = old_item.replace("in_layers.0", "norm1")
        new_item = new_item.replace("in_layers.2", "conv1")
        new_item = new_item.replace("out_layers.0", "norm2")
        new_item = new_item.replace("out_layers.3", "conv2")
        new_item = new_item.replace("emb_layers.1", "time_emb_proj")
        new_item = new_item.replace("skip_connection", "conv_shortcut")
        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)
        mapping.append({"old": old_item, "new": new_item})
    return mapping
def renew_attention_paths(old_list, n_shave_prefix_segments=0):
    """Updates paths inside attentions to the new naming scheme (local renaming)."""
    mapping = []
    for old_item in old_list:
        new_item = old_item
        new_item = new_item.replace("norm.weight", "group_norm.weight")
        new_item = new_item.replace("norm.bias", "group_norm.bias")
        new_item = new_item.replace("proj_out.weight", "proj_attn.weight")
        new_item = new_item.replace("proj_out.bias", "proj_attn.bias")
        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)
        mapping.append({"old": old_item, "new": new_item})
    return mapping
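# Illustrative mapping produced by the helpers above:
# renew_resnet_paths(["input_blocks.1.0.in_layers.0.weight"]) returns
# [{"old": "input_blocks.1.0.in_layers.0.weight", "new": "input_blocks.1.0.norm1.weight"}].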
def assign_to_checkpoint(
    paths, checkpoint, old_checkpoint, attention_paths_to_split=None, additional_replacements=None, config=None
):
    assert isinstance(paths, list), "Paths should be a list of dicts containing 'old' and 'new' keys."

    # Splits the attention layers into three variables.
    if attention_paths_to_split is not None:
        for path, path_map in attention_paths_to_split.items():
            old_tensor = old_checkpoint[path]
            channels = old_tensor.shape[0] // 3

            target_shape = (-1, channels) if len(old_tensor.shape) == 3 else (-1)

            num_heads = old_tensor.shape[0] // config["num_head_channels"] // 3

            old_tensor = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:])
            query, key, value = old_tensor.split(channels // num_heads, dim=1)

            checkpoint[path_map["query"]] = query.reshape(target_shape)
            checkpoint[path_map["key"]] = key.reshape(target_shape)
            checkpoint[path_map["value"]] = value.reshape(target_shape)

    for path in paths:
        new_path = path["new"]

        # These have already been assigned
        if attention_paths_to_split is not None and new_path in attention_paths_to_split:
            continue

        # Global renaming happens here
        new_path = new_path.replace("middle_block.0", "mid_block.resnets.0")
        new_path = new_path.replace("middle_block.1", "mid_block.attentions.0")
        new_path = new_path.replace("middle_block.2", "mid_block.resnets.1")

        if additional_replacements is not None:
            for replacement in additional_replacements:
                new_path = new_path.replace(replacement["old"], replacement["new"])

        # proj_attn.weight has to be converted from conv 1D to linear
        if "proj_attn.weight" in new_path:
            checkpoint[new_path] = old_checkpoint[path["old"]][:, :, 0]
        else:
            checkpoint[new_path] = old_checkpoint[path["old"]]
def convert_ldm_checkpoint(checkpoint, config):
    """Takes a state dict and a config, and returns a converted checkpoint."""
    new_checkpoint = {}

    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]

    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]

    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]

    # Retrieves the keys for the input blocks only
    num_input_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "input_blocks" in layer})
    input_blocks = {
        layer_id: [key for key in checkpoint if f"input_blocks.{layer_id}" in key]
        for layer_id in range(num_input_blocks)
    }

    # Retrieves the keys for the middle blocks only
    num_middle_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "middle_block" in layer})
    middle_blocks = {
        layer_id: [key for key in checkpoint if f"middle_block.{layer_id}" in key]
        for layer_id in range(num_middle_blocks)
    }

    # Retrieves the keys for the output blocks only
    num_output_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "output_blocks" in layer})
    output_blocks = {
        layer_id: [key for key in checkpoint if f"output_blocks.{layer_id}" in key]
        for layer_id in range(num_output_blocks)
    }

    for i in range(1, num_input_blocks):
        block_id = (i - 1) // (config["num_res_blocks"] + 1)
        layer_in_block_id = (i - 1) % (config["num_res_blocks"] + 1)

        resnets = [key for key in input_blocks[i] if f"input_blocks.{i}.0" in key]
        attentions = [key for key in input_blocks[i] if f"input_blocks.{i}.1" in key]

        if f"input_blocks.{i}.0.op.weight" in checkpoint:
            new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.weight"] = checkpoint[
                f"input_blocks.{i}.0.op.weight"
            ]
            new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.bias"] = checkpoint[
                f"input_blocks.{i}.0.op.bias"
            ]
            continue

        paths = renew_resnet_paths(resnets)
        meta_path = {"old": f"input_blocks.{i}.0", "new": f"down_blocks.{block_id}.resnets.{layer_in_block_id}"}
        resnet_op = {"old": "resnets.2.op", "new": "downsamplers.0.op"}
        assign_to_checkpoint(
            paths, new_checkpoint, checkpoint, additional_replacements=[meta_path, resnet_op], config=config
        )

        if len(attentions):
            paths = renew_attention_paths(attentions)
            meta_path = {
                "old": f"input_blocks.{i}.1",
                "new": f"down_blocks.{block_id}.attentions.{layer_in_block_id}",
            }
            to_split = {
                f"input_blocks.{i}.1.qkv.bias": {
                    "key": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
                    "query": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
                    "value": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
                },
                f"input_blocks.{i}.1.qkv.weight": {
                    "key": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
                    "query": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
                    "value": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
                },
            }
            assign_to_checkpoint(
                paths,
                new_checkpoint,
                checkpoint,
                additional_replacements=[meta_path],
                attention_paths_to_split=to_split,
                config=config,
            )

    resnet_0 = middle_blocks[0]
    attentions = middle_blocks[1]
    resnet_1 = middle_blocks[2]

    resnet_0_paths = renew_resnet_paths(resnet_0)
    assign_to_checkpoint(resnet_0_paths, new_checkpoint, checkpoint, config=config)

    resnet_1_paths = renew_resnet_paths(resnet_1)
    assign_to_checkpoint(resnet_1_paths, new_checkpoint, checkpoint, config=config)

    attentions_paths = renew_attention_paths(attentions)
    to_split = {
        "middle_block.1.qkv.bias": {
            "key": "mid_block.attentions.0.key.bias",
            "query": "mid_block.attentions.0.query.bias",
            "value": "mid_block.attentions.0.value.bias",
        },
        "middle_block.1.qkv.weight": {
            "key": "mid_block.attentions.0.key.weight",
            "query": "mid_block.attentions.0.query.weight",
            "value": "mid_block.attentions.0.value.weight",
        },
    }
    assign_to_checkpoint(
        attentions_paths, new_checkpoint, checkpoint, attention_paths_to_split=to_split, config=config
    )

    for i in range(num_output_blocks):
        block_id = i // (config["num_res_blocks"] + 1)
        layer_in_block_id = i % (config["num_res_blocks"] + 1)
        output_block_layers = [shave_segments(name, 2) for name in output_blocks[i]]
        output_block_list = {}

        for layer in output_block_layers:
            layer_id, layer_name = layer.split(".")[0], shave_segments(layer, 1)
            if layer_id in output_block_list:
                output_block_list[layer_id].append(layer_name)
            else:
                output_block_list[layer_id] = [layer_name]

        if len(output_block_list) > 1:
            resnets = [key for key in output_blocks[i] if f"output_blocks.{i}.0" in key]
            attentions = [key for key in output_blocks[i] if f"output_blocks.{i}.1" in key]

            resnet_0_paths = renew_resnet_paths(resnets)
            paths = renew_resnet_paths(resnets)

            meta_path = {"old": f"output_blocks.{i}.0", "new": f"up_blocks.{block_id}.resnets.{layer_in_block_id}"}
            assign_to_checkpoint(
                paths, new_checkpoint, checkpoint, additional_replacements=[meta_path], config=config
            )

            if ["conv.weight", "conv.bias"] in output_block_list.values():
                index = list(output_block_list.values()).index(["conv.weight", "conv.bias"])
                new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.weight"] = checkpoint[
                    f"output_blocks.{i}.{index}.conv.weight"
                ]
                new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.bias"] = checkpoint[
                    f"output_blocks.{i}.{index}.conv.bias"
                ]

                # Clear attentions as they have been attributed above.
                if len(attentions) == 2:
                    attentions = []

            if len(attentions):
                paths = renew_attention_paths(attentions)
                meta_path = {
                    "old": f"output_blocks.{i}.1",
                    "new": f"up_blocks.{block_id}.attentions.{layer_in_block_id}",
                }
                to_split = {
                    f"output_blocks.{i}.1.qkv.bias": {
                        "key": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
                        "query": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
                        "value": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
                    },
                    f"output_blocks.{i}.1.qkv.weight": {
                        "key": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
                        "query": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
                        "value": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
                    },
                }
                assign_to_checkpoint(
                    paths,
                    new_checkpoint,
                    checkpoint,
                    additional_replacements=[meta_path],
                    attention_paths_to_split=to_split if any("qkv" in key for key in attentions) else None,
                    config=config,
                )
        else:
            resnet_0_paths = renew_resnet_paths(output_block_layers, n_shave_prefix_segments=1)
            for path in resnet_0_paths:
                old_path = ".".join(["output_blocks", str(i), path["old"]])
                new_path = ".".join(["up_blocks", str(block_id), "resnets", str(layer_in_block_id), path["new"]])
                new_checkpoint[new_path] = checkpoint[old_path]

    return new_checkpoint
if __name__ == "__main__":
__a = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the architecture.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
__a = parser.parse_args()
__a = torch.load(args.checkpoint_path)
with open(args.config_file) as f:
__a = json.loads(f.read())
__a = convert_ldm_checkpoint(checkpoint, config)
if "ldm" in config:
del config["ldm"]
__a = UNetaDModel(**config)
model.load_state_dict(converted_checkpoint)
try:
__a = DDPMScheduler.from_config('/'.join(args.checkpoint_path.split('/')[:-1]))
__a = VQModel.from_pretrained('/'.join(args.checkpoint_path.split('/')[:-1]))
__a = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
pipe.save_pretrained(args.dump_path)
except: # noqa: E722
model.save_pretrained(args.dump_path)
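# Illustrative invocation (hypothetical script name and paths):
#   python convert_ldm_original_checkpoint_to_diffusers.py \
#       --checkpoint_path model.ckpt --config_file config.json --dump_path ./converted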
import argparse
import collections
import torch
from flax import traverse_util
from t5x import checkpoints
from transformers import T5Config, T5EncoderModel, T5ForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def t5x_attention_lookup(params, i, prefix, layer_name="attention"):
    """Returns the KOQV parameters of (self-)attention. Does not transpose."""
    k = params[f"{prefix}/layers_{i}/{layer_name}/key/kernel"]
    o = params[f"{prefix}/layers_{i}/{layer_name}/out/kernel"]
    q = params[f"{prefix}/layers_{i}/{layer_name}/query/kernel"]
    v = params[f"{prefix}/layers_{i}/{layer_name}/value/kernel"]
    return k, o, q, v
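# Note: T5X/Flax stores dense kernels as (in_features, out_features); the `.T`
# transposes applied below convert them to PyTorch's (out_features, in_features).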
def t5x_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Returns the MLP parameters of a layer. Does not transpose."""
    if split_mlp_wi:
        wi_0 = params[f"{prefix}/layers_{i}/mlp/wi_0/kernel"]
        wi_1 = params[f"{prefix}/layers_{i}/mlp/wi_1/kernel"]
        wi = (wi_0, wi_1)
    else:
        wi = params[f"{prefix}/layers_{i}/mlp/wi/kernel"]

    wo = params[f"{prefix}/layers_{i}/mlp/wo/kernel"]
    return wi, wo
def t5x_layer_norm_lookup(params, i, prefix, layer_name):
    """Returns the layer norm param of a layer."""
    return params[f"{prefix}/layers_{i}/{layer_name}/scale"]
def convert_t5x_to_pytorch(variables: dict, *, num_layers: int, is_encoder_only: bool):
    """Converts the parameters from T5X-Flax to Transformers-PyTorch."""
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}

    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/layers_0/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)

    new = collections.OrderedDict()

    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]

    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = t5x_attention_lookup(old, i, "encoder", "attention")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

        # Block i, layer 1 (MLP).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = t5x_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T

    new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = old[
        "encoder/relpos_bias/rel_embedding"
    ].T
    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]

    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "self_attention")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

            # Block i, layer 1 (Cross Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T

            # Block i, layer 2 (MLP).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = t5x_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T

        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = old[
            "decoder/relpos_bias/rel_embedding"
        ].T

    # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
    if "decoder/logits_dense/kernel" in old:
        new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T

    return new
def make_state_dict(converted_params, is_encoder_only: bool):
    """Prepares a state dict for the PyTorch model."""
    # Make a state dict with torch tensors.
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])

    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]

    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]

        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]

    return state_dict
def load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only):
    """Replaces the params in model with the T5X converted params."""
    variables = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    converted = convert_t5x_to_pytorch(variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only)
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)
def convert_t5x_checkpoint_to_pytorch(t5x_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only: bool = False):
    """Loads the config and model, converts the T5X checkpoint, and saves a PyTorch checkpoint."""
    # Initialise PyTorch model
    config = T5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = T5EncoderModel(config)
    else:
        model = T5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")
if __name__ == "__main__":
snake_case : Any = argparse.ArgumentParser(description='''Converts a native T5X checkpoint into a PyTorch checkpoint.''')
# Required parameters
parser.add_argument(
'''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path to the T5X checkpoint.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--is_encoder_only''', action='''store_true''', help='''Check if the model is encoder-decoder model''', default=False
)
snake_case : Optional[Any] = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
args.tax_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
)
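# Illustrative invocation (hypothetical paths):
#   python convert_t5x_checkpoint_to_pytorch.py --t5x_checkpoint_path /path/to/t5x/ckpt \
#       --config_file config.json --pytorch_dump_path ./t5-pytorch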
import torch

from ..models.speecht5 import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class TextToSpeechTool(PipelineTool):
    default_checkpoint = "microsoft/speecht5_tts"
    description = (
        "This is a tool that reads an English text out loud. It takes an input named `text` which should contain the "
        "text to read (in English) and returns a waveform object containing the sound."
    )
    name = "text_reader"
    pre_processor_class = SpeechT5Processor
    model_class = SpeechT5ForTextToSpeech
    post_processor_class = SpeechT5HifiGan

    inputs = ["text"]
    outputs = ["audio"]

    def setup(self):
        if self.post_processor is None:
            self.post_processor = "microsoft/speecht5_hifigan"
        super().setup()

    def encode(self, text, speaker_embeddings=None):
        inputs = self.pre_processor(text=text, return_tensors="pt", truncation=True)

        if speaker_embeddings is None:
            if not is_datasets_available():
                raise ImportError("Datasets needs to be installed if not passing speaker embeddings.")

            embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
            speaker_embeddings = torch.tensor(embeddings_dataset[7305]["xvector"]).unsqueeze(0)

        return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}

    def forward(self, inputs):
        with torch.no_grad():
            return self.model.generate_speech(**inputs)

    def decode(self, outputs):
        with torch.no_grad():
            return self.post_processor(outputs).cpu().detach()
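# Usage sketch (assumption — requires the transformers agents/tools runtime):
#   tool = TextToSpeechTool()
#   tool.setup()
#   speech = tool("Hello world")  # returns a waveform tensor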
from math import acos, sin
from typing import List, Tuple, Union

import numpy as np
import torch
from PIL import Image

from ...models import AutoencoderKL, UNet2DConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class AudioDiffusionPipeline(DiffusionPipeline):
    """Pipeline for audio diffusion."""

    _optional_components = ["vqvae"]

    def __init__(
        self,
        vqvae: AutoencoderKL,
        unet: UNet2DConditionModel,
        mel: Mel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
    ):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, mel=mel, vqvae=vqvae)

    def get_default_steps(self) -> int:
        """Returns the default number of steps recommended for inference."""
        return 50 if isinstance(self.scheduler, DDIMScheduler) else 1000
    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        audio_file: str = None,
        raw_audio: np.ndarray = None,
        slice: int = 0,
        start_step: int = 0,
        steps: int = None,
        generator: torch.Generator = None,
        mask_start_secs: float = 0,
        mask_end_secs: float = 0,
        step_generator: torch.Generator = None,
        eta: float = 0,
        noise: torch.Tensor = None,
        encoding: torch.Tensor = None,
        return_dict=True,
    ):
        """Generates a mel spectrogram image with the diffusion model and converts it to audio."""
        steps = steps or self.get_default_steps()
        self.scheduler.set_timesteps(steps)
        step_generator = step_generator or generator
        # For backwards compatibility
        if type(self.unet.config.sample_size) == int:
            self.unet.config.sample_size = (self.unet.config.sample_size, self.unet.config.sample_size)
        if noise is None:
            noise = randn_tensor(
                (
                    batch_size,
                    self.unet.config.in_channels,
                    self.unet.config.sample_size[0],
                    self.unet.config.sample_size[1],
                ),
                generator=generator,
                device=self.device,
            )
        images = noise
        mask = None

        if audio_file is not None or raw_audio is not None:
            self.mel.load_audio(audio_file, raw_audio)
            input_image = self.mel.audio_slice_to_image(slice)
            input_image = np.frombuffer(input_image.tobytes(), dtype="uint8").reshape(
                (input_image.height, input_image.width)
            )
            input_image = (input_image / 255) * 2 - 1
            input_images = torch.tensor(input_image[np.newaxis, :, :], dtype=torch.float).to(self.device)

            if self.vqvae is not None:
                input_images = self.vqvae.encode(torch.unsqueeze(input_images, 0)).latent_dist.sample(
                    generator=generator
                )[0]
                input_images = self.vqvae.config.scaling_factor * input_images

            if start_step > 0:
                images[0, 0] = self.scheduler.add_noise(
                    input_images, noise, self.scheduler.timesteps[start_step - 1]
                )

            pixels_per_second = (
                self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
            )
            mask_start = int(mask_start_secs * pixels_per_second)
            mask_end = int(mask_end_secs * pixels_per_second)
            mask = self.scheduler.add_noise(input_images, noise, torch.tensor(self.scheduler.timesteps[start_step:]))

        for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:])):
            if isinstance(self.unet, UNet2DConditionModel):
                model_output = self.unet(images, t, encoding)["sample"]
            else:
                model_output = self.unet(images, t)["sample"]

            if isinstance(self.scheduler, DDIMScheduler):
                images = self.scheduler.step(
                    model_output=model_output,
                    timestep=t,
                    sample=images,
                    eta=eta,
                    generator=step_generator,
                )["prev_sample"]
            else:
                images = self.scheduler.step(
                    model_output=model_output,
                    timestep=t,
                    sample=images,
                    generator=step_generator,
                )["prev_sample"]

            if mask is not None:
                if mask_start > 0:
                    images[:, :, :, :mask_start] = mask[:, step, :, :mask_start]
                if mask_end > 0:
                    images[:, :, :, -mask_end:] = mask[:, step, :, -mask_end:]

        if self.vqvae is not None:
            # 0.18215 was scaling factor used in training to ensure unit variance
            images = 1 / self.vqvae.config.scaling_factor * images
            images = self.vqvae.decode(images)["sample"]

        images = (images / 2 + 0.5).clamp(0, 1)
        images = images.cpu().permute(0, 2, 3, 1).numpy()
        images = (images * 255).round().astype("uint8")
        images = list(
            (Image.fromarray(_[:, :, 0]) for _ in images)
            if images.shape[3] == 1
            else (Image.fromarray(_, mode="RGB").convert("L") for _ in images)
        )

        audios = [self.mel.image_to_audio(_) for _ in images]
        if not return_dict:
            return images, (self.mel.get_sample_rate(), audios)

        return BaseOutput(**AudioPipelineOutput(np.array(audios)[:, np.newaxis, :]), **ImagePipelineOutput(images))
    @torch.no_grad()
    def encode(self, images: List[Image.Image], steps: int = 50):
        """Reverses the denoising loop: recovers the noisy image that generates a given image."""
        # Only works with a deterministic (DDIM) scheduler
        assert isinstance(self.scheduler, DDIMScheduler)
        self.scheduler.set_timesteps(steps)
        sample = np.array(
            [np.frombuffer(image.tobytes(), dtype="uint8").reshape((1, image.height, image.width)) for image in images]
        )
        sample = (sample / 255) * 2 - 1
        sample = torch.Tensor(sample).to(self.device)

        for t in self.progress_bar(torch.flip(self.scheduler.timesteps, (0,))):
            prev_timestep = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
            alpha_prod_t = self.scheduler.alphas_cumprod[t]
            alpha_prod_t_prev = (
                self.scheduler.alphas_cumprod[prev_timestep]
                if prev_timestep >= 0
                else self.scheduler.final_alpha_cumprod
            )
            beta_prod_t = 1 - alpha_prod_t
            model_output = self.unet(sample, t)["sample"]
            pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * model_output
            sample = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
            sample = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output

        return sample
    @staticmethod
    def slerp(x0: torch.Tensor, x1: torch.Tensor, alpha: float) -> torch.Tensor:
        """Spherical Linear intERPolation between two tensors."""
        theta = acos(torch.dot(torch.flatten(x0), torch.flatten(x1)) / torch.norm(x0) / torch.norm(x1))
        return sin((1 - alpha) * theta) * x0 / sin(theta) + sin(alpha * theta) * x1 / sin(theta)
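# Illustrative use of slerp (hypothetical tensors): interpolating two noise
# samples on the sphere, e.g. mixed = AudioDiffusionPipeline.slerp(noise_a, noise_b, 0.5);
# alpha=0 returns noise_a and alpha=1 returns noise_b.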
from math import sqrt

import numpy as np
from sympy import symbols

# Coefficient
# Speed of light (m/s)
c = 299_792_458

# Symbols
ct, x, y, z = symbols("ct x y z")
def beta(velocity: float) -> float:
    """Calculates beta, the ratio of the velocity to the speed of light."""
    if velocity > c:
        raise ValueError("Speed must not exceed light speed 299,792,458 [m/s]!")
    elif velocity < 1:
        # Usually the speed should be much higher than 1 (c order of magnitude)
        raise ValueError("Speed must be greater than or equal to 1!")

    return velocity / c
def gamma(velocity: float) -> float:
    """Calculates the Lorentz factor 1 / sqrt(1 - beta**2)."""
    return 1 / sqrt(1 - beta(velocity) ** 2)
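# Worked example: at half light speed, beta = 0.5 and
# gamma(0.5 * c) = 1 / sqrt(1 - 0.25) ≈ 1.1547.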
def transformation_matrix(velocity: float) -> np.ndarray:
    """Returns the Lorentz boost matrix along the x-axis."""
    return np.array(
        [
            [gamma(velocity), -gamma(velocity) * beta(velocity), 0, 0],
            [-gamma(velocity) * beta(velocity), gamma(velocity), 0, 0],
            [0, 0, 1, 0],
            [0, 0, 0, 1],
        ]
    )
def transform(velocity: float, event: np.ndarray = None) -> np.ndarray:
    """Applies the Lorentz transformation to a four-vector (symbolic by default)."""
    if event is None:
        event = np.array([ct, x, y, z])  # Symbolic four vector
    else:
        event[0] *= c  # x0 is ct (speed of light * time)

    return transformation_matrix(velocity) @ event
if __name__ == "__main__":
import doctest
doctest.testmod()
# Example of symbolic vector:
SCREAMING_SNAKE_CASE :Any = transform(2997_9245)
print('Example of four vector: ')
print(f'''ct\' = {four_vector[0]}''')
print(f'''x\' = {four_vector[1]}''')
print(f'''y\' = {four_vector[2]}''')
print(f'''z\' = {four_vector[3]}''')
# Substitute symbols with numerical values
SCREAMING_SNAKE_CASE :Union[str, Any] = {ct: c, x: 1, y: 1, z: 1}
SCREAMING_SNAKE_CASE :Optional[Any] = [four_vector[i].subs(sub_dict) for i in range(4)]
print(f'''\n{numerical_vector}''')
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from packaging import version
from .. import __version__
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from .doc import (
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
copy_func,
replace_return_docstrings,
)
from .generic import (
ContextManagers,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
TensorType,
add_model_info_to_auto_map,
cached_property,
can_return_loss,
expand_dims,
find_labels,
flatten_dict,
infer_framework,
is_jax_tensor,
is_numpy_array,
is_tensor,
is_tf_symbolic_tensor,
is_tf_tensor,
is_torch_device,
is_torch_dtype,
is_torch_tensor,
reshape,
squeeze,
strtobool,
tensor_size,
to_numpy,
to_py_obj,
transpose,
working_or_temp_dir,
)
from .hub import (
CLOUDFRONT_DISTRIB_PREFIX,
DISABLE_TELEMETRY,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
EntryNotFoundError,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
cached_file,
default_cache_path,
define_sagemaker_information,
download_url,
extract_commit_hash,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
has_file,
http_user_agent,
is_offline_mode,
is_remote_url,
move_cache,
send_example_telemetry,
try_to_load_from_cache,
)
from .import_utils import (
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
TORCH_FX_REQUIRED_VERSION,
USE_JAX,
USE_TF,
USE_TORCH,
DummyObject,
OptionalDependencyNotAvailable,
_LazyModule,
ccl_version,
direct_transformers_import,
get_torch_version,
is_accelerate_available,
is_apex_available,
is_bitsandbytes_available,
is_bs4_available,
is_coloredlogs_available,
is_cython_available,
is_datasets_available,
is_decord_available,
is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_jieba_available,
is_jumanpp_available,
is_kenlm_available,
is_keras_nlp_available,
is_librosa_available,
is_natten_available,
is_ninja_available,
is_onnx_available,
is_openai_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_py3nvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_sudachi_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bf16_available,
is_torch_bf16_cpu_available,
is_torch_bf16_gpu_available,
is_torch_compile_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_neuroncore_available,
is_torch_tensorrt_fx_available,
is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_torchdistx_available,
is_torchdynamo_available,
is_torchvision_available,
is_training_run_on_sagemaker,
is_vision_available,
requires_backends,
torch_only_method,
)
WEIGHTS_NAME = "pytorch_model.bin"
WEIGHTS_INDEX_NAME = "pytorch_model.bin.index.json"
ADAPTER_CONFIG_NAME = "adapter_config.json"
ADAPTER_WEIGHTS_NAME = "adapter_model.bin"
ADAPTER_SAFE_WEIGHTS_NAME = "adapter_model.safetensors"
TF2_WEIGHTS_NAME = "tf_model.h5"
TF2_WEIGHTS_INDEX_NAME = "tf_model.h5.index.json"
TF_WEIGHTS_NAME = "model.ckpt"
FLAX_WEIGHTS_NAME = "flax_model.msgpack"
FLAX_WEIGHTS_INDEX_NAME = "flax_model.msgpack.index.json"
SAFE_WEIGHTS_NAME = "model.safetensors"
SAFE_WEIGHTS_INDEX_NAME = "model.safetensors.index.json"
CONFIG_NAME = "config.json"
FEATURE_EXTRACTOR_NAME = "preprocessor_config.json"
IMAGE_PROCESSOR_NAME = FEATURE_EXTRACTOR_NAME
GENERATION_CONFIG_NAME = "generation_config.json"
MODEL_CARD_NAME = "modelcard.json"

SENTENCEPIECE_UNDERLINE = "▁"
SPIECE_UNDERLINE = SENTENCEPIECE_UNDERLINE  # Kept for backward compatibility

MULTIPLE_CHOICE_DUMMY_INPUTS = [
    [[0, 1, 0, 1], [1, 0, 0, 1]]
] * 2  # Needs to have 0s and 1s only since XLM uses it for langs too.
DUMMY_INPUTS = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
DUMMY_MASK = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
def check_min_version(min_version):
    if version.parse(__version__) < version.parse(min_version):
        if "dev" in min_version:
            error_message = (
                "This example requires a source install from HuggingFace Transformers (see "
                "`https://huggingface.co/docs/transformers/installation#install-from-source`),"
            )
        else:
            error_message = f"This example requires a minimum version of {min_version},"
        error_message += f" but the version found is {__version__}.\n"
        raise ImportError(
            error_message
            + "Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other "
            "versions of HuggingFace Transformers."
        )
"""simple docstring"""
import heapq
def a__ ( lowerCAmelCase__ ):
UpperCAmelCase_ = []
# for each node and his adjacency list add them and the rank of the node to queue
# using heapq module the queue will be filled like a Priority Queue
# heapq works with a min priority queue, so I used -1*len(v) to build it
for key, value in graph.items():
# O(log(n))
heapq.heappush(lowerCAmelCase__ , [-1 * len(lowerCAmelCase__ ), (key, value)] )
# chosen_vertices = set of chosen vertices
UpperCAmelCase_ = set()
# while queue isn't empty and there are still edges
# (queue[0][0] is the rank of the node with max rank)
while queue and queue[0][0] != 0:
# extract vertex with max rank from queue and add it to chosen_vertices
UpperCAmelCase_ = heapq.heappop(lowerCAmelCase__ )[1][0]
chosen_vertices.add(lowerCAmelCase__ )
# Remove all arcs adjacent to argmax
for elem in queue:
# if v haven't adjacent node, skip
if elem[0] == 0:
continue
# if argmax is reachable from elem
# remove argmax from elem's adjacent list and update his rank
if argmax in elem[1][1]:
UpperCAmelCase_ = elem[1][1].index(lowerCAmelCase__ )
del elem[1][1][index]
elem[0] += 1
# re-order the queue
heapq.heapify(lowerCAmelCase__ )
return chosen_vertices
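# Note: this greedy heuristic is an approximation — it runs fast, but it is not
# guaranteed to return a minimum-size cover on every graph.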
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCamelCase = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(F"Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}")
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def lowercase__ ( self : Tuple ) -> str:
'''simple docstring'''
UpperCAmelCase_ = XLMRobertaModel.from_pretrained("xlm-roberta-base" )
UpperCAmelCase_ = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]] )
# The dog is cute and lives in the garden house
UpperCAmelCase_ = torch.Size((1, 12, 768) ) # batch_size, sequence_length, embedding_vector_dim
UpperCAmelCase_ = torch.tensor(
[[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
UpperCAmelCase_ = model(_UpperCAmelCase )["last_hidden_state"].detach()
self.assertEqual(output.shape , _UpperCAmelCase )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , _UpperCAmelCase , atol=1e-3 ) )
@slow
def lowercase__ ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ = XLMRobertaModel.from_pretrained("xlm-roberta-large" )
UpperCAmelCase_ = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]] )
# The dog is cute and lives in the garden house
UpperCAmelCase_ = torch.Size((1, 12, 1024) ) # batch_size, sequence_length, embedding_vector_dim
UpperCAmelCase_ = torch.tensor(
[[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
UpperCAmelCase_ = model(_UpperCAmelCase )["last_hidden_state"].detach()
self.assertEqual(output.shape , _UpperCAmelCase )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , _UpperCAmelCase , atol=1e-3 ) )
from __future__ import annotations
import inspect
import unittest
from typing import List, Tuple
from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFRegNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFRegNetModel(config=config)
        result = model(pixel_values, training=False)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFRegNetForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFRegNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFRegNetModel, "image-classification": TFRegNetForImageClassification}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TFRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="RegNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skipIf(
        not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0,
        reason="TF does not support backprop for grouped convolutions on CPU.",
    )
    @slow
    def test_keras_fit(self):
        super().test_keras_fit()

    @unittest.skip(reason="RegNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # RegNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 2, self.model_tester.image_size // 2],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)

    def test_model_outputs_equivalence(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
            tuple_output = model(tuple_inputs, return_dict=False, **additional_kwargs)
            dict_output = model(dict_inputs, return_dict=True, **additional_kwargs).to_tuple()

            def recursive_check(tuple_object, dict_object):
                if isinstance(tuple_object, (List, Tuple)):
                    for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
                        recursive_check(tuple_iterable_value, dict_iterable_value)
                elif tuple_object is None:
                    return
                else:
                    self.assertTrue(
                        all(tf.equal(tuple_object, dict_object)),
                        msg=(
                            "Tuple and dict output are not equal. Difference:"
                            f" {tf.math.reduce_max(tf.abs(tuple_object - dict_object))}"
                        ),
                    )

            recursive_check(tuple_output, dict_output)

        for model_class in self.all_model_classes:
            model = model_class(config)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFRegNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFRegNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs, training=False)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.4180, -1.5051, -3.4836])

        tf.debugging.assert_near(outputs.logits[0, :3], expected_slice, atol=1e-4)
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}


class CTRLConfig(PretrainedConfig):
    model_type = "ctrl"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=246_534,
        n_positions=256,
        n_embd=1280,
        dff=8192,
        n_layer=48,
        n_head=16,
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        layer_norm_epsilon=1e-6,
        initializer_range=0.02,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range

        self.use_cache = use_cache

        super().__init__(**kwargs)
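# Usage sketch (illustrative): the attribute_map aliases generic names onto
# CTRL-specific ones, e.g. CTRLConfig().hidden_size returns n_embd (1280).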
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def lowerCAmelCase (__UpperCamelCase : Optional[Any] ):
"""simple docstring"""
__UpperCamelCase =[
'''encoder.version''',
'''decoder.version''',
'''model.encoder.version''',
'''model.decoder.version''',
'''_float_tensor''',
'''decoder.output_projection.weight''',
]
for k in ignore_keys:
state_dict.pop(a__ , a__ )
def lowerCAmelCase (__UpperCamelCase : Any ):
"""simple docstring"""
__UpperCamelCase , __UpperCamelCase =emb.weight.shape
__UpperCamelCase =nn.Linear(a__ , a__ , bias=a__ )
__UpperCamelCase =emb.weight.data
return lin_layer
def lowerCAmelCase (__UpperCamelCase : List[str] , __UpperCamelCase : Tuple="facebook/mbart-large-en-ro" , __UpperCamelCase : Dict=False , __UpperCamelCase : str=False ):
"""simple docstring"""
__UpperCamelCase =torch.load(a__ , map_location='''cpu''' )['''model''']
remove_ignore_keys_(a__ )
__UpperCamelCase =state_dict['''encoder.embed_tokens.weight'''].shape[0]
__UpperCamelCase =MBartConfig.from_pretrained(a__ , vocab_size=a__ )
if mbart_aa and finetuned:
__UpperCamelCase ='''relu'''
__UpperCamelCase =state_dict['''decoder.embed_tokens.weight''']
__UpperCamelCase =MBartForConditionalGeneration(a__ )
model.model.load_state_dict(a__ )
if finetuned:
__UpperCamelCase =make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
__lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'''
)
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--hf_config''',
default='''facebook/mbart-large-cc25''',
type=str,
help='''Which huggingface architecture to use: mbart-large''',
)
parser.add_argument('''--mbart_50''', action='''store_true''', help='''whether the model is mMART-50 checkpoint''')
parser.add_argument('''--finetuned''', action='''store_true''', help='''whether the model is a fine-tuned checkpoint''')
__lowercase = parser.parse_args()
__lowercase = convert_fairseq_mbart_checkpoint_from_disk(
args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa
)
model.save_pretrained(args.pytorch_dump_folder_path)
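# Illustrative invocation (hypothetical script name and paths):
#   python convert_mbart_checkpoint.py model.pt ./mbart-converted --hf_config facebook/mbart-large-cc25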
def palindromic_string(input_string: str) -> str:
    """Manacher's algorithm: finds the longest palindromic substring in O(n)."""
    max_length = 0

    # if input_string is "aba" than new_input_string become "a|b|a"
    new_input_string = ""
    output_string = ""

    # append each character + "|" in new_string for range(0, length-1)
    for i in input_string[: len(input_string) - 1]:
        new_input_string += i + "|"
    # append last character
    new_input_string += input_string[-1]

    # we will store the starting and ending of previous furthest ending palindromic
    # substring
    l, r = 0, 0

    # length[i] shows the length of palindromic substring with center i
    length = [1 for i in range(len(new_input_string))]

    # for each character in new_string find corresponding palindromic string
    start = 0
    for j in range(len(new_input_string)):
        k = 1 if j > r else min(length[l + r - j] // 2, r - j + 1)
        while (
            j - k >= 0
            and j + k < len(new_input_string)
            and new_input_string[k + j] == new_input_string[j - k]
        ):
            k += 1

        length[j] = 2 * k - 1

        # does this palindrome end after the previously explored end (that is r)?
        # if yes, update r to the last index of this palindrome
        if j + k - 1 > r:
            l = j - k + 1  # noqa: E741
            r = j + k - 1

        # update max_length and start position
        if max_length < length[j]:
            max_length = length[j]
            start = j

    # create that string
    s = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
    for i in s:
        if i != "|":
            output_string += i

    return output_string


if __name__ == "__main__":
    import doctest

    doctest.testmod()
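# Example (illustrative): palindromic_string("abbbaba") returns "abbba".
# Manacher's algorithm runs in O(n), versus O(n^2) for naive center expansion.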
import unittest

import numpy as np
import torch

from diffusers import PNDMPipeline, PNDMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device


enable_full_determinism()


class PNDMPipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = PNDMScheduler()

        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pndm(generator=generator, num_inference_steps=20, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = pndm(generator=generator, num_inference_steps=20, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class PNDMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_cifar10(self):
        model_id = "google/ddpm-cifar10-32"

        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = PNDMScheduler()

        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = pndm(generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1564, 0.14645, 0.1406, 0.14715, 0.12425, 0.14045, 0.13115, 0.12175, 0.125])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__)
__lowerCAmelCase : Tuple = {
'google/pegasus-large': 'https://huggingface.co/google/pegasus-large/resolve/main/config.json',
# See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}
class UpperCAmelCase_ ( _A ):
'''simple docstring'''
a__ = """pegasus"""
a__ = ["""past_key_values"""]
a__ = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self : Optional[int] , UpperCamelCase__ : Optional[int]=5_0265 , UpperCamelCase__ : Optional[int]=1024 , UpperCamelCase__ : Any=12 , UpperCamelCase__ : Union[str, Any]=4096 , UpperCamelCase__ : Any=16 , UpperCamelCase__ : Union[str, Any]=12 , UpperCamelCase__ : List[str]=4096 , UpperCamelCase__ : Tuple=16 , UpperCamelCase__ : Optional[int]=0.0 , UpperCamelCase__ : List[Any]=0.0 , UpperCamelCase__ : List[str]=True , UpperCamelCase__ : List[Any]=True , UpperCamelCase__ : List[Any]="gelu" , UpperCamelCase__ : List[Any]=1024 , UpperCamelCase__ : Optional[Any]=0.1 , UpperCamelCase__ : str=0.0 , UpperCamelCase__ : Any=0.0 , UpperCamelCase__ : Union[str, Any]=0.02 , UpperCamelCase__ : Any=0 , UpperCamelCase__ : int=False , UpperCamelCase__ : Any=0 , UpperCamelCase__ : List[str]=1 , UpperCamelCase__ : Tuple=1 , **UpperCamelCase__ : Union[str, Any] , ) -> str:
"""simple docstring"""
__magic_name__ = vocab_size
__magic_name__ = max_position_embeddings
__magic_name__ = d_model
__magic_name__ = encoder_ffn_dim
__magic_name__ = encoder_layers
__magic_name__ = encoder_attention_heads
__magic_name__ = decoder_ffn_dim
__magic_name__ = decoder_layers
__magic_name__ = decoder_attention_heads
__magic_name__ = dropout
__magic_name__ = attention_dropout
__magic_name__ = activation_dropout
__magic_name__ = activation_function
__magic_name__ = init_std
__magic_name__ = encoder_layerdrop
__magic_name__ = decoder_layerdrop
__magic_name__ = use_cache
__magic_name__ = encoder_layers
__magic_name__ = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , is_encoder_decoder=UpperCamelCase__ , decoder_start_token_id=UpperCamelCase__ , forced_eos_token_id=UpperCamelCase__ , **UpperCamelCase__ , )
    @property
    def num_attention_heads(self) -> int:
        '''simple docstring'''
        return self.encoder_attention_heads
    @property
    def hidden_size(self) -> int:
        '''simple docstring'''
        return self.d_model
| 88 | 1 |
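As a usage sketch, a config like the one above can be instantiated directly from transformers; the attribute_map means generic attribute names resolve to the Pegasus-specific fields. The values below assume the defaults shown in the __init__ above.

from transformers import PegasusConfig

config = PegasusConfig()  # defaults: 50265-token vocab, d_model=1024, 12+12 layers

# The attribute_map aliases generic names onto model-specific ones.
print(config.hidden_size)          # 1024, aliased to d_model
print(config.num_attention_heads)  # 16, aliased to encoder_attention_heads
print(config.num_hidden_layers)    # 12, set from encoder_layers in __init__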
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class IPNDMScheduler(SchedulerMixin, ConfigMixin):
    order = 1
    @register_to_config
    def __init__(self, num_train_timesteps: int = 1000, trained_betas: Optional[Union[np.ndarray, List[float]]] = None):
        '''simple docstring'''
        # set `betas`, `alphas`, `timesteps`
        self.set_timesteps(num_train_timesteps)
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0
        # For now we only support F-PNDM, i.e. the runge-kutta method
        # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
        # mainly at formula (9), (12), (13) and the Algorithm 2.
        self.pndm_order = 4
        # running values
        self.ets = []
    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        '''simple docstring'''
        self.num_inference_steps = num_inference_steps
        steps = torch.linspace(1, 0, num_inference_steps + 1)[:-1]
        steps = torch.cat([steps, torch.tensor([0.0])])
        if self.config.trained_betas is not None:
            self.betas = torch.tensor(self.config.trained_betas, dtype=torch.float32)
        else:
            self.betas = torch.sin(steps * math.pi / 2) ** 2
        self.alphas = (1.0 - self.betas**2) ** 0.5
        timesteps = (torch.atan2(self.betas, self.alphas) / math.pi * 2)[:-1]
        self.timesteps = timesteps.to(device)
        self.ets = []
    def step(self, model_output: torch.FloatTensor, timestep: int, sample: torch.FloatTensor, return_dict: bool = True) -> Union[SchedulerOutput, Tuple]:
        '''simple docstring'''
        if self.num_inference_steps is None:
            raise ValueError(
                "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler")
        timestep_index = (self.timesteps == timestep).nonzero().item()
        prev_timestep_index = timestep_index + 1
        ets = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
        self.ets.append(ets)
        if len(self.ets) == 1:
            ets = self.ets[-1]
        elif len(self.ets) == 2:
            ets = (3 * self.ets[-1] - self.ets[-2]) / 2
        elif len(self.ets) == 3:
            ets = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
        else:
            ets = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])
        prev_sample = self._get_prev_sample(sample, timestep_index, prev_timestep_index, ets)
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample)
    def scale_model_input(self, sample: torch.FloatTensor, *args, **kwargs) -> torch.FloatTensor:
        '''simple docstring'''
        return sample
    def _get_prev_sample(self, sample, timestep_index, prev_timestep_index, ets):
        '''simple docstring'''
        alpha = self.alphas[timestep_index]
        sigma = self.betas[timestep_index]
        next_alpha = self.alphas[prev_timestep_index]
        next_sigma = self.betas[prev_timestep_index]
        pred = (sample - sigma * ets) / max(alpha, 1e-8)
        prev_sample = next_alpha * pred + ets * next_sigma
        return prev_sample
    def __len__(self):
        '''simple docstring'''
        return self.config.num_train_timesteps
| 365 |
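The scheduler above keeps a running list of model outputs (self.ets) and combines the most recent ones with linear-multistep (Adams-Bashforth) coefficients of growing order. That coefficient logic, isolated from any diffusers internals:

import torch

def multistep_combination(ets: list) -> torch.Tensor:
    """Combine stored model outputs with the multistep weights used in step() above."""
    if len(ets) == 1:
        return ets[-1]
    if len(ets) == 2:
        return (3 * ets[-1] - ets[-2]) / 2
    if len(ets) == 3:
        return (23 * ets[-1] - 16 * ets[-2] + 5 * ets[-3]) / 12
    # Fourth-order rule, used once four outputs have accumulated.
    return (1 / 24) * (55 * ets[-1] - 59 * ets[-2] + 37 * ets[-3] - 9 * ets[-4])

# Usage: feed in dummy outputs and watch the order of the rule grow.
history = [torch.full((2, 2), float(i)) for i in range(1, 5)]
for k in range(1, 5):
    print(k, multistep_combination(history[:k]))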
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"configuration_beit": ["BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BeitConfig", "BeitOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_beit"] = ["BeitFeatureExtractor"]
    _import_structure["image_processing_beit"] = ["BeitImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_beit"] = [
"BEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BeitForImageClassification",
"BeitForMaskedImageModeling",
"BeitForSemanticSegmentation",
"BeitModel",
"BeitPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_beit"] = [
"FlaxBeitForImageClassification",
"FlaxBeitForMaskedImageModeling",
"FlaxBeitModel",
"FlaxBeitPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 300 | 0 |
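The init file above follows the _LazyModule pattern: exported names are declared up front and the heavy submodules are only imported when one of those names is first accessed. A generic sketch of the same idea without the transformers helper (the real _LazyModule additionally handles TYPE_CHECKING, optional dependencies, and module specs):

import importlib
from types import ModuleType

class LazyModule(ModuleType):
    """Defer importing submodules until one of their exported names is touched."""

    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # Map each exported name back to the submodule that defines it.
        self._name_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr: str):
        if attr not in self._name_to_module:
            raise AttributeError(f"module {self.__name__} has no attribute {attr}")
        submodule = importlib.import_module("." + self._name_to_module[attr], self.__name__)
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so __getattr__ is only hit once per name
        return value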
import unittest
from transformers.testing_utils import require_bs4
from transformers.utils import is_bs4_available
from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin
if is_bs4_available():
from transformers import MarkupLMFeatureExtractor
class MarkupLMFeatureExtractionTester(unittest.TestCase):
    '''simple docstring'''
    def __init__(self, parent):
        self.parent = parent
    def prepare_feat_extract_dict(self):
        return {}
def get_html_strings():
    html_string_1 = '''<HTML>
<HEAD>
<TITLE>sample document</TITLE>
</HEAD>
<BODY BGCOLOR="FFFFFF">
<HR>
<a href="http://google.com">Goog</a>
<H1>This is one header</H1>
<H2>This is a another Header</H2>
<P>Travel from
<P>
<B>SFO to JFK</B>
<BR>
<B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>
<HR>
<div style="color:#0000FF">
<h3>Traveler <b> name </b> is
<p> John Doe </p>
</div>'''
    html_string_2 = '''
<!DOCTYPE html>
<html>
<body>
<h1>My First Heading</h1>
<p>My first paragraph.</p>
</body>
</html>
'''
    return [html_string_1, html_string_2]
@require_bs4
class MarkupLMFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase):
    '''simple docstring'''
    feature_extraction_class = MarkupLMFeatureExtractor if is_bs4_available() else None
    def setUp(self):
        self.feature_extract_tester = MarkupLMFeatureExtractionTester(self)
    @property
    def feat_extract_dict(self):
        return self.feature_extract_tester.prepare_feat_extract_dict()
    def test_call(self):
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class()
        # Test not batched input
        html_string = get_html_strings()[0]
        encoding = feature_extractor(html_string)
        # fmt: off
        expected_nodes = [['sample document', 'Goog', 'This is one header', 'This is a another Header', 'Travel from', 'SFO to JFK', 'on May 2, 2015 at 2:00 pm. For details go to confirm.com', 'Traveler', 'name', 'is', 'John Doe']]
        expected_xpaths = [['/html/head/title', '/html/body/a', '/html/body/h1', '/html/body/h2', '/html/body/p', '/html/body/p/p/b[1]', '/html/body/p/p/b[2]/i', '/html/body/p/p/div/h3', '/html/body/p/p/div/h3/b', '/html/body/p/p/div/h3', '/html/body/p/p/div/h3/p']]
        # fmt: on
        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths)
        # Test batched
        html_strings = get_html_strings()
        encoding = feature_extractor(html_strings)
        # fmt: off
        expected_nodes = expected_nodes + [['My First Heading', 'My first paragraph.']]
        expected_xpaths = expected_xpaths + [['/html/body/h1', '/html/body/p']]
        # fmt: on
        self.assertEqual(len(encoding.nodes), 2)
        self.assertEqual(len(encoding.xpaths), 2)
        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths)
| 9 |
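In plain terms, the test above checks that the feature extractor parses raw HTML (via BeautifulSoup, hence the bs4 requirement) into text nodes plus their XPaths. A minimal usage sketch, assuming the public MarkupLMFeatureExtractor:

from transformers import MarkupLMFeatureExtractor

html = "<html><body><h1>My First Heading</h1><p>My first paragraph.</p></body></html>"

feature_extractor = MarkupLMFeatureExtractor()
encoding = feature_extractor(html)

print(encoding.nodes)   # [['My First Heading', 'My first paragraph.']]
print(encoding.xpaths)  # [['/html/body/h1', '/html/body/p']]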
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    '''simple docstring'''
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line], max_length=max_length, padding="max_length" if pad_to_max_length else None, truncation=True, return_tensors=return_tensors, add_special_tokens=True, **extra_kw, )
def trim_batch(input_ids, pad_token_id, attention_mask=None, ):
    '''simple docstring'''
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
def __init__( self : int , __lowercase : List[Any] , __lowercase : Tuple , __lowercase : List[Any] , __lowercase : str , __lowercase : Tuple="train" , __lowercase : List[str]=None , __lowercase : List[Any]=None , __lowercase : Optional[Any]=None , __lowercase : Union[str, Any]="" , ):
"""simple docstring"""
super().__init__()
snake_case_ = Path(__lowercase ).joinpath(type_path + ".source" )
snake_case_ = Path(__lowercase ).joinpath(type_path + ".target" )
snake_case_ = self.get_char_lens(self.src_file )
snake_case_ = max_source_length
snake_case_ = max_target_length
assert min(self.src_lens ) > 0, f"found empty line in {self.src_file}"
snake_case_ = tokenizer
snake_case_ = prefix
if n_obs is not None:
snake_case_ = self.src_lens[:n_obs]
snake_case_ = src_lang
snake_case_ = tgt_lang
def __len__( self : List[Any] ):
"""simple docstring"""
return len(self.src_lens )
def __getitem__( self : List[Any] , __lowercase : Dict ):
"""simple docstring"""
snake_case_ = index + 1 # linecache starts at 1
snake_case_ = self.prefix + linecache.getline(str(self.src_file ) , __lowercase ).rstrip("\n" )
snake_case_ = linecache.getline(str(self.tgt_file ) , __lowercase ).rstrip("\n" )
assert source_line, f"empty source line for index {index}"
assert tgt_line, f"empty tgt line for index {index}"
# Need to add eos token manually for T5
if isinstance(self.tokenizer , __lowercase ):
source_line += self.tokenizer.eos_token
tgt_line += self.tokenizer.eos_token
# Pad source and target to the right
snake_case_ = (
self.tokenizer.question_encoder if isinstance(self.tokenizer , __lowercase ) else self.tokenizer
)
snake_case_ = self.tokenizer.generator if isinstance(self.tokenizer , __lowercase ) else self.tokenizer
snake_case_ = encode_line(__lowercase , __lowercase , self.max_source_length , "right" )
snake_case_ = encode_line(__lowercase , __lowercase , self.max_target_length , "right" )
snake_case_ = source_inputs["input_ids"].squeeze()
snake_case_ = target_inputs["input_ids"].squeeze()
snake_case_ = source_inputs["attention_mask"].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
@staticmethod
def snake_case__ ( __lowercase : Optional[int] ):
"""simple docstring"""
return [len(__lowercase ) for x in Path(__lowercase ).open().readlines()]
def snake_case__ ( self : Dict , __lowercase : Union[str, Any] ):
"""simple docstring"""
snake_case_ = torch.stack([x["input_ids"] for x in batch] )
snake_case_ = torch.stack([x["attention_mask"] for x in batch] )
snake_case_ = torch.stack([x["decoder_input_ids"] for x in batch] )
snake_case_ = (
self.tokenizer.generator.pad_token_id
if isinstance(self.tokenizer , __lowercase )
else self.tokenizer.pad_token_id
)
snake_case_ = (
self.tokenizer.question_encoder.pad_token_id
if isinstance(self.tokenizer , __lowercase )
else self.tokenizer.pad_token_id
)
snake_case_ = trim_batch(__lowercase , __lowercase )
snake_case_ , snake_case_ = trim_batch(__lowercase , __lowercase , attention_mask=__lowercase )
snake_case_ = {
"input_ids": source_ids,
"attention_mask": source_mask,
"decoder_input_ids": y,
}
return batch
logger = getLogger(__name__)
def flatten_list(summary_ids):
    '''simple docstring'''
    return list(itertools.chain.from_iterable(summary_ids))
def save_git_info(folder_path):
    '''simple docstring'''
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))
def save_json(content, path, indent=4, **json_dump_kwargs):
    '''simple docstring'''
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)
def load_json(path):
    '''simple docstring'''
    with open(path) as f:
        return json.load(f)
def get_git_info():
    '''simple docstring'''
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos
def lmap(f, x):
    '''simple docstring'''
    return list(map(f, x))
def pickle_save(obj, path):
    '''simple docstring'''
    with open(path, "wb") as f:
        return pickle.dump(obj, f)
def normalize_answer(s):
    '''simple docstring'''
    def remove_articles(text):
        return re.sub(R"\b(a|an|the)\b", " ", text)
    def white_space_fix(text):
        return " ".join(text.split())
    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)
    def lower(text):
        return text.lower()
    return white_space_fix(remove_articles(remove_punc(lower(s))))
def f1_score(prediction, ground_truth):
    '''simple docstring'''
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
def exact_match_score(prediction, ground_truth):
    '''simple docstring'''
    return normalize_answer(prediction) == normalize_answer(ground_truth)
def calculate_exact_match(output_lns, reference_lns):
    '''simple docstring'''
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}
def is_rag_model(model_prefix):
    '''simple docstring'''
    return model_prefix.startswith("rag")
def set_extra_model_params(extra_params, hparams, config):
    '''simple docstring'''
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
| 187 | 0 |
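A quick sanity check of the metric helpers above (normalization lowercases, strips punctuation and articles, and collapses whitespace before comparing token bags):

print(normalize_answer("The Cat sat."))              # 'cat sat'
print(f1_score("The cat sat.", "a cat sat"))         # 1.0 after normalization
print(exact_match_score("The cat sat.", "cat sat"))  # True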
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {'tokenization_bertweet': ['BertweetTokenizer']}
if TYPE_CHECKING:
from .tokenization_bertweet import BertweetTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 353 |
'''simple docstring'''
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
MARIAN_MODEL = "sshleifer/mar_enro_6_3_student"
class __snake_case ( _SCREAMING_SNAKE_CASE):
"""simple docstring"""
def __lowercase ( self : List[Any] ) -> Optional[Any]:
super().setUp()
lowerCAmelCase_ : Any = cached_path(
"""https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz""" , extract_compressed_file=lowerCamelCase , )
lowerCAmelCase_ : Optional[Any] = F'{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k'
@slow
@require_torch_gpu
def __lowercase ( self : str ) -> str:
MarianMTModel.from_pretrained(lowerCamelCase )
@slow
@require_torch_gpu
def __lowercase ( self : List[Any] ) -> Union[str, Any]:
lowerCAmelCase_ : str = {
"""$MAX_LEN""": 64,
"""$BS""": 64,
"""$GAS""": 1,
"""$ENRO_DIR""": self.data_dir,
"""facebook/mbart-large-cc25""": MARIAN_MODEL,
# "val_check_interval=0.25": "val_check_interval=1.0",
"""--learning_rate=3e-5""": """--learning_rate 3e-4""",
"""--num_train_epochs 6""": """--num_train_epochs 1""",
}
# Clean up bash script
lowerCAmelCase_ : Dict = (self.test_file_dir / """train_mbart_cc25_enro.sh""").open().read().split("""finetune.py""" )[1].strip()
lowerCAmelCase_ : Optional[int] = bash_script.replace("""\\\n""" , """""" ).strip().replace("""\"$@\"""" , """""" )
for k, v in env_vars_to_replace.items():
lowerCAmelCase_ : Optional[int] = bash_script.replace(lowerCamelCase , str(lowerCamelCase ) )
lowerCAmelCase_ : Optional[Any] = self.get_auto_remove_tmp_dir()
# bash_script = bash_script.replace("--fp16 ", "")
lowerCAmelCase_ : Tuple = F'\n --output_dir {output_dir}\n --tokenizer_name Helsinki-NLP/opus-mt-en-ro\n --sortish_sampler\n --do_predict\n --gpus 1\n --freeze_encoder\n --n_train 40000\n --n_val 500\n --n_test 500\n --fp16_opt_level O1\n --num_sanity_val_steps 0\n --eval_beams 2\n '.split()
# XXX: args.gpus > 1 : handle multi_gpu in the future
lowerCAmelCase_ : Tuple = ["""finetune.py"""] + bash_script.split() + args
with patch.object(lowerCamelCase , """argv""" , lowerCamelCase ):
lowerCAmelCase_ : Optional[Any] = argparse.ArgumentParser()
lowerCAmelCase_ : Any = pl.Trainer.add_argparse_args(lowerCamelCase )
lowerCAmelCase_ : List[str] = SummarizationModule.add_model_specific_args(lowerCamelCase , os.getcwd() )
lowerCAmelCase_ : Tuple = parser.parse_args()
lowerCAmelCase_ : Dict = main(lowerCamelCase )
# Check metrics
lowerCAmelCase_ : int = load_json(model.metrics_save_path )
lowerCAmelCase_ : Optional[Any] = metrics["""val"""][0]
lowerCAmelCase_ : Tuple = metrics["""val"""][-1]
self.assertEqual(len(metrics["""val"""] ) , (args.max_epochs / args.val_check_interval) )
assert isinstance(last_step_stats[F'val_avg_{model.val_metric}'] , lowerCamelCase )
self.assertGreater(last_step_stats["""val_avg_gen_time"""] , 0.01 )
# model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?)
self.assertLessEqual(last_step_stats["""val_avg_gen_time"""] , 1.0 )
# test learning requirements:
# 1. BLEU improves over the course of training by more than 2 pts
self.assertGreater(last_step_stats["""val_avg_bleu"""] - first_step_stats["""val_avg_bleu"""] , 2 )
# 2. BLEU finishes above 17
self.assertGreater(last_step_stats["""val_avg_bleu"""] , 17 )
# 3. test BLEU and val BLEU within ~1.1 pt.
self.assertLess(abs(metrics["""val"""][-1]["""val_avg_bleu"""] - metrics["""test"""][-1]["""test_avg_bleu"""] ) , 1.1 )
# check lightning ckpt can be loaded and has a reasonable statedict
lowerCAmelCase_ : Union[str, Any] = os.listdir(lowerCamelCase )
lowerCAmelCase_ : Any = [x for x in contents if x.endswith(""".ckpt""" )][0]
lowerCAmelCase_ : Union[str, Any] = os.path.join(args.output_dir , lowerCamelCase )
lowerCAmelCase_ : int = torch.load(lowerCamelCase , map_location="""cpu""" )
lowerCAmelCase_ : List[str] = """model.model.decoder.layers.0.encoder_attn_layer_norm.weight"""
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
lowerCAmelCase_ : List[str] = {os.path.basename(lowerCamelCase ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics["""test"""] ) == 1
class __snake_case ( _SCREAMING_SNAKE_CASE):
"""simple docstring"""
@timeout_decorator.timeout(6_00 )
@slow
@require_torch_gpu
def __lowercase ( self : Optional[Any] ) -> Dict:
lowerCAmelCase_ : List[str] = F'{self.test_file_dir_str}/test_data/wmt_en_ro'
lowerCAmelCase_ : Dict = {
"""--fp16_opt_level=O1""": """""",
"""$MAX_LEN""": 1_28,
"""$BS""": 16,
"""$GAS""": 1,
"""$ENRO_DIR""": data_dir,
"""$m""": """sshleifer/student_marian_en_ro_6_1""",
"""val_check_interval=0.25""": """val_check_interval=1.0""",
}
# Clean up bash script
lowerCAmelCase_ : int = (
(self.test_file_dir / """distil_marian_no_teacher.sh""").open().read().split("""distillation.py""" )[1].strip()
)
lowerCAmelCase_ : str = bash_script.replace("""\\\n""" , """""" ).strip().replace("""\"$@\"""" , """""" )
lowerCAmelCase_ : Tuple = bash_script.replace("""--fp16 """ , """ """ )
for k, v in env_vars_to_replace.items():
lowerCAmelCase_ : Optional[int] = bash_script.replace(lowerCamelCase , str(lowerCamelCase ) )
lowerCAmelCase_ : int = self.get_auto_remove_tmp_dir()
lowerCAmelCase_ : Optional[Any] = bash_script.replace("""--fp16""" , """""" )
lowerCAmelCase_ : Dict = 6
lowerCAmelCase_ : List[Any] = (
["""distillation.py"""]
+ bash_script.split()
+ [
F'--output_dir={output_dir}',
"""--gpus=1""",
"""--learning_rate=1e-3""",
F'--num_train_epochs={epochs}',
"""--warmup_steps=10""",
"""--val_check_interval=1.0""",
"""--do_predict""",
]
)
with patch.object(lowerCamelCase , """argv""" , lowerCamelCase ):
lowerCAmelCase_ : Dict = argparse.ArgumentParser()
lowerCAmelCase_ : int = pl.Trainer.add_argparse_args(lowerCamelCase )
lowerCAmelCase_ : List[str] = SummarizationDistiller.add_model_specific_args(lowerCamelCase , os.getcwd() )
lowerCAmelCase_ : List[Any] = parser.parse_args()
# assert args.gpus == gpus THIS BREAKS for multi_gpu
lowerCAmelCase_ : str = distill_main(lowerCamelCase )
# Check metrics
lowerCAmelCase_ : Union[str, Any] = load_json(model.metrics_save_path )
lowerCAmelCase_ : Union[str, Any] = metrics["""val"""][0]
lowerCAmelCase_ : Union[str, Any] = metrics["""val"""][-1]
assert len(metrics["""val"""] ) >= (args.max_epochs / args.val_check_interval) # +1 accounts for val_sanity_check
assert last_step_stats["val_avg_gen_time"] >= 0.01
assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"] # model learned nothing
assert 1.0 >= last_step_stats["val_avg_gen_time"] # model hanging on generate. Maybe bad config was saved.
assert isinstance(last_step_stats[F'val_avg_{model.val_metric}'] , lowerCamelCase )
# check lightning ckpt can be loaded and has a reasonable statedict
lowerCAmelCase_ : Union[str, Any] = os.listdir(lowerCamelCase )
lowerCAmelCase_ : Dict = [x for x in contents if x.endswith(""".ckpt""" )][0]
lowerCAmelCase_ : Optional[int] = os.path.join(args.output_dir , lowerCamelCase )
lowerCAmelCase_ : int = torch.load(lowerCamelCase , map_location="""cpu""" )
lowerCAmelCase_ : Tuple = """model.model.decoder.layers.0.encoder_attn_layer_norm.weight"""
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
lowerCAmelCase_ : Union[str, Any] = {os.path.basename(lowerCamelCase ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics["""test"""] ) == 1
| 89 | 0 |
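Both tests above share one technique: they rewrite a bash script's arguments and then run the training entry point in-process by patching sys.argv. The trick in isolation (train here is a hypothetical stand-in for the real finetune.main / distill_main entry points):

import argparse
import sys
from unittest.mock import patch

def train() -> argparse.Namespace:
    # Hypothetical stand-in for a script entry point that reads sys.argv.
    parser = argparse.ArgumentParser()
    parser.add_argument("--output_dir")
    parser.add_argument("--learning_rate", type=float)
    return parser.parse_args()

testargs = ["finetune.py", "--output_dir", "/tmp/out", "--learning_rate", "3e-4"]
with patch.object(sys, "argv", testargs):
    args = train()

assert args.output_dir == "/tmp/out" and args.learning_rate == 3e-4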
'''simple docstring'''
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def _is_chinese_char(cp):
    '''simple docstring'''
    if (
        (cp >= 0x4E00 and cp <= 0x9FFF)
        or (cp >= 0x3400 and cp <= 0x4DBF)  #
        or (cp >= 0x2_0000 and cp <= 0x2_A6DF)  #
        or (cp >= 0x2_A700 and cp <= 0x2_B73F)  #
        or (cp >= 0x2_B740 and cp <= 0x2_B81F)  #
        or (cp >= 0x2_B820 and cp <= 0x2_CEAF)  #
        or (cp >= 0xF900 and cp <= 0xFAFF)
        or (cp >= 0x2_F800 and cp <= 0x2_FA1F)  #
    ):  # covers the CJK Unified Ideographs blocks and extensions
        return True
    return False
def is_chinese(word):
    '''simple docstring'''
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1
def UpperCamelCase_( snake_case : List[str] ):
'''simple docstring'''
snake_case_ = set()
for token in tokens:
snake_case_ = len(snake_case ) > 1 and is_chinese(snake_case )
if chinese_word:
word_set.add(snake_case )
snake_case_ = list(snake_case )
return word_list
def UpperCamelCase_( snake_case : List[str] , snake_case : set() ):
'''simple docstring'''
if not chinese_word_set:
return bert_tokens
snake_case_ = max([len(snake_case ) for w in chinese_word_set] )
snake_case_ = bert_tokens
snake_case_ , snake_case_ = 0, len(snake_case )
while start < end:
snake_case_ = True
if is_chinese(bert_word[start] ):
snake_case_ = min(end - start , snake_case )
for i in range(snake_case , 1 , -1 ):
snake_case_ = "".join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1 , start + i ):
snake_case_ = "##" + bert_word[j]
snake_case_ = start + i
snake_case_ = False
break
if single_word:
start += 1
return bert_word
def UpperCamelCase_( snake_case : List[str] , snake_case : LTP , snake_case : BertTokenizer ):
'''simple docstring'''
snake_case_ = []
for i in range(0 , len(snake_case ) , 1_0_0 ):
snake_case_ = ltp_tokenizer.seg(lines[i : i + 1_0_0] )[0]
snake_case_ = [get_chinese_word(snake_case ) for r in res]
ltp_res.extend(snake_case )
assert len(snake_case ) == len(snake_case )
snake_case_ = []
for i in range(0 , len(snake_case ) , 1_0_0 ):
snake_case_ = bert_tokenizer(lines[i : i + 1_0_0] , add_special_tokens=snake_case , truncation=snake_case , max_length=5_1_2 )
bert_res.extend(res["input_ids"] )
assert len(snake_case ) == len(snake_case )
snake_case_ = []
for input_ids, chinese_word in zip(snake_case , snake_case ):
snake_case_ = []
for id in input_ids:
snake_case_ = bert_tokenizer._convert_id_to_token(snake_case )
input_tokens.append(snake_case )
snake_case_ = add_sub_symbol(snake_case , snake_case )
snake_case_ = []
# We only save pos of chinese subwords start with ##, which mean is part of a whole word.
for i, token in enumerate(snake_case ):
if token[:2] == "##":
snake_case_ = token[2:]
# save chinese tokens' pos
if len(snake_case ) == 1 and _is_chinese_char(ord(snake_case ) ):
ref_id.append(snake_case )
ref_ids.append(snake_case )
assert len(snake_case ) == len(snake_case )
return ref_ids
def UpperCamelCase_( snake_case : int ):
'''simple docstring'''
with open(args.file_name , "r" , encoding="utf-8" ) as f:
snake_case_ = f.readlines()
snake_case_ = [line.strip() for line in data if len(snake_case ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
snake_case_ = LTP(args.ltp ) # faster in GPU device
snake_case_ = BertTokenizer.from_pretrained(args.bert )
snake_case_ = prepare_ref(snake_case , snake_case , snake_case )
with open(args.save_path , "w" , encoding="utf-8" ) as f:
snake_case_ = [json.dumps(snake_case ) + "\n" for ref in ref_ids]
f.writelines(snake_case )
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="prepare_chinese_ref")
parser.add_argument(
"--file_name",
type=str,
default="./resources/chinese-demo.txt",
help="file need process, same as training data in lm",
)
parser.add_argument(
"--ltp", type=str, default="./resources/ltp", help="resources for LTP tokenizer, usually a path"
)
parser.add_argument("--bert", type=str, default="./resources/robert", help="resources for Bert tokenizer")
parser.add_argument("--save_path", type=str, default="./resources/ref.txt", help="path to save res")
args = parser.parse_args()
main(args)
| 85 |
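The script above aligns LTP word segmentation with BERT's character-level tokens and marks which positions continue a Chinese word with a "##" prefix, so whole-word masking can later mask all pieces of a word together. The alignment step in isolation (a simplified sketch; the real script additionally checks that the starting token is itself a CJK character):

def mark_word_continuations(bert_tokens, chinese_words):
    """Prefix '##' onto tokens that continue a known multi-character word."""
    max_word_len = max(len(w) for w in chinese_words)
    tokens = list(bert_tokens)
    start, end = 0, len(tokens)
    while start < end:
        matched = False
        # Try the longest possible word first, down to length 2.
        for i in range(min(end - start, max_word_len), 1, -1):
            if "".join(tokens[start : start + i]) in chinese_words:
                for j in range(start + 1, start + i):
                    tokens[j] = "##" + tokens[j]
                start += i
                matched = True
                break
        if not matched:
            start += 1
    return tokens

print(mark_word_continuations(["你", "好", "吗"], {"你好"}))  # ['你', '##好', '吗']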
def is_automorphic_number(number: int) -> bool:
    """Return True if number squared ends with the number itself (e.g. 5**2 = 25, 76**2 = 5776)."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
| 322 | 0 |
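A quick check of the function above: 5 squared is 25 and 76 squared is 5776, both ending in the original number, while 7 squared is 49 and fails on the last digit.

for n in (0, 1, 5, 6, 7, 25, 76):
    print(n, is_automorphic_number(n))
# 0, 1, 5, 6, 25, 76 -> True; 7 -> False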
import unittest
from transformers import GPTSwaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
UpperCamelCase = get_tests_dir('fixtures/test_sentencepiece_with_bytefallback.model')
@require_sentencepiece
@require_tokenizers
class __lowerCamelCase ( UpperCamelCase__ , unittest.TestCase ):
"""simple docstring"""
snake_case__ = GPTSwaTokenizer
snake_case__ = False
snake_case__ = True
snake_case__ = False
def a ( self : Dict ) -> str:
super().setUp()
# We have a SentencePiece fixture for testing
lowerCAmelCase__ = GPTSwaTokenizer(SCREAMING_SNAKE_CASE__ , eos_token="<unk>" , bos_token="<unk>" , pad_token="<unk>" )
tokenizer.save_pretrained(self.tmpdirname )
def a ( self : List[str] , SCREAMING_SNAKE_CASE__ : Optional[int] ) -> int:
lowerCAmelCase__ = "This is a test"
lowerCAmelCase__ = "This is a test"
return input_text, output_text
def a ( self : Optional[int] ) -> Union[str, Any]:
lowerCAmelCase__ = "<s>"
lowerCAmelCase__ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
def a ( self : Tuple ) -> Optional[Any]:
lowerCAmelCase__ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<unk>" )
self.assertEqual(vocab_keys[1] , "<s>" )
self.assertEqual(vocab_keys[-1] , "j" )
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , 2_000 )
def a ( self : Union[str, Any] ) -> Union[str, Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 2_000 )
def a ( self : List[str] ) -> int:
lowerCAmelCase__ = GPTSwaTokenizer(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = tokenizer.tokenize("This is a test" )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) , [465, 287, 265, 631, 842] )
lowerCAmelCase__ = tokenizer.tokenize("I was born in 92000, and this is falsé." )
# fmt: off
self.assertListEqual(
SCREAMING_SNAKE_CASE__ , ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."] , )
# fmt: on
lowerCAmelCase__ = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ )
self.assertListEqual(
SCREAMING_SNAKE_CASE__ , [262, 272, 1_525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260] , )
lowerCAmelCase__ = tokenizer.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ )
# fmt: off
self.assertListEqual(
SCREAMING_SNAKE_CASE__ , ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."] )
# fmt: on
def a ( self : List[Any] ) -> int:
lowerCAmelCase__ = GPTSwaTokenizer(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = ["This is a test", "I was born in 92000, and this is falsé."]
lowerCAmelCase__ = [
[465, 287, 265, 631, 842],
[262, 272, 1_525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
]
# Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
for text, expected_ids in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
self.assertListEqual(tokenizer.encode_fast(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
# Test that decode_fast returns the input text
for text, token_ids in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
self.assertEqual(tokenizer.decode_fast(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
@slow
def a ( self : Any ) -> str:
lowerCAmelCase__ = [
"<|python|>def fibonacci(n)\n if n < 0:\n print('Incorrect input')",
"Hey there, how are you doing this fine day?",
"This is a text with a trailing spaces followed by a dot .",
"Häj sväjs lillebrör! =)",
"Det är inget fel på Mr. Cool",
]
# fmt: off
lowerCAmelCase__ = {"input_ids": [[63_423, 5, 6_811, 14_954, 282, 816, 3_821, 63_466, 63_425, 63_462, 18, 63_978, 678, 301, 1_320, 63_423, 63_455, 63_458, 18, 63_982, 4_246, 3_940, 1_901, 47_789, 5_547, 18_994], [19_630, 1_100, 63_446, 1_342, 633, 544, 4_488, 593, 5_102, 2_416, 63_495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_652, 428, 268, 1_936, 515, 268, 58_593, 22_413, 9_106, 546, 268, 33_213, 63_979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [55_130, 63_450, 924, 63_449, 2_249, 4_062, 1_558, 318, 63_504, 21_498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2_827, 2_559, 332, 6_575, 63_443, 26_801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=SCREAMING_SNAKE_CASE__ , model_name="AI-Sweden/gpt-sw3-126m" , sequences=SCREAMING_SNAKE_CASE__ , )
| 367 |
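The expected tokens in the test above show SentencePiece byte fallback at work: characters missing from the vocabulary are decomposed into byte tokens, so 'é' becomes <0xC3>, <0xA9> and the digit '9' becomes <0x39>. A small sketch of that decomposition, assuming only the UTF-8 encoding rules:

def byte_fallback_tokens(char: str) -> list:
    """Render a character the way SentencePiece byte fallback would."""
    return [f"<0x{b:02X}>" for b in char.encode("utf-8")]

print(byte_fallback_tokens("é"))  # ['<0xC3>', '<0xA9>']
print(byte_fallback_tokens("9"))  # ['<0x39>'] -- matches the test's '92000' split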
import argparse
from collections import defaultdict
def _A ( lowerCAmelCase_ : Dict , lowerCAmelCase_ : str , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Dict , lowerCAmelCase_ : int ):
"""simple docstring"""
lowerCAmelCase__ = F'{file}_{class_name}_{test_name}'
done_test[_id] += 1
with open(lowerCAmelCase_ , "r" ) as f:
lowerCAmelCase__ = f.readlines()
lowerCAmelCase__ = F'class {class_name}('
lowerCAmelCase__ = F'{4 * " "}def {test_name}('
lowerCAmelCase__ = F'{8 * " "}{correct_line.split()[0]}'
lowerCAmelCase__ = F'{16 * " "}{correct_line.split()[0]}'
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = 0
lowerCAmelCase__ = 0
lowerCAmelCase__ = []
for line in lines:
if line.startswith(lowerCAmelCase_ ):
lowerCAmelCase__ = True
elif in_class and line.startswith(lowerCAmelCase_ ):
lowerCAmelCase__ = True
elif in_class and in_func and (line.startswith(lowerCAmelCase_ ) or line.startswith(lowerCAmelCase_ )):
lowerCAmelCase__ = len(line.split(correct_line.split()[0] )[0] )
count += 1
if count == done_test[_id]:
lowerCAmelCase__ = True
if in_class and in_func and in_line:
if ")" not in line:
continue
else:
lowerCAmelCase__ = True
if in_class and in_func and in_line and insert_line:
new_lines.append(F'{spaces * " "}{correct_line}' )
lowerCAmelCase__ = lowerCAmelCase__ = lowerCAmelCase__ = lowerCAmelCase__ = False
else:
new_lines.append(lowerCAmelCase_ )
with open(lowerCAmelCase_ , "w" ) as f:
for line in new_lines:
f.write(lowerCAmelCase_ )
def _A ( lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Any=None ):
"""simple docstring"""
if fail is not None:
with open(lowerCAmelCase_ , "r" ) as f:
lowerCAmelCase__ = {l.strip() for l in f.readlines()}
else:
lowerCAmelCase__ = None
with open(lowerCAmelCase_ , "r" ) as f:
lowerCAmelCase__ = f.readlines()
lowerCAmelCase__ = defaultdict(lowerCAmelCase_ )
for line in correct_lines:
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = line.split(";" )
if test_failures is None or "::".join([file, class_name, test_name] ) in test_failures:
overwrite_file(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--correct_filename', help='filename of tests with expected result')
parser.add_argument('--fail_filename', help='filename of test failures', type=str, default=None)
args = parser.parse_args()
main(args.correct_filename, args.fail_filename)
| 221 | 0 |
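The script above consumes a semicolon-separated file of file;class;test;correct_line records and rewrites the matching assertion inside each test, counting repeats per test id. A sketch of parsing that record format (the sample record is hypothetical):

from collections import defaultdict

correct_lines = [
    "tests/test_foo.py;FooModelTest;test_logits;expected_slice = np.array([0.1, 0.2, 0.3])",
]

done_test = defaultdict(int)
for line in correct_lines:
    file, class_name, test_name, correct_line = line.split(";")
    _id = f"{file}_{class_name}_{test_name}"
    done_test[_id] += 1  # tracks repeated (file, class, test) records, as above
    print(_id, "->", correct_line)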
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, WavaVecaConfig, WavaVecaFeatureExtractor
from transformers.testing_utils import require_torch, slow
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
global_rng = random.Random()
def a__ ( lowercase : Dict, lowercase : Any=1.0, lowercase : Optional[Any]=None, lowercase : Optional[int]=None ) -> Optional[int]:
"""simple docstring"""
if rng is None:
_UpperCamelCase = global_rng
_UpperCamelCase = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Optional[Any] , lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[Any]=7 , lowerCAmelCase__ : int=400 , lowerCAmelCase__ : int=2000 , lowerCAmelCase__ : Dict=1 , lowerCAmelCase__ : Dict=0.0 , lowerCAmelCase__ : List[Any]=16000 , lowerCAmelCase__ : List[Any]=True , lowerCAmelCase__ : str=True , ) -> Tuple:
'''simple docstring'''
_UpperCamelCase = parent
_UpperCamelCase = batch_size
_UpperCamelCase = min_seq_length
_UpperCamelCase = max_seq_length
_UpperCamelCase = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
_UpperCamelCase = feature_size
_UpperCamelCase = padding_value
_UpperCamelCase = sampling_rate
_UpperCamelCase = return_attention_mask
_UpperCamelCase = do_normalize
def snake_case__ ( self : Union[str, Any] ) -> int:
'''simple docstring'''
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def snake_case__ ( self : Tuple , lowerCAmelCase__ : List[Any]=False , lowerCAmelCase__ : Any=False ) -> Optional[Any]:
'''simple docstring'''
def _flatten(lowerCAmelCase__ : Optional[Any] ):
return list(itertools.chain(*lowerCAmelCase__ ) )
if equal_length:
_UpperCamelCase = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
_UpperCamelCase = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
_UpperCamelCase = [np.asarray(lowerCAmelCase__ ) for x in speech_inputs]
return speech_inputs
class __lowerCAmelCase ( __magic_name__ , unittest.TestCase ):
"""simple docstring"""
_snake_case : Tuple = WavaVecaFeatureExtractor
def snake_case__ ( self : int ) -> Any:
'''simple docstring'''
_UpperCamelCase = WavaVecaFeatureExtractionTester(self )
def snake_case__ ( self : Union[str, Any] , lowerCAmelCase__ : Optional[int] ) -> List[Any]:
'''simple docstring'''
self.assertTrue(np.all(np.mean(lowerCAmelCase__ , axis=0 ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(lowerCAmelCase__ , axis=0 ) - 1 ) < 1e-3 ) )
def snake_case__ ( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
_UpperCamelCase = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
_UpperCamelCase = [np.asarray(lowerCAmelCase__ ) for speech_input in speech_inputs]
# Test not batched input
_UpperCamelCase = feat_extract(speech_inputs[0] , return_tensors='''np''' ).input_values
_UpperCamelCase = feat_extract(np_speech_inputs[0] , return_tensors='''np''' ).input_values
self.assertTrue(np.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1e-3 ) )
# Test batched
_UpperCamelCase = feat_extract(lowerCAmelCase__ , return_tensors='''np''' ).input_values
_UpperCamelCase = feat_extract(lowerCAmelCase__ , return_tensors='''np''' ).input_values
for enc_seq_a, enc_seq_a in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
self.assertTrue(np.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
_UpperCamelCase = [floats_list((1, x) )[0] for x in (800, 800, 800)]
_UpperCamelCase = np.asarray(lowerCAmelCase__ )
_UpperCamelCase = feat_extract(lowerCAmelCase__ , return_tensors='''np''' ).input_values
_UpperCamelCase = feat_extract(lowerCAmelCase__ , return_tensors='''np''' ).input_values
for enc_seq_a, enc_seq_a in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
self.assertTrue(np.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1e-3 ) )
def snake_case__ ( self : Dict ) -> List[str]:
'''simple docstring'''
_UpperCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_UpperCamelCase = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
_UpperCamelCase = ['''longest''', '''max_length''', '''do_not_pad''']
_UpperCamelCase = [None, 1600, None]
for max_length, padding in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
_UpperCamelCase = feat_extract(lowerCAmelCase__ , padding=lowerCAmelCase__ , max_length=lowerCAmelCase__ , return_tensors='''np''' )
_UpperCamelCase = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self.assertTrue(input_values[0][800:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[1][:1000] )
self.assertTrue(input_values[0][1000:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[2][:1200] )
def snake_case__ ( self : int ) -> str:
'''simple docstring'''
_UpperCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_UpperCamelCase = range(800 , 1400 , 200 )
_UpperCamelCase = [floats_list((1, x) )[0] for x in lengths]
_UpperCamelCase = ['''longest''', '''max_length''', '''do_not_pad''']
_UpperCamelCase = [None, 1600, None]
for max_length, padding in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
_UpperCamelCase = feat_extract(lowerCAmelCase__ , max_length=lowerCAmelCase__ , padding=lowerCAmelCase__ )
_UpperCamelCase = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self._check_zero_mean_unit_variance(input_values[1][:1000] )
self._check_zero_mean_unit_variance(input_values[2][:1200] )
def snake_case__ ( self : Any ) -> Any:
'''simple docstring'''
_UpperCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_UpperCamelCase = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
_UpperCamelCase = feat_extract(
lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=1000 , padding='''max_length''' , return_tensors='''np''' )
_UpperCamelCase = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
def snake_case__ ( self : str ) -> int:
'''simple docstring'''
_UpperCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_UpperCamelCase = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
_UpperCamelCase = feat_extract(
lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=1000 , padding='''longest''' , return_tensors='''np''' )
_UpperCamelCase = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 1000) )
_UpperCamelCase = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
_UpperCamelCase = feat_extract(
lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=2000 , padding='''longest''' , return_tensors='''np''' )
_UpperCamelCase = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 1200) )
@require_torch
def snake_case__ ( self : Dict ) -> Any:
'''simple docstring'''
import torch
_UpperCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_UpperCamelCase = np.random.rand(100 ).astype(np.floataa )
_UpperCamelCase = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
_UpperCamelCase = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''np''' )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
_UpperCamelCase = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''pt''' )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
@slow
@require_torch
def snake_case__ ( self : Optional[Any] ) -> str:
'''simple docstring'''
for model_id in WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST:
_UpperCamelCase = WavaVecaConfig.from_pretrained(lowerCAmelCase__ )
_UpperCamelCase = WavaVecaFeatureExtractor.from_pretrained(lowerCAmelCase__ )
# only "layer" feature extraction norm should make use of
# attention_mask
self.assertEqual(feat_extract.return_attention_mask , config.feat_extract_norm == '''layer''' )
| 324 |
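The _check_zero_mean_unit_variance helper above asserts that only the unpadded prefix of each example is normalized. The normalization itself in plain NumPy (the 1e-7 epsilon mirrors the feature extractor's implementation, to the best of my knowledge):

import numpy as np

def zero_mean_unit_var_norm(values: np.ndarray) -> np.ndarray:
    """Normalize a 1-D waveform to zero mean and unit variance."""
    return (values - values.mean()) / np.sqrt(values.var() + 1e-7)

raw = np.random.rand(800).astype(np.float32)
normed = zero_mean_unit_var_norm(raw)
assert abs(normed.mean()) < 1e-3
assert abs(normed.var() - 1) < 1e-3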
'''simple docstring'''
import itertools
import string
from collections.abc import Generator, Iterable
def chunker(seq: Iterable[str], size: int) -> Generator[tuple[str, ...], None, None]:
    """Yield successive fixed-size tuples from an iterable."""
    it = iter(seq)
    while True:
        chunk = tuple(itertools.islice(it, size))
        if not chunk:
            return
        yield chunk
def prepare_input(dirty: str) -> str:
    """Uppercase, drop non-letters, split doubled letters with X and pad to even length."""
    dirty = "".join([c.upper() for c in dirty if c in string.ascii_letters])
    clean = ""
    if len(dirty) < 2:
        return dirty
    for i in range(len(dirty) - 1):
        clean += dirty[i]
        if dirty[i] == dirty[i + 1]:
            clean += "X"
    clean += dirty[-1]
    if len(clean) & 1:
        clean += "X"
    return clean
def generate_table(key: str) -> list[str]:
    # I and J share a cell, so the alphabet omits J
    alphabet = "ABCDEFGHIKLMNOPQRSTUVWXYZ"
    # we're using a list instead of a '2d' array because it makes the math
    # for setting up the table and doing the actual encoding/decoding simpler
    table = []
    # copy key chars into the table if they are in `alphabet` ignoring duplicates
    for char in key.upper():
        if char not in table and char in alphabet:
            table.append(char)
    # fill the rest of the table in with the remaining alphabet chars
    for char in alphabet:
        if char not in table:
            table.append(char)
    return table
def encode(plaintext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = prepare_input(plaintext)
    ciphertext = ""
    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(plaintext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)
        if row1 == row2:
            ciphertext += table[row1 * 5 + (col1 + 1) % 5]
            ciphertext += table[row2 * 5 + (col2 + 1) % 5]
        elif col1 == col2:
            ciphertext += table[((row1 + 1) % 5) * 5 + col1]
            ciphertext += table[((row2 + 1) % 5) * 5 + col2]
        else:  # rectangle
            ciphertext += table[row1 * 5 + col2]
            ciphertext += table[row2 * 5 + col1]
    return ciphertext
def decode(ciphertext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = ""
    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(ciphertext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)
        if row1 == row2:
            plaintext += table[row1 * 5 + (col1 - 1) % 5]
            plaintext += table[row2 * 5 + (col2 - 1) % 5]
        elif col1 == col2:
            plaintext += table[((row1 - 1) % 5) * 5 + col1]
            plaintext += table[((row2 - 1) % 5) * 5 + col2]
        else:  # rectangle
            plaintext += table[row1 * 5 + col2]
            plaintext += table[row2 * 5 + col1]
    return plaintext
| 324 | 1 |
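A round trip with the cipher above. Note that Playfair is lossy: J maps to I, doubled letters gain a padding X, and odd-length input is padded, so decoding recovers the prepared text rather than the raw message.

key = "MONARCHY"
message = "Hide the gold"
ciphertext = encode(message, key)
print(ciphertext)
assert decode(ciphertext, key) == prepare_input(message)  # 'HIDETHEGOLDX'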
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_nllb_moe': [
'NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP',
'NllbMoeConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_nllb_moe"] = [
'NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST',
'NllbMoeForConditionalGeneration',
'NllbMoeModel',
'NllbMoePreTrainedModel',
'NllbMoeTop2Router',
'NllbMoeSparseMLP',
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
        NllbMoeTop2Router,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 287 |
'''simple docstring'''
import importlib.metadata
from typing import Union
from packaging.version import Version, parse
from .constants import STR_OPERATION_TO_FUNC
torch_version = parse(importlib.metadata.version('torch'))
def compare_versions(library_or_version: Union[str, Version], operation: str, requirement_version: str) -> bool:
    """Compare a library's installed version (given by name or as a parsed Version) against a requirement."""
    if operation not in STR_OPERATION_TO_FUNC.keys():
        raise ValueError(F"""`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys() )}, received {operation}""" )
    operation_func = STR_OPERATION_TO_FUNC[operation]
    if isinstance(library_or_version, str):
        library_or_version = parse(importlib.metadata.version(library_or_version))
    return operation_func(library_or_version, parse(requirement_version))
def is_torch_version(operation: str, version: str) -> bool:
    """Check the installed torch version against a requirement string."""
    return compare_versions(torch_version, operation, version)
| 287 | 1 |
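Typical usage of the helpers above, assuming STR_OPERATION_TO_FUNC maps operator strings (">", ">=", "==", "!=", "<", "<=") to the corresponding functions from the operator module, and that torch and packaging are installed:

# Compare any installed library against a requirement string...
print(compare_versions("packaging", ">=", "20.0"))
# ...or gate a code path on the installed torch version.
if is_torch_version(">=", "1.12.0"):
    print("torch is at least 1.12")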
"""simple docstring"""
import cmath
import math
def apparent_power(voltage: float, current: float, voltage_angle: float, current_angle: float) -> complex:
    """Calculate the apparent power in a single-phase AC circuit; angles are given in degrees."""
    voltage_angle_rad = math.radians(voltage_angle)
    current_angle_rad = math.radians(current_angle)
    # Convert voltage and current to rectangular (complex) form
    voltage_rect = cmath.rect(voltage, voltage_angle_rad)
    current_rect = cmath.rect(current, current_angle_rad)
    # Calculate apparent power
    return voltage_rect * current_rect
if __name__ == "__main__":
import doctest
doctest.testmod()
| 25 |
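For example, 120 V at 0 degrees multiplied by 10 A at -30 degrees gives an apparent power of 1200 VA at -30 degrees (note the function multiplies the phasors directly, without conjugating the current):

s = apparent_power(120, 10, 0, -30)
print(abs(s))          # ~1200.0 volt-amperes
print(cmath.phase(s))  # ~-0.5236 rad, i.e. -30 degrees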
from __future__ import annotations
import unittest
import numpy as np
from transformers import LayoutLMConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.layoutlm.modeling_tf_layoutlm import (
TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMForMaskedLM,
TFLayoutLMForQuestionAnswering,
TFLayoutLMForSequenceClassification,
TFLayoutLMForTokenClassification,
TFLayoutLMModel,
)
class __magic_name__ :
"""simple docstring"""
def __init__( self :Tuple , snake_case :Optional[Any] , snake_case :Tuple=13 , snake_case :Dict=7 , snake_case :List[Any]=True , snake_case :List[Any]=True , snake_case :Dict=True , snake_case :Any=True , snake_case :Optional[int]=99 , snake_case :Any=32 , snake_case :Dict=2 , snake_case :int=4 , snake_case :Optional[int]=37 , snake_case :List[str]="gelu" , snake_case :List[Any]=0.1 , snake_case :Optional[Any]=0.1 , snake_case :Tuple=512 , snake_case :Tuple=16 , snake_case :Tuple=2 , snake_case :Optional[int]=0.02 , snake_case :str=3 , snake_case :Optional[int]=4 , snake_case :List[str]=None , snake_case :Tuple=1_000 , ):
'''simple docstring'''
A_ : str = parent
A_ : str = batch_size
A_ : str = seq_length
A_ : Any = is_training
A_ : Any = use_input_mask
A_ : str = use_token_type_ids
A_ : Tuple = use_labels
A_ : Optional[Any] = vocab_size
A_ : Dict = hidden_size
A_ : str = num_hidden_layers
A_ : Dict = num_attention_heads
A_ : str = intermediate_size
A_ : int = hidden_act
A_ : List[Any] = hidden_dropout_prob
A_ : Dict = attention_probs_dropout_prob
A_ : Optional[Any] = max_position_embeddings
A_ : List[Any] = type_vocab_size
A_ : Any = type_sequence_label_size
A_ : Dict = initializer_range
A_ : Any = num_labels
A_ : Optional[int] = num_choices
A_ : Optional[Any] = scope
A_ : Any = range_bbox
def SCREAMING_SNAKE_CASE ( self :str ):
'''simple docstring'''
A_ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
# convert bbox to numpy since TF does not support item assignment
        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox).numpy()
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        bbox = tf.convert_to_tensor(bbox)
A_ : Any = None
if self.use_input_mask:
A_ : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
A_ : str = None
if self.use_token_type_ids:
A_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
A_ : Dict = None
A_ : List[Any] = None
A_ : List[str] = None
if self.use_labels:
A_ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A_ : str = ids_tensor([self.batch_size] , self.num_choices )
A_ : int = LayoutLMConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        '''simple docstring'''
        model = TFLayoutLMModel(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_for_masked_lm(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        '''simple docstring'''
        model = TFLayoutLMForMaskedLM(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = TFLayoutLMForSequenceClassification(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = TFLayoutLMForTokenClassification(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        '''simple docstring'''
        model = TFLayoutLMForQuestionAnswering(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_tf
class TFLayoutLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""
    all_model_classes = (
        (
            TFLayoutLMModel,
            TFLayoutLMForMaskedLM,
            TFLayoutLMForTokenClassification,
            TFLayoutLMForSequenceClassification,
            TFLayoutLMForQuestionAnswering,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFLayoutLMModel,
            "fill-mask": TFLayoutLMForMaskedLM,
            "text-classification": TFLayoutLMForSequenceClassification,
            "token-classification": TFLayoutLMForTokenClassification,
            "zero-shot": TFLayoutLMForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = True
    onnx_min_opset = 10
    def setUp(self):
        '''simple docstring'''
        self.model_tester = TFLayoutLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMConfig, hidden_size=37)
    def test_config(self):
        '''simple docstring'''
        self.config_tester.run_common_tests()
    def test_model(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_masked_lm(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)
    def test_for_sequence_classification(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)
    def test_for_token_classification(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
    def test_for_question_answering(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        '''simple docstring'''
        for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    @unittest.skip("Onnx compliancy broke with TF 2.10")
    def test_onnx_compliancy(self):
        '''simple docstring'''
        pass
def prepare_layoutlm_batch_inputs():
    # Here we prepare a batch of 2 sequences to test a LayoutLM forward pass on:
    # fmt: off
    input_ids = tf.convert_to_tensor([[101,1019,1014,1016,1037,12849,4747,1004,14246,2278,5439,4524,5002,2930,2193,2930,4341,3208,1005,1055,2171,2848,11300,3531,102],[101,4070,4034,7020,1024,3058,1015,1013,2861,1013,6070,19274,2772,6205,27814,16147,16147,4343,2047,10283,10969,14389,1012,2338,102]] ) # noqa: E231
    attention_mask = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],] ) # noqa: E231
    bbox = tf.convert_to_tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1000,1000,1000,1000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1000,1000,1000,1000]]] ) # noqa: E231
    token_type_ids = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]] ) # noqa: E231
    # these are sequence labels (i.e. at the token level)
    labels = tf.convert_to_tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]] ) # noqa: E231
# fmt: on
return input_ids, attention_mask, bbox, token_type_ids, labels
@require_tf
class TFLayoutLMModelIntegrationTest(unittest.TestCase):
    """simple docstring"""
    @slow
    def test_forward_pass_no_head(self):
        '''simple docstring'''
        model = TFLayoutLMModel.from_pretrained("microsoft/layoutlm-base-uncased")
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()
        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids)
        # test the sequence output on [0, :3, :3]
        expected_slice = tf.convert_to_tensor(
            [[0.1785, -0.1947, -0.0425], [-0.3254, -0.2807, 0.2553], [-0.5391, -0.3322, 0.3364]], )
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-3))
        # test the pooled output on [1, :3]
        expected_slice = tf.convert_to_tensor([-0.6580, -0.0214, 0.8552])
        self.assertTrue(np.allclose(outputs.pooler_output[1, :3], expected_slice, atol=1e-3))
    @slow
    def test_forward_pass_sequence_classification(self):
        '''simple docstring'''
        model = TFLayoutLMForSequenceClassification.from_pretrained("microsoft/layoutlm-base-uncased", num_labels=2)
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()
        # forward pass
        outputs = model(
            input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, labels=tf.convert_to_tensor([1, 1]), )
        # test whether we get a loss as a scalar
        loss = outputs.loss
        expected_shape = (2,)
        self.assertEqual(loss.shape, expected_shape)
        # test the shape of the logits
        logits = outputs.logits
        expected_shape = (2, 2)
        self.assertEqual(logits.shape, expected_shape)
    @slow
    def test_forward_pass_token_classification(self):
        '''simple docstring'''
        model = TFLayoutLMForTokenClassification.from_pretrained("microsoft/layoutlm-base-uncased", num_labels=13)
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()
        # forward pass
        outputs = model(
            input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, labels=labels)
        # test the shape of the logits
        logits = outputs.logits
        expected_shape = tf.convert_to_tensor((2, 25, 13))
        self.assertEqual(logits.shape, expected_shape)
    @slow
    def test_forward_pass_question_answering(self):
        '''simple docstring'''
        model = TFLayoutLMForQuestionAnswering.from_pretrained("microsoft/layoutlm-base-uncased")
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()
        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids)
        # test the shape of the logits
        expected_shape = tf.convert_to_tensor((2, 25))
        self.assertEqual(outputs.start_logits.shape, expected_shape)
        self.assertEqual(outputs.end_logits.shape, expected_shape)
| 300 | 0 |
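The bbox legalization loop in the tester above enforces x0 <= x1 and y0 <= y1 for every box with nested Python loops. As an aside, the same invariant can be enforced in one vectorized step; the sketch below is illustrative only, it assumes numpy, and legalize_bboxes is a hypothetical helper name, not part of the test file.

import numpy as np

def legalize_bboxes(bbox: np.ndarray) -> np.ndarray:
    # bbox: (batch, seq_len, 4) with columns (x0, y0, x1, y1).
    # Sorting each coordinate pair guarantees x0 <= x1 and y0 <= y1.
    xs = np.sort(bbox[..., [0, 2]], axis=-1)
    ys = np.sort(bbox[..., [1, 3]], axis=-1)
    return np.stack([xs[..., 0], ys[..., 0], xs[..., 1], ys[..., 1]], axis=-1)

print(legalize_bboxes(np.array([[[5, 9, 2, 3]]])))  # a swapped box becomes [[[2 3 5 9]]]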
"""simple docstring"""
def rank_of_matrix(matrix) -> int:
    rows = len(matrix)
    columns = len(matrix[0])
    rank = min(rows, columns)
    row = 0
    # A while loop is used (rather than `for row in range(rank)`) so that the
    # row pointer can actually be decremented to revisit the same row.
    while row < rank:
        # Check if diagonal element is not zero
        if matrix[row][row] != 0:
            # Eliminate all the elements below the diagonal
            for col in range(row + 1, rows):
                multiplier = matrix[col][row] / matrix[row][row]
                for i in range(row, columns):
                    matrix[col][i] -= multiplier * matrix[row][i]
        else:
            # Find a non-zero element below to swap rows with
            reduce = True
            for i in range(row + 1, rows):
                if matrix[i][row] != 0:
                    matrix[i], matrix[row] = matrix[row], matrix[i]
                    reduce = False
                    break
            if reduce:
                rank -= 1
                for i in range(rows):
                    matrix[i][row] = matrix[i][rank]
            # Reduce the row pointer by one to stay on the same row
            row -= 1
        row += 1
    return rank
if __name__ == "__main__":
import doctest
doctest.testmod()
| 366 |
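A quick usage check for the Gaussian-elimination rank routine above (using the rank_of_matrix name as fixed above; the numpy cross-check is only a sketch and assumes numpy is installed):

import numpy as np

matrix = [[1.0, 2.0, 3.0], [2.0, 4.0, 6.0], [1.0, 0.0, 1.0]]
# The second row is twice the first, so the rank is 2.
print(rank_of_matrix([row[:] for row in matrix]))  # the function mutates its input, hence the copy -> 2
print(np.linalg.matrix_rank(np.array(matrix)))  # independent cross-check -> 2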
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
    "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
    "tokenizer_config_file": {
        "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot-3B": 128}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
def get_pairs(word):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class BlenderbotTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self, vocab_file, merges_file, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, **kwargs, ):
        """simple docstring"""
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            errors=errors, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, **kwargs, )
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")
@property
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
    def vocab_size(self) -> int:
        """simple docstring"""
        return len(self.encoder)
    def get_vocab(self):
        """simple docstring"""
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token: str) -> str:
        """simple docstring"""
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        """simple docstring"""
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8") )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens
    def _convert_token_to_id(self, token):
        """simple docstring"""
        return self.encoder.get(token, self.encoder.get(self.unk_token))
    def _convert_id_to_token(self, index):
        """simple docstring"""
        return self.decoder.get(index)
    def convert_tokens_to_string(self, tokens):
        """simple docstring"""
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """simple docstring"""
        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
                        """ Please check that the tokenizer is not corrupted!""" )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False ) -> List[int]:
        """simple docstring"""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs) -> str:
        """simple docstring"""
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        """simple docstring"""
        return token_ids_0 + [self.eos_token_id]
    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """simple docstring"""
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(" " + text)
            else:
                # Generated responses should contain them already.
                inputs.append(text)
        full_string = " ".join(inputs)
        input_ids = self.encode(full_string)
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f"""Trimmed input from conversation as it was longer than {self.model_max_length} tokens.""")
        return input_ids
| 212 | 0 |
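The tokenizer above is standard byte-level BPE. Stripped of the byte encoding, caching, and regex pre-tokenization, the core greedy merge loop reduces to this self-contained sketch (the merge table here is a toy example, not Blenderbot's real merges):

def bpe_merge(token, bpe_ranks):
    # Repeatedly merge the adjacent pair with the lowest rank until none remains.
    word = list(token)
    while len(word) > 1:
        pairs = set(zip(word, word[1:]))
        bigram = min(pairs, key=lambda p: bpe_ranks.get(p, float("inf")))
        if bigram not in bpe_ranks:
            break
        first, second = bigram
        merged, i = [], 0
        while i < len(word):
            if i < len(word) - 1 and (word[i], word[i + 1]) == (first, second):
                merged.append(first + second)
                i += 2
            else:
                merged.append(word[i])
                i += 1
        word = merged
    return word

print(bpe_merge("low", {("l", "o"): 0, ("lo", "w"): 1}))  # -> ['low']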
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionInpaintPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    '''simple docstring'''
    pipeline_class = StableDiffusionInpaintPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = frozenset(
        [] )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    def get_dummy_components(self):
        """simple docstring"""
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=9, out_channels=4, down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'), up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=True, )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, sample_size=128, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act='gelu', projection_dim=512, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
        components = {
            'unet': unet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'safety_checker': None,
            'feature_extractor': None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        """simple docstring"""
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert('RGB').resize((64, 64))
        mask_image = Image.fromarray(np.uint8(image + 4)).convert('RGB').resize((64, 64))
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'image': init_image,
            'mask_image': mask_image,
            'generator': generator,
            'num_inference_steps': 2,
            'guidance_scale': 6.0,
            'output_type': 'numpy',
        }
        return inputs
    def test_stable_diffusion_inpaint(self):
        """simple docstring"""
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInpaintPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_inference_batch_single_identical(self):
        """simple docstring"""
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class StableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
'''simple docstring'''
    def tearDown(self):
        """simple docstring"""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_diffusion_inpaint_pipeline(self):
        """simple docstring"""
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/sd2-inpaint/init_image.png' )
        mask_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' )
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'
            '/yellow_cat_sitting_on_a_park_bench.npy' )
        model_id = 'stabilityai/stable-diffusion-2-inpainting'
        pipe = StableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = 'Face of a yellow cat, high resolution, sitting on a park bench'
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, generator=generator, output_type='np', )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 9e-3
    def test_stable_diffusion_inpaint_pipeline_fp16(self):
        """simple docstring"""
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/sd2-inpaint/init_image.png' )
        mask_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' )
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'
            '/yellow_cat_sitting_on_a_park_bench_fp16.npy' )
        model_id = 'stabilityai/stable-diffusion-2-inpainting'
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id, torch_dtype=torch.float16, safety_checker=None, )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = 'Face of a yellow cat, high resolution, sitting on a park bench'
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, generator=generator, output_type='np', )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 5e-1
    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        """simple docstring"""
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/sd2-inpaint/init_image.png' )
        mask_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' )
        model_id = 'stabilityai/stable-diffusion-2-inpainting'
        pndm = PNDMScheduler.from_pretrained(model_id, subfolder='scheduler')
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id, safety_checker=None, scheduler=pndm, torch_dtype=torch.float16, )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()
        prompt = 'Face of a yellow cat, high resolution, sitting on a park bench'
        generator = torch.manual_seed(0)
        _ = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, generator=generator, num_inference_steps=2, output_type='np', )
        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.65 GB is allocated
        assert mem_bytes < 2.65 * 10**9 | 286 |
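The tests above rely on seeded torch.Generator objects so that a pipeline run is reproducible. A minimal, self-contained sketch of that determinism pattern (plain torch, no diffusers required; sample_latents is a hypothetical helper name):

import torch

def sample_latents(seed, device="cpu"):
    # A dedicated Generator isolates the sample from global RNG state.
    generator = torch.Generator(device=device).manual_seed(seed)
    return torch.randn((1, 4, 8, 8), generator=generator, device=device)

a = sample_latents(0)
b = sample_latents(0)
assert torch.equal(a, b)  # same seed -> identical latents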
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mluke"] = ["MLukeTokenizer"]
if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mluke import MLukeTokenizer
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 286 | 1 |
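_LazyModule defers the heavy submodule import until an attribute is first accessed. The same idea can be written with a PEP 562 module-level __getattr__; the sketch below is a simplified stand-in, not the transformers implementation:

# lazy_pkg/__init__.py (sketch)
import importlib

_import_structure = {"tokenization_mluke": ["MLukeTokenizer"]}
_attr_to_module = {attr: mod for mod, attrs in _import_structure.items() for attr in attrs}

def __getattr__(name):
    # Import the owning submodule only on first access to one of its names.
    if name in _attr_to_module:
        module = importlib.import_module(f".{_attr_to_module[name]}", __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")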
import os
def solution():
    with open(os.path.dirname(__file__) + '/grid.txt') as f:
        l = []  # noqa: E741
        for _ in range(20):
            l.append([int(x) for x in f.readline().split()])
    maximum = 0
    # right
    for i in range(20):
        for j in range(17):
            temp = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
            if temp > maximum:
                maximum = temp
    # down
    for i in range(17):
        for j in range(20):
            temp = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
            if temp > maximum:
                maximum = temp
    # diagonal 1
    for i in range(17):
        for j in range(17):
            temp = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
            if temp > maximum:
                maximum = temp
    # diagonal 2
    for i in range(17):
        for j in range(3, 20):
            temp = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
            if temp > maximum:
                maximum = temp
    return maximum
print(solution())
| 355 |
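The four scan loops above (right, down, and the two diagonals) can be collapsed into one pass over direction vectors. An equivalent sketch with a hypothetical max_product helper (same 20x20 grid, window of 4):

def max_product(grid, k=4):
    n = len(grid)
    best = 0
    # (dr, dc): right, down, down-right, down-left
    for dr, dc in ((0, 1), (1, 0), (1, 1), (1, -1)):
        for r in range(n):
            for c in range(n):
                # Only start a window whose far end stays inside the grid.
                if 0 <= r + (k - 1) * dr < n and 0 <= c + (k - 1) * dc < n:
                    p = 1
                    for s in range(k):
                        p *= grid[r + s * dr][c + s * dc]
                    best = max(best, p)
    return best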
'''simple docstring'''
import json
import os
import re
import sys
import urllib.request
import requests
from bs4 import BeautifulSoup
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
    ' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582'
}
def download_images_from_google_query(query: str = "dhaka", max_images: int = 5) -> int:
    max_images = min(max_images, 50)  # Prevent abuse!
    params = {
        'q': query,
        'tbm': 'isch',
        'hl': 'en',
        'ijn': '0',
    }
    html = requests.get('https://www.google.com/search', params=params, headers=headers)
    soup = BeautifulSoup(html.text, 'html.parser')
    matched_images_data = ''.join(
        re.findall(r'AF_initDataCallback\(([^<]+)\);', str(soup.select('script'))))
    matched_images_data_fix = json.dumps(matched_images_data)
    matched_images_data_json = json.loads(matched_images_data_fix)
    matched_google_image_data = re.findall(
        r'\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",', matched_images_data_json, )
    if not matched_google_image_data:
        return 0
    removed_matched_google_images_thumbnails = re.sub(
        r'\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]', '', str(matched_google_image_data), )
    matched_google_full_resolution_images = re.findall(
        r'(?:\'|,),\[\"(https:|http.*?)\",\d+,\d+\]', removed_matched_google_images_thumbnails, )
    for index, fixed_full_res_image in enumerate(matched_google_full_resolution_images):
        if index >= max_images:
            return index
        original_size_img_not_fixed = bytes(fixed_full_res_image, 'ascii').decode(
            'unicode-escape')
        original_size_img = bytes(original_size_img_not_fixed, 'ascii').decode(
            'unicode-escape')
        opener = urllib.request.build_opener()
        opener.addheaders = [
            (
                'User-Agent',
                'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
                ' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582',
            )
        ]
        urllib.request.install_opener(opener)
        path_name = F"""query_{query.replace(" ", "_")}"""
        if not os.path.exists(path_name):
            os.makedirs(path_name)
        urllib.request.urlretrieve(  # noqa: S310
            original_size_img, F"""{path_name}/original_size_img_{index}.jpg""" )
    return index
if __name__ == "__main__":
try:
        image_count = download_images_from_google_query(sys.argv[1])
print(f'''{image_count} images were downloaded to disk.''')
except IndexError:
print('Please provide a search term.')
raise
| 294 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyInpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyInpaintPipeline
    params = ['prompt', 'image_embeds', 'negative_image_embeds', 'image', 'mask_image']
    batch_params = [
        'prompt',
        'negative_prompt',
        'image_embeds',
        'negative_image_embeds',
        'image',
        'mask_image',
    ]
    required_optional_params = [
        'generator',
        'height',
        'width',
        'latents',
        'guidance_scale',
        'negative_prompt',
        'num_inference_steps',
        'return_dict',
        'guidance_scale',
        'num_images_per_prompt',
        'output_type',
        'return_dict',
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        '''simple docstring'''
        return 32
    @property
    def time_input_dim(self):
        '''simple docstring'''
        return 32
    @property
    def block_out_channels_0(self):
        '''simple docstring'''
        return self.time_input_dim
    @property
    def time_embed_dim(self):
        '''simple docstring'''
        return self.time_input_dim * 4
    @property
    def cross_attention_dim(self):
        '''simple docstring'''
        return 100
    @property
    def dummy_tokenizer(self):
        '''simple docstring'''
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base")
        return tokenizer
    @property
    def dummy_text_encoder(self):
        '''simple docstring'''
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim, transformerDimensions=self.text_embedder_hidden_size, hidden_size=self.text_embedder_hidden_size, intermediate_size=37, num_attention_heads=4, num_hidden_layers=5, vocab_size=1005, )
        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()
        return text_encoder
    @property
    def dummy_unet(self):
        '''simple docstring'''
        torch.manual_seed(0)
        model_kwargs = {
            "in_channels": 9,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "text_image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "text_image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        model = UNet2DConditionModel(**model_kwargs)
        return model
    @property
    def dummy_movq_kwargs(self):
        '''simple docstring'''
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }
    @property
    def dummy_movq(self):
        '''simple docstring'''
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        '''simple docstring'''
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler = DDIMScheduler(
            num_train_timesteps=1000, beta_schedule="linear", beta_start=0.00085, beta_end=0.012, clip_sample=False, set_alpha_to_one=False, steps_offset=1, prediction_type="epsilon", thresholding=False, )
        components = {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        '''simple docstring'''
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create mask
        mask = np.ones((64, 64), dtype=np.float32)
        mask[:32, :32] = 0
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "image": init_image,
            "mask_image": mask,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 2,
            "guidance_scale": 4.0,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_inpaint(self):
        '''simple docstring'''
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device), return_dict=False, )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        print(F"""image.shape {image.shape}""")
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.8326919, 0.73790467, 0.20918581, 0.9309612, 0.5511791, 0.43713328, 0.5513321, 0.49922934, 0.59497786] )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
    def test_inference_batch_single_identical(self):
        '''simple docstring'''
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class KandinskyInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        '''simple docstring'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_kandinsky_inpaint(self):
        '''simple docstring'''
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy" )
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" )
        mask = np.ones((768, 768), dtype=np.float32)
        mask[:250, 250:-250] = 0
        prompt = "a hat"
        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16 )
        pipe_prior.to(torch_device)
        pipeline = KandinskyInpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-inpaint", torch_dtype=torch.float16 )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt="", ).to_tuple()
        output = pipeline(
            prompt, image=init_image, mask_image=mask, image_embeds=image_emb, negative_image_embeds=zero_image_emb, generator=generator, num_inference_steps=100, height=768, width=768, output_type="np", )
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
| 52 |
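Both Kandinsky tests above build their inpainting masks as float arrays with one region zeroed out. A minimal sketch of that construction (the region choice is arbitrary, and which value marks the area to repaint depends on the pipeline's mask convention):

import numpy as np
from PIL import Image

mask = np.ones((64, 64), dtype=np.float32)
mask[:32, :32] = 0  # zero one quadrant (arbitrary region for illustration)
# Optional: visualize the mask as an 8-bit grayscale image.
Image.fromarray((mask * 255).astype(np.uint8)).save("mask_preview.png")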
'''simple docstring'''
def binomial_coefficient(n: int, r: int) -> int:
    '''simple docstring'''
    c = [0 for i in range(r + 1)]
    # nc0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # to compute current row from previous row.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]
print(binomial_coefficient(n=10, r=5))
| 85 | 0 |
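The rolling single-row update above builds Pascal's triangle in O(r) space: after the pass for i = n, c[j] holds C(n, j). A quick self-check sketch against math.comb, assuming binomial_coefficient as defined above:

import math

for n in range(10):
    for r in range(n + 1):
        assert binomial_coefficient(n, r) == math.comb(n, r)
print("rolling-row binomial matches math.comb")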
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
def make_batched(videos) -> List[List[ImageInput]]:
    """simple docstring"""
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos
    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]
    elif is_valid_image(videos):
        return [[videos]]
    raise ValueError(F'''Could not make batched video from {videos}''')
class VideoMAEImageProcessor(BaseImageProcessor):
    """simple docstring"""
    model_input_names = ["pixel_values"]
    def __init__(
        self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs, ) -> None:
        """simple docstring"""
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(
        self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray:
        """simple docstring"""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(F'''Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}''')
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def center_crop(
        self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray:
        """simple docstring"""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(F'''Size must have \'height\' and \'width\' as keys. Got {size.keys()}''')
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)
    def rescale(
        self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray:
        """simple docstring"""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def normalize(
        self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray:
        """simple docstring"""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def _preprocess_image(
        self, image: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, data_format: Optional[ChannelDimension] = ChannelDimension.FIRST, ) -> np.ndarray:
        """simple docstring"""
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # All transformations expect numpy arrays.
        image = to_numpy_array(image)
        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)
        if do_center_crop:
            image = self.center_crop(image, size=crop_size)
        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor)
        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)
        image = to_channel_dimension_format(image, data_format)
        return image
    def preprocess(
        self, videos: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs, ) -> BatchFeature:
        """simple docstring"""
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        if not valid_images(videos):
            raise ValueError(
                """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
                """torch.Tensor, tf.Tensor or jax.ndarray.""" )
        videos = make_batched(videos)
        videos = [
            [
                self._preprocess_image(
                    image=img, do_resize=do_resize, size=size, resample=resample, do_center_crop=do_center_crop, crop_size=crop_size, do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, data_format=data_format, )
                for img in video
            ]
            for video in videos
        ]
        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 355 |
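make_batched above normalizes the three accepted input layouts (a single image, one video as a list of frames, and an already-batched list of videos) to a list of videos. A small demonstration with numpy frames, assuming the function as defined above:

import numpy as np

frame = np.zeros((16, 16, 3), dtype=np.uint8)
single_image = frame
one_video = [frame, frame]
batch = [[frame, frame], [frame]]

assert len(make_batched(single_image)) == 1 and len(make_batched(single_image)[0]) == 1
assert len(make_batched(one_video)) == 1 and len(make_batched(one_video)[0]) == 2
assert make_batched(batch) is batch  # already-batched input passes through unchanged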
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
    "configuration_longt5": ["LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP", "LongT5Config", "LongT5OnnxConfig"],
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_longt5"] = [
        "LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LongT5EncoderModel",
        "LongT5ForConditionalGeneration",
        "LongT5Model",
        "LongT5PreTrainedModel",
    ]
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_longt5"] = [
        "FlaxLongT5ForConditionalGeneration",
        "FlaxLongT5Model",
        "FlaxLongT5PreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )
    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 266 | 0 |
'''simple docstring'''
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def np_sum(x):  # picklable for multiprocessing
    """simple docstring"""
    return x.sum()
def add_one(i):  # picklable for multiprocessing
    """simple docstring"""
    return i + 1
@dataclass
class A:
    """simple docstring"""
    x: int
    y: str
class PyUtilsTest(TestCase):
    """simple docstring"""
    def test_map_nested(self):
        s1 = {}
        s2 = []
        s3 = 1
        s4 = [1, 2]
        s5 = {'''a''': 1, '''b''': 2}
        s6 = {'''a''': [1, 2], '''b''': [3, 4]}
        s7 = {'''a''': {'''1''': 1}, '''b''': 2}
        s8 = {'''a''': 1, '''b''': 2, '''c''': 3, '''d''': 4}
        expected_map_nested_s1 = {}
        expected_map_nested_s2 = []
        expected_map_nested_s3 = 2
        expected_map_nested_s4 = [2, 3]
        expected_map_nested_s5 = {'''a''': 2, '''b''': 3}
        expected_map_nested_s6 = {'''a''': [2, 3], '''b''': [4, 5]}
        expected_map_nested_s7 = {'''a''': {'''1''': 2}, '''b''': 3}
        expected_map_nested_s8 = {'''a''': 2, '''b''': 3, '''c''': 4, '''d''': 5}
        self.assertEqual(map_nested(add_one, s1), expected_map_nested_s1)
        self.assertEqual(map_nested(add_one, s2), expected_map_nested_s2)
        self.assertEqual(map_nested(add_one, s3), expected_map_nested_s3)
        self.assertEqual(map_nested(add_one, s4), expected_map_nested_s4)
        self.assertEqual(map_nested(add_one, s5), expected_map_nested_s5)
        self.assertEqual(map_nested(add_one, s6), expected_map_nested_s6)
        self.assertEqual(map_nested(add_one, s7), expected_map_nested_s7)
        self.assertEqual(map_nested(add_one, s8), expected_map_nested_s8)
        num_proc = 2
        self.assertEqual(map_nested(add_one, s1, num_proc=num_proc), expected_map_nested_s1)
        self.assertEqual(map_nested(add_one, s2, num_proc=num_proc), expected_map_nested_s2)
        self.assertEqual(map_nested(add_one, s3, num_proc=num_proc), expected_map_nested_s3)
        self.assertEqual(map_nested(add_one, s4, num_proc=num_proc), expected_map_nested_s4)
        self.assertEqual(map_nested(add_one, s5, num_proc=num_proc), expected_map_nested_s5)
        self.assertEqual(map_nested(add_one, s6, num_proc=num_proc), expected_map_nested_s6)
        self.assertEqual(map_nested(add_one, s7, num_proc=num_proc), expected_map_nested_s7)
        self.assertEqual(map_nested(add_one, s8, num_proc=num_proc), expected_map_nested_s8)
        sn1 = {'''a''': np.eye(2 ), '''b''': np.zeros(3 ), '''c''': np.ones(2 )}
        expected_map_nested_sn1_sum = {'''a''': 2, '''b''': 0, '''c''': 2}
        expected_map_nested_sn1_int = {
            '''a''': np.eye(2 ).astype(int),
            '''b''': np.zeros(3 ).astype(int),
            '''c''': np.ones(2 ).astype(int),
        }
        self.assertEqual(map_nested(np_sum, sn1, map_numpy=False), expected_map_nested_sn1_sum)
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(np_sum, sn1, map_numpy=True).items()}, {k: v.tolist() for k, v in expected_map_nested_sn1_int.items()}, )
        self.assertEqual(map_nested(np_sum, sn1, map_numpy=False, num_proc=num_proc), expected_map_nested_sn1_sum)
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(np_sum, sn1, map_numpy=True, num_proc=num_proc).items()}, {k: v.tolist() for k, v in expected_map_nested_sn1_int.items()}, )
        with self.assertRaises(AttributeError):  # can't pickle a local lambda
            map_nested(lambda x: x + 1, sn1, num_proc=num_proc)
    def test_zip_dict(self):
        d1 = {'''a''': 1, '''b''': 2}
        d2 = {'''a''': 3, '''b''': 4}
        d3 = {'''a''': 5, '''b''': 6}
        expected_zip_dict_result = sorted([('''a''', (1, 3, 5)), ('''b''', (2, 4, 6))])
        self.assertEqual(sorted(zip_dict(d1, d2, d3)), expected_zip_dict_result)
    def test_temporary_assignment(self):
        class Foo:
            """simple docstring"""
            my_attr = 'bar'
        foo = Foo()
        self.assertEqual(foo.my_attr, '''bar''')
        with temporary_assignment(foo, '''my_attr''', '''BAR'''):
            self.assertEqual(foo.my_attr, '''BAR''')
        self.assertEqual(foo.my_attr, '''bar''')
@pytest.mark.parametrize(
'''iterable_length, num_proc, expected_num_proc''' , [
(1, None, 1),
(1, 1, 1),
(2, None, 1),
(2, 1, 1),
(2, 2, 1),
(2, 3, 1),
(3, 2, 1),
(16, 16, 16),
(16, 17, 16),
(17, 16, 16),
] , )
def _A ( iterable_length , num_proc , expected_num_proc ):
    """simple docstring"""
    with patch('''datasets.utils.py_utils._single_map_nested''' ) as mock_single_map_nested, patch(
        '''datasets.parallel.parallel.Pool''' ) as mock_multiprocessing_pool:
        data_struct = {F"{i}": i for i in range(iterable_length )}
        _ = map_nested(lambda x : x + 10 , data_struct , num_proc=num_proc , parallel_min_length=16 )
        if expected_num_proc == 1:
            assert mock_single_map_nested.called
            assert not mock_multiprocessing_pool.called
        else:
            assert not mock_single_map_nested.called
            assert mock_multiprocessing_pool.called
            assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
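# Standalone sketch of the dispatch rule verified above (assumes the public
# map_nested API from datasets.utils.py_utils): work is only farmed out to a
# process pool once the iterable reaches parallel_min_length, and never with
# more workers than items. The helper name below is illustrative only.
def _add_ten(x):
    return x + 10

if __name__ == "__main__":
    print(map_nested(_add_ten, {"a": [1, 2], "b": 3}))  # {'a': [11, 12], 'b': 13}
    # 20 items >= parallel_min_length=16, so this takes the multiprocessing path
    print(map_nested(_add_ten, {str(i): i for i in range(20)}, num_proc=2, parallel_min_length=16))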
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
@require_tf
def SCREAMING_SNAKE_CASE ( self : Any ):
import tensorflow as tf
from tensorflow.keras import layers
        model = layers.Dense(2 )
        def gen_random_output():
            x = tf.random.uniform((1, 3) )
            return model(x ).numpy()
        with temp_seed(4_2 , set_tensorflow=True ):
            out1 = gen_random_output()
        with temp_seed(4_2 , set_tensorflow=True ):
            out2 = gen_random_output()
        out3 = gen_random_output()
        np.testing.assert_equal(out1 , out2 )
        self.assertGreater(np.abs(out1 - out3 ).sum() , 0 )
@require_torch
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
import torch
        def gen_random_output():
            model = torch.nn.Linear(3 , 2 )
            x = torch.rand(1 , 3 )
            return model(x ).detach().numpy()
        with temp_seed(4_2 , set_pytorch=True ):
            out1 = gen_random_output()
        with temp_seed(4_2 , set_pytorch=True ):
            out2 = gen_random_output()
        out3 = gen_random_output()
        np.testing.assert_equal(out1 , out2 )
        self.assertGreater(np.abs(out1 - out3 ).sum() , 0 )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
def gen_random_output():
return np.random.rand(1 ,3 )
        with temp_seed(4_2 ):
            out1 = gen_random_output()
        with temp_seed(4_2 ):
            out2 = gen_random_output()
        out3 = gen_random_output()
        np.testing.assert_equal(out1 , out2 )
        self.assertGreater(np.abs(out1 - out3 ).sum() , 0 )
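# Standalone sketch of the temp_seed contract exercised by the three tests
# above (assumes datasets' temp_seed context manager, as imported by this test
# module): the same seed yields the same draws, and the previous RNG state is
# restored on exit.
if __name__ == "__main__":
    with temp_seed(123):
        first = np.random.rand(3)
    with temp_seed(123):
        second = np.random.rand(3)
    assert np.array_equal(first, second)  # same seed, same draws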
@pytest.mark.parametrize('''input_data''' , [{}] )
def _A ( input_data ):
    """simple docstring"""
    output_data = NestedDataStructure(input_data ).data
    assert output_data == input_data
@pytest.mark.parametrize(
'''data, expected_output''' , [
({}, []),
([], []),
('''foo''', ['''foo''']),
(['''foo''', '''bar'''], ['''foo''', '''bar''']),
([['''foo''', '''bar''']], ['''foo''', '''bar''']),
([[['''foo'''], ['''bar''']]], ['''foo''', '''bar''']),
([[['''foo'''], '''bar''']], ['''foo''', '''bar''']),
({'''a''': 1, '''b''': 2}, [1, 2]),
({'''a''': [1, 2], '''b''': [3, 4]}, [1, 2, 3, 4]),
({'''a''': [[1, 2]], '''b''': [[3, 4]]}, [1, 2, 3, 4]),
({'''a''': [[1, 2]], '''b''': [3, 4]}, [1, 2, 3, 4]),
({'''a''': [[[1], [2]]], '''b''': [[[3], [4]]]}, [1, 2, 3, 4]),
({'''a''': [[[1], [2]]], '''b''': [[3, 4]]}, [1, 2, 3, 4]),
({'''a''': [[[1], [2]]], '''b''': [3, 4]}, [1, 2, 3, 4]),
({'''a''': [[[1], [2]]], '''b''': [3, [4]]}, [1, 2, 3, 4]),
({'''a''': {'''1''': 1}, '''b''': 2}, [1, 2]),
({'''a''': {'''1''': [1]}, '''b''': 2}, [1, 2]),
({'''a''': {'''1''': [1]}, '''b''': [2]}, [1, 2]),
] , )
def _A ( data , expected_output ):
    """simple docstring"""
    output = NestedDataStructure(data ).flatten()
    assert output == expected_output
def _A ( ):
"""simple docstring"""
    input = A(x=1 , y='''foobar''' )
    expected_output = {'''x''': 1, '''y''': '''foobar'''}
    assert asdict(input ) == expected_output
    input = {'''a''': {'''b''': A(x=10 , y='''foo''' )}, '''c''': [A(x=20 , y='''bar''' )]}
    expected_output = {'''a''': {'''b''': {'''x''': 10, '''y''': '''foo'''}}, '''c''': [{'''x''': 20, '''y''': '''bar'''}]}
    assert asdict(input ) == expected_output
    with pytest.raises(TypeError ):
        asdict([1, A(x=10 , y='''foo''' )] )
def _split_text ( text ):
    """simple docstring"""
    return text.split()
def _aseconds_generator_of_aitems_with_timing ( content ):
    """simple docstring"""
    yield (time.time(), content)
    time.sleep(2 )
    yield (time.time(), content)
def _A ( ):
    """simple docstring"""
    with Pool(2 ) as pool:
        out = list(iflatmap_unordered(pool , _split_text , kwargs_iterable=[{'''text''': '''hello there'''}] * 10 ) )
        assert out.count('''hello''' ) == 10
        assert out.count('''there''' ) == 10
        assert len(out ) == 20
    # check multiprocess from pathos (uses dill for pickling)
    with multiprocess.Pool(2 ) as pool:
        out = list(iflatmap_unordered(pool , _split_text , kwargs_iterable=[{'''text''': '''hello there'''}] * 10 ) )
        assert out.count('''hello''' ) == 10
        assert out.count('''there''' ) == 10
        assert len(out ) == 20
    # check that we get items as fast as possible
    with Pool(2 ) as pool:
        out = []
        for yield_time, content in iflatmap_unordered(
            pool , _aseconds_generator_of_aitems_with_timing , kwargs_iterable=[{'''content''': '''a'''}, {'''content''': '''b'''}] ):
            assert yield_time < time.time() + 0.1, "we should get each item directly after it was yielded"
            out.append(content )
        assert out.count('''a''' ) == 2
        assert out.count('''b''' ) == 2
        assert len(out ) == 4
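# Standalone sketch of the property checked above: iflatmap_unordered streams
# flattened results back in arrival order, so a slow worker never blocks a
# fast one (API as used in the test above).
if __name__ == "__main__":
    with Pool(2) as pool:
        for token in iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 2):
            print(token)  # four tokens, order not guaranteed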
| 104 |
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPT2Config, GPT2LMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class __magic_name__ ( ModelMixin , ConfigMixin , ModuleUtilsMixin ):
    """simple docstring"""
    _keys_to_ignore_on_load_unexpected = [r'''h\.\d+\.attn\.bias''', r'''h\.\d+\.attn\.masked_bias''']
    @register_to_config
    def __init__( self , prefix_length : int , prefix_inner_dim : int , prefix_hidden_dim : Optional[int] = None , vocab_size : int = 50_257 , n_positions : int = 1_024 , n_embd : int = 768 , n_layer : int = 12 , n_head : int = 12 , n_inner : Optional[int] = None , activation_function : str = "gelu_new" , resid_pdrop : float = 0.1 , embd_pdrop : float = 0.1 , attn_pdrop : float = 0.1 , layer_norm_epsilon : float = 1e-5 , initializer_range : float = 0.02 , scale_attn_weights : bool = True , use_cache : bool = True , scale_attn_by_inverse_layer_idx : bool = False , reorder_and_upcast_attn : bool = False , ):
        '''simple docstring'''
        super().__init__()
        self.prefix_length = prefix_length
        if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
            raise ValueError(
                f"`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_inner_dim} and"
                f" `n_embd`: {n_embd} are not equal." )
        self.prefix_inner_dim = prefix_inner_dim
        self.prefix_hidden_dim = prefix_hidden_dim
        self.encode_prefix = (
            nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim )
            if self.prefix_hidden_dim is not None
            else nn.Identity()
        )
        self.decode_prefix = (
            nn.Linear(self.prefix_hidden_dim , n_embd ) if self.prefix_hidden_dim is not None else nn.Identity()
        )
        gpt_config = GPT2Config(
            vocab_size=vocab_size , n_positions=n_positions , n_embd=n_embd , n_layer=n_layer , n_head=n_head , n_inner=n_inner , activation_function=activation_function , resid_pdrop=resid_pdrop , embd_pdrop=embd_pdrop , attn_pdrop=attn_pdrop , layer_norm_epsilon=layer_norm_epsilon , initializer_range=initializer_range , scale_attn_weights=scale_attn_weights , use_cache=use_cache , scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx , reorder_and_upcast_attn=reorder_and_upcast_attn , )
        self.transformer = GPT2LMHeadModel(gpt_config )
    def forward( self , input_ids : torch.Tensor , prefix_embeds : torch.Tensor , attention_mask : Optional[torch.Tensor] = None , labels : Optional[torch.Tensor] = None , ):
        '''simple docstring'''
        embedding_text = self.transformer.transformer.wte(input_ids )
        hidden = self.encode_prefix(prefix_embeds )
        prefix_embeds = self.decode_prefix(hidden )
        embedding_cat = torch.cat((prefix_embeds, embedding_text) , dim=1 )
        if labels is not None:
            dummy_token = self.get_dummy_token(input_ids.shape[0] , input_ids.device )
            labels = torch.cat((dummy_token, input_ids) , dim=1 )
        out = self.transformer(inputs_embeds=embedding_cat , labels=labels , attention_mask=attention_mask )
        if self.prefix_hidden_dim is not None:
            return out, hidden
        else:
            return out
    def get_dummy_token( self , batch_size : int , device : torch.device ):
        '''simple docstring'''
        return torch.zeros(batch_size , self.prefix_length , dtype=torch.int64 , device=device )
    def encode( self , prefix ):
        '''simple docstring'''
        return self.encode_prefix(prefix )
    @torch.no_grad()
    def generate_captions( self , features , eos_token_id , device ):
        '''simple docstring'''
        features = torch.split(features , 1 , dim=0 )
        generated_tokens = []
        generated_seq_lengths = []
        for feature in features:
            feature = self.decode_prefix(feature.to(device ) )  # back to the clip feature
            # Only support beam search for now
            output_tokens , seq_lengths = self.generate_beam(
                input_embeds=feature , device=device , eos_token_id=eos_token_id )
            generated_tokens.append(output_tokens[0] )
            generated_seq_lengths.append(seq_lengths[0] )
        generated_tokens = torch.stack(generated_tokens )
        generated_seq_lengths = torch.stack(generated_seq_lengths )
        return generated_tokens, generated_seq_lengths
    @torch.no_grad()
    def generate_beam( self , input_embeds=None , device=None , input_ids=None , beam_size : int = 5 , entry_length : int = 67 , temperature : float = 1.0 , eos_token_id : Optional[int] = None , ):
        '''simple docstring'''
        stop_token_index = eos_token_id
        tokens = None
        scores = None
        seq_lengths = torch.ones(beam_size , device=device , dtype=torch.int )
        is_stopped = torch.zeros(beam_size , device=device , dtype=torch.bool )
        if input_embeds is not None:
            generated = input_embeds
        else:
            generated = self.transformer.transformer.wte(input_ids )
        for i in range(entry_length ):
            outputs = self.transformer(inputs_embeds=generated )
            logits = outputs.logits
            logits = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
            logits = logits.softmax(-1 ).log()
            if scores is None:
                scores , next_tokens = logits.topk(beam_size , -1 )
                generated = generated.expand(beam_size , *generated.shape[1:] )
                next_tokens , scores = next_tokens.permute(1 , 0 ), scores.squeeze(0 )
                if tokens is None:
                    tokens = next_tokens
                else:
                    tokens = tokens.expand(beam_size , *tokens.shape[1:] )
                    tokens = torch.cat((tokens, next_tokens) , dim=1 )
            else:
                logits[is_stopped] = -float(np.inf )
                logits[is_stopped, 0] = 0
                scores_sum = scores[:, None] + logits
                seq_lengths[~is_stopped] += 1
                scores_sum_average = scores_sum / seq_lengths[:, None]
                scores_sum_average , next_tokens = scores_sum_average.view(-1 ).topk(beam_size , -1 )
                next_tokens_source = next_tokens // scores_sum.shape[1]
                seq_lengths = seq_lengths[next_tokens_source]
                next_tokens = next_tokens % scores_sum.shape[1]
                next_tokens = next_tokens.unsqueeze(1 )
                tokens = tokens[next_tokens_source]
                tokens = torch.cat((tokens, next_tokens) , dim=1 )
                generated = generated[next_tokens_source]
                scores = scores_sum_average * seq_lengths
                is_stopped = is_stopped[next_tokens_source]
            next_token_embed = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 )
            generated = torch.cat((generated, next_token_embed) , dim=1 )
            is_stopped = is_stopped + next_tokens.eq(stop_token_index ).squeeze()
            if is_stopped.all():
                break
        scores = scores / seq_lengths
        order = scores.argsort(descending=True )
        # tokens tensors are already padded to max_seq_length
        output_texts = [tokens[i] for i in order]
        output_texts = torch.stack(output_texts , dim=0 )
        seq_lengths = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype )
        return output_texts, seq_lengths
| 300 | 0 |
from scipy.stats import pearsonr
import datasets
_DESCRIPTION = '''
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions (`list` of `int`): Predicted class labels, as returned by a model.
references (`list` of `int`): Ground truth labels.
return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.
Returns:
pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
    p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.
Examples:
Example 1-A simple example using only predictions and references.
>>> pearsonr_metric = datasets.load_metric("pearsonr")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
>>> print(round(results[\'pearsonr\'], 2))
-0.74
Example 2-The same as Example 1, but that also returns the `p-value`.
>>> pearsonr_metric = datasets.load_metric("pearsonr")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
>>> print(sorted(list(results.keys())))
[\'p-value\', \'pearsonr\']
>>> print(round(results[\'pearsonr\'], 2))
-0.74
>>> print(round(results[\'p-value\'], 2))
0.15
'''
_CITATION = '''
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, Ilhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Antonio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A__ ( datasets.Metric ):
    def _info( self ) -> Dict:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""float""" ),
"""references""": datasets.Value("""float""" ),
} ) , reference_urls=["""https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"""] , )
    def _compute( self , predictions , references , return_pvalue=False ):
        '''simple docstring'''
        if return_pvalue:
            results = pearsonr(references , predictions )
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references , predictions )[0] )}
| 358 |
'''simple docstring'''
def solution ( n : int = 1_00 ) -> int:
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 101 | 0 |
import datasets
_CITATION = "\\n@InProceedings{conneau2018xnli,\n author = \"Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin\",\n title = \"XNLI: Evaluating Cross-lingual Sentence Representations\",\n booktitle = \"Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing\",\n year = \"2018\",\n publisher = \"Association for Computational Linguistics\",\n location = \"Brussels, Belgium\",\n}\n"
_DESCRIPTION = "\\nXNLI is a subset of a few thousand examples from MNLI which has been translated\ninto 14 different languages (some low-ish resource). As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n"
_KWARGS_DESCRIPTION = "\nComputes XNLI score which is just simple accuracy.\nArgs:\n predictions: Predicted labels.\n references: Ground truth labels.\nReturns:\n 'accuracy': accuracy\nExamples:\n\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> xnli_metric = datasets.load_metric(\"xnli\")\n >>> results = xnli_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0}\n"
def simple_accuracy ( preds , labels ):
    return (preds == labels).mean()
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class lowercase__ ( datasets.Metric):
    def _info( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''int64''' if self.config_name != '''sts-b''' else '''float32''' ),
'''references''': datasets.Value('''int64''' if self.config_name != '''sts-b''' else '''float32''' ),
} ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' , )
    def _compute( self , predictions , references ):
        '''simple docstring'''
        return {"accuracy": simple_accuracy(predictions , references )}
| 182 |
import pytest
DATASET_LOADING_SCRIPT_NAME = "__dummy_dataset1__"
DATASET_LOADING_SCRIPT_CODE = "\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\"\nURLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n    def _info(self):\n        features = datasets.Features(\n            {\n                \"tokens\": datasets.Sequence(datasets.Value(\"string\")),\n                \"ner_tags\": datasets.Sequence(\n                    datasets.features.ClassLabel(\n                        names=[\n                            \"O\",\n                            \"B-PER\",\n                            \"I-PER\",\n                            \"B-ORG\",\n                            \"I-ORG\",\n                            \"B-LOC\",\n                            \"I-LOC\",\n                        ]\n                    )\n                ),\n                \"langs\": datasets.Sequence(datasets.Value(\"string\")),\n                \"spans\": datasets.Sequence(datasets.Value(\"string\")),\n            }\n        )\n        return datasets.DatasetInfo(features=features)\n\n    def _split_generators(self, dl_manager):\n        dl_path = dl_manager.download(URLS)\n        return [\n            datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}),\n            datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}),\n        ]\n\n    def _generate_examples(self, filepath):\n        with open(filepath, \"r\", encoding=\"utf-8\") as f:\n            for i, line in enumerate(f):\n                yield i, json.loads(line)\n"
@pytest.fixture
def dataset_loading_script_name ( ):
    return DATASET_LOADING_SCRIPT_NAME
@pytest.fixture
def dataset_loading_script_code ( ):
    return DATASET_LOADING_SCRIPT_CODE
@pytest.fixture
def dataset_loading_script_dir ( dataset_loading_script_name , dataset_loading_script_code , tmp_path ):
    script_name = dataset_loading_script_name
    script_dir = tmp_path / """datasets""" / script_name
    script_dir.mkdir(parents=True)
    script_path = script_dir / F'''{script_name}.py'''
    with open(script_path , """w""") as f:
        f.write(dataset_loading_script_code)
    return str(script_dir)
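# Hypothetical consumer of the fixtures above (the test body is assumed for
# illustration and is not part of the original conftest):
#
#     def test_dummy_dataset_script(dataset_loading_script_dir):
#         assert dataset_loading_script_dir.endswith(DATASET_LOADING_SCRIPT_NAME)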
| 111 | 0 |
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
pipe1_model_id = 'CompVis/stable-diffusion-v1-1'
pipe2_model_id = 'CompVis/stable-diffusion-v1-2'
pipe3_model_id = 'CompVis/stable-diffusion-v1-3'
pipe4_model_id = 'CompVis/stable-diffusion-v1-4'
class lowerCamelCase_ ( DiffusionPipeline ):
    '''simple docstring'''
    def __init__( self , vae : AutoencoderKL , text_encoder : CLIPTextModel , tokenizer : CLIPTokenizer , unet : UNet2DConditionModel , scheduler : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , safety_checker : StableDiffusionSafetyChecker , feature_extractor : CLIPImageProcessor , requires_safety_checker : bool = True , ):
        super().__init__()
        self.pipe1 = StableDiffusionPipeline.from_pretrained(pipe1_model_id )
        self.pipe2 = StableDiffusionPipeline.from_pretrained(pipe2_model_id )
        self.pipe3 = StableDiffusionPipeline.from_pretrained(pipe3_model_id )
        self.pipe4 = StableDiffusionPipeline(
            vae=vae , text_encoder=text_encoder , tokenizer=tokenizer , unet=unet , scheduler=scheduler , safety_checker=safety_checker , feature_extractor=feature_extractor , requires_safety_checker=requires_safety_checker , )
        self.register_modules(pipeline1=self.pipe1 , pipeline2=self.pipe2 , pipeline3=self.pipe3 , pipeline4=self.pipe4 )
    @property
    def layers( self ):
        return {k: getattr(self , k ) for k in self.config.keys() if not k.startswith('_' )}
    def enable_attention_slicing( self , slice_size : Optional[Union[str, int]] = "auto" ):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size )
    def disable_attention_slicing( self ):
        self.enable_attention_slicing(None )
    @torch.no_grad()
    def text2img_sd1_1( self , prompt : Union[str, List[str]] , height : int = 512 , width : int = 512 , num_inference_steps : int = 50 , guidance_scale : float = 7.5 , negative_prompt : Optional[Union[str, List[str]]] = None , num_images_per_prompt : Optional[int] = 1 , eta : float = 0.0 , generator : Optional[torch.Generator] = None , latents : Optional[torch.FloatTensor] = None , output_type : Optional[str] = "pil" , return_dict : bool = True , callback : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , callback_steps : int = 1 , **kwargs , ):
        return self.pipe1(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
    @torch.no_grad()
    def text2img_sd1_2( self , prompt : Union[str, List[str]] , height : int = 512 , width : int = 512 , num_inference_steps : int = 50 , guidance_scale : float = 7.5 , negative_prompt : Optional[Union[str, List[str]]] = None , num_images_per_prompt : Optional[int] = 1 , eta : float = 0.0 , generator : Optional[torch.Generator] = None , latents : Optional[torch.FloatTensor] = None , output_type : Optional[str] = "pil" , return_dict : bool = True , callback : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , callback_steps : int = 1 , **kwargs , ):
        return self.pipe2(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
    @torch.no_grad()
    def text2img_sd1_3( self , prompt : Union[str, List[str]] , height : int = 512 , width : int = 512 , num_inference_steps : int = 50 , guidance_scale : float = 7.5 , negative_prompt : Optional[Union[str, List[str]]] = None , num_images_per_prompt : Optional[int] = 1 , eta : float = 0.0 , generator : Optional[torch.Generator] = None , latents : Optional[torch.FloatTensor] = None , output_type : Optional[str] = "pil" , return_dict : bool = True , callback : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , callback_steps : int = 1 , **kwargs , ):
        return self.pipe3(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
    @torch.no_grad()
    def text2img_sd1_4( self , prompt : Union[str, List[str]] , height : int = 512 , width : int = 512 , num_inference_steps : int = 50 , guidance_scale : float = 7.5 , negative_prompt : Optional[Union[str, List[str]]] = None , num_images_per_prompt : Optional[int] = 1 , eta : float = 0.0 , generator : Optional[torch.Generator] = None , latents : Optional[torch.FloatTensor] = None , output_type : Optional[str] = "pil" , return_dict : bool = True , callback : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , callback_steps : int = 1 , **kwargs , ):
        return self.pipe4(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
    @torch.no_grad()
    def __call__( self , prompt : Union[str, List[str]] , height : int = 512 , width : int = 512 , num_inference_steps : int = 50 , guidance_scale : float = 7.5 , negative_prompt : Optional[Union[str, List[str]]] = None , num_images_per_prompt : Optional[int] = 1 , eta : float = 0.0 , generator : Optional[torch.Generator] = None , latents : Optional[torch.FloatTensor] = None , output_type : Optional[str] = "pil" , return_dict : bool = True , callback : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , callback_steps : int = 1 , **kwargs , ):
        device = 'cuda' if torch.cuda.is_available() else 'cpu'
        self.to(device )
        # Checks if the height and width are divisible by 8 or not
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` must be divisible by 8 but are {height} and {width}." )
        # Get first result from Stable Diffusion Checkpoint v1.1
        res1 = self.text2img_sd1_1(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
        # Get first result from Stable Diffusion Checkpoint v1.2
        res2 = self.text2img_sd1_2(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
        # Get first result from Stable Diffusion Checkpoint v1.3
        res3 = self.text2img_sd1_3(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
        # Get first result from Stable Diffusion Checkpoint v1.4
        res4 = self.text2img_sd1_4(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
        # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
        return StableDiffusionPipelineOutput([res1[0], res2[0], res3[0], res4[0]] )
 | 371 |
import gc
import threading
import time
import psutil
import torch
class lowerCamelCase_ :
'''simple docstring'''
    def __init__( self ):
        self.process = psutil.Process()
        self.peak_monitoring = False
    def peak_monitor( self ):
        self.cpu_memory_peak = -1
        while True:
            self.cpu_memory_peak = max(self.process.memory_info().rss , self.cpu_memory_peak )
            # can't sleep or will not catch the peak right (this comment is here on purpose)
            if not self.peak_monitoring:
                break
    def start( self ):
        self.peak_monitoring = True
        self.thread = threading.Thread(target=self.peak_monitor )
        self.thread.daemon = True
        self.thread.start()
    def stop( self ):
        self.peak_monitoring = False
        self.thread.join()
        return self.cpu_memory_peak
cpu_peak_tracker = PeakCPUMemory()
def start_measure ( ):
    # Time
    measures = {'time': time.time()}
    gc.collect()
    torch.cuda.empty_cache()
    # CPU mem
    measures['cpu'] = psutil.Process().memory_info().rss
    cpu_peak_tracker.start()
    # GPU mem
    for i in range(torch.cuda.device_count() ):
        measures[str(i )] = torch.cuda.memory_allocated(i )
    torch.cuda.reset_peak_memory_stats()
    return measures
def end_measure ( start_measures ):
    # Time
    measures = {'time': time.time() - start_measures['time']}
    gc.collect()
    torch.cuda.empty_cache()
    # CPU mem
    measures['cpu'] = (psutil.Process().memory_info().rss - start_measures['cpu']) / 2**20
    measures['cpu-peak'] = (cpu_peak_tracker.stop() - start_measures['cpu']) / 2**20
    # GPU mem
    for i in range(torch.cuda.device_count() ):
        measures[str(i )] = (torch.cuda.memory_allocated(i ) - start_measures[str(i )]) / 2**20
        measures[f"{i}-peak"] = (torch.cuda.max_memory_allocated(i ) - start_measures[str(i )]) / 2**20
    return measures
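# Typical flow for the measurement helpers in this file (log_measures is
# defined just below):
#
#     start = start_measure()
#     ... run the workload to profile ...
#     stats = end_measure(start)
#     log_measures(stats, "forward pass")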
def log_measures ( measures , description ):
    print(f"{description}:" )
    print(f"- Time: {measures['time']:.2f}s" )
    for i in range(torch.cuda.device_count() ):
        print(f"- GPU {i} allocated: {measures[str(i )]:.2f}MiB" )
        peak = measures[f"{i}-peak"]
        print(f"- GPU {i} peak: {peak:.2f}MiB" )
    print(f"- CPU RAM allocated: {measures['cpu']:.2f}MiB" )
    print(f"- CPU RAM peak: {measures['cpu-peak']:.2f}MiB" )
 | 210 | 0 |
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class FlaxAlbertModelTester ( unittest.TestCase ):
'''simple docstring'''
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_attention_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_choices=4 , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs( self ):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        config = AlbertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config , input_ids , token_type_ids , attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class _UpperCamelCase ( FlaxModelTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    all_model_classes = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp( self ):
        """simple docstring"""
        self.model_tester = FlaxAlbertModelTester(self )
    @slow
    def test_model_from_pretrained( self ):
        """simple docstring"""
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("albert-base-v2" )
            outputs = model(np.ones((1, 1) ) )
            self.assertIsNotNone(outputs )
@require_flax
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
    @slow
    def test_inference( self ):
        """simple docstring"""
        model = FlaxAlbertModel.from_pretrained("albert-base-v2" )
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
        attention_mask = np.array([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        output = model(input_ids , attention_mask=attention_mask )[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape , expected_shape )
        expected_slice = np.array(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] )
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , expected_slice , atol=1e-4 ) )
 | 76 |
"""simple docstring"""
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class UpperCamelCase__( Pipeline ):
    def __init__( self , *args , **kwargs ):
        super().__init__(*args , **kwargs )
        requires_backends(self , 'decord' )
        self.check_model_type(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING )
    def _sanitize_parameters( self , top_k=None , num_frames=None , frame_sampling_rate=None ):
        preprocess_params = {}
        if frame_sampling_rate is not None:
            preprocess_params['''frame_sampling_rate'''] = frame_sampling_rate
        if num_frames is not None:
            preprocess_params['''num_frames'''] = num_frames
        postprocess_params = {}
        if top_k is not None:
            postprocess_params['''top_k'''] = top_k
        return preprocess_params, {}, postprocess_params
    def __call__( self , video , **kwargs ):
        return super().__call__(video , **kwargs )
    def preprocess( self , video , num_frames=None , frame_sampling_rate=1 ):
        if num_frames is None:
            num_frames = self.model.config.num_frames
        if video.startswith('http://' ) or video.startswith('https://' ):
            video = BytesIO(requests.get(video ).content )
        videoreader = VideoReader(video )
        videoreader.seek(0 )
        start_idx = 0
        end_idx = num_frames * frame_sampling_rate - 1
        indices = np.linspace(start_idx , end_idx , num=num_frames , dtype=np.int64 )
        video = videoreader.get_batch(indices ).asnumpy()
        video = list(video )
        model_inputs = self.image_processor(video , return_tensors=self.framework )
        return model_inputs
    def _forward( self , model_inputs ):
        model_outputs = self.model(**model_inputs )
        return model_outputs
    def postprocess( self , model_outputs , top_k=5 ):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1 )[0]
            scores , ids = probs.topk(top_k )
        else:
            raise ValueError(f'''Unsupported framework: {self.framework}''' )
        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores , ids )]
| 221 | 0 |
import flax.linen as nn
import jax
import jax.numpy as jnp
class FlaxUpsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32
    def setup(self):
        self.conv = nn.Conv(
            self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
    def __call__(self , hidden_states):
        batch , height , width , channels = hidden_states.shape
        hidden_states = jax.image.resize(
            hidden_states , shape=(batch, height * 2, width * 2, channels) , method="""nearest""" , )
        hidden_states = self.conv(hidden_states )
        return hidden_states
class FlaxDownsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32
    def setup(self):
        self.conv = nn.Conv(
            self.out_channels , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
    def __call__(self , hidden_states):
        # pad = ((0, 0), (0, 1), (0, 1), (0, 0)) # pad height and width dim
        # hidden_states = jnp.pad(hidden_states, pad_width=pad)
        hidden_states = self.conv(hidden_states )
        return hidden_states
class FlaxResnetBlock2D(nn.Module):
    in_channels: int
    out_channels: int = None
    dropout_prob: float = 0.0
    use_nin_shortcut: bool = None
    dtype: jnp.dtype = jnp.float32
    def setup(self):
        out_channels = self.in_channels if self.out_channels is None else self.out_channels
        self.norm1 = nn.GroupNorm(num_groups=32 , epsilon=1E-5 )
        self.conv1 = nn.Conv(
            out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
        self.time_emb_proj = nn.Dense(out_channels , dtype=self.dtype )
        self.norm2 = nn.GroupNorm(num_groups=32 , epsilon=1E-5 )
        self.dropout = nn.Dropout(self.dropout_prob )
        self.conv2 = nn.Conv(
            out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
        use_nin_shortcut = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut
        self.conv_shortcut = None
        if use_nin_shortcut:
            self.conv_shortcut = nn.Conv(
                out_channels , kernel_size=(1, 1) , strides=(1, 1) , padding="""VALID""" , dtype=self.dtype , )
    def __call__(self , hidden_states , temb , deterministic=True):
        residual = hidden_states
        hidden_states = self.norm1(hidden_states )
        hidden_states = nn.swish(hidden_states )
        hidden_states = self.conv1(hidden_states )
        temb = self.time_emb_proj(nn.swish(temb ) )
        temb = jnp.expand_dims(jnp.expand_dims(temb , 1 ) , 1 )
        hidden_states = hidden_states + temb
        hidden_states = self.norm2(hidden_states )
        hidden_states = nn.swish(hidden_states )
        hidden_states = self.dropout(hidden_states , deterministic )
        hidden_states = self.conv2(hidden_states )
        if self.conv_shortcut is not None:
            residual = self.conv_shortcut(residual )
        return hidden_states + residual
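# Minimal init/apply sketch for FlaxResnetBlock2D above (NHWC layout; the
# sizes are arbitrary illustration values, not from the original file):
if __name__ == "__main__":
    block = FlaxResnetBlock2D(in_channels=32, out_channels=32)
    sample = jnp.ones((1, 8, 8, 32))
    temb = jnp.ones((1, 128))
    params = block.init(jax.random.PRNGKey(0), sample, temb)
    out = block.apply(params, sample, temb)
    print(out.shape)  # (1, 8, 8, 32)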
| 49 |
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class _SCREAMING_SNAKE_CASE ( lowerCAmelCase__):
    def setUp( self ):
        self.test_model = SMALL_MODEL_IDENTIFIER
        self.framework_pt = """pt"""
        self.framework_tf = """tf"""
    def _setup_pt_ckpt( self , model_path ):
        model_pt = AutoModel.from_pretrained(self.test_model )
        model_pt.save_pretrained(model_path )
    def _setup_tf_ckpt( self , model_path ):
        model_tf = TFAutoModel.from_pretrained(self.test_model , from_pt=True )
        model_tf.save_pretrained(model_path )
    def test_framework_provided( self ):
        mock_framework = """mock_framework"""
        # Framework provided - return whatever the user provides
        framework = FeaturesManager.determine_framework(self.test_model , mock_framework )
        self.assertEqual(framework , mock_framework )
        # Local checkpoint and framework provided - return provided framework
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt )
            framework = FeaturesManager.determine_framework(local_pt_ckpt , mock_framework )
            self.assertEqual(framework , mock_framework )
        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt )
            framework = FeaturesManager.determine_framework(local_tf_ckpt , mock_framework )
            self.assertEqual(framework , mock_framework )
    def test_checkpoint_provided( self ):
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt )
            framework = FeaturesManager.determine_framework(local_pt_ckpt )
            self.assertEqual(framework , self.framework_pt )
        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt )
            framework = FeaturesManager.determine_framework(local_tf_ckpt )
            self.assertEqual(framework , self.framework_tf )
        # Invalid local checkpoint
        with TemporaryDirectory() as local_invalid_ckpt:
            with self.assertRaises(FileNotFoundError ):
                framework = FeaturesManager.determine_framework(local_invalid_ckpt )
    def test_from_environment( self ):
        # PyTorch in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=False )
        with patch("""transformers.onnx.features.is_tf_available""" , mock_tf_available ):
            framework = FeaturesManager.determine_framework(self.test_model )
            self.assertEqual(framework , self.framework_pt )
        # PyTorch not in environment -> use TensorFlow
        mock_torch_available = MagicMock(return_value=False )
        with patch("""transformers.onnx.features.is_torch_available""" , mock_torch_available ):
            framework = FeaturesManager.determine_framework(self.test_model )
            self.assertEqual(framework , self.framework_tf )
        # Both in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=True )
        mock_torch_available = MagicMock(return_value=True )
        with patch("""transformers.onnx.features.is_tf_available""" , mock_tf_available ), patch(
            """transformers.onnx.features.is_torch_available""" , mock_torch_available ):
            framework = FeaturesManager.determine_framework(self.test_model )
            self.assertEqual(framework , self.framework_pt )
        # Both not in environment -> raise error
        mock_tf_available = MagicMock(return_value=False )
        mock_torch_available = MagicMock(return_value=False )
        with patch("""transformers.onnx.features.is_tf_available""" , mock_tf_available ), patch(
            """transformers.onnx.features.is_torch_available""" , mock_torch_available ):
            with self.assertRaises(EnvironmentError ):
                framework = FeaturesManager.determine_framework(self.test_model )
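# Direct call sketch of the API exercised above (model id assumed for
# illustration; the result depends on which frameworks are installed):
#
#     from transformers.onnx import FeaturesManager
#     FeaturesManager.determine_framework("bert-base-uncased")  # -> "pt" when torch is installed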
| 49 | 1 |
'''simple docstring'''
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
SAMPLE_BPE_VOCAB = get_tests_dir('fixtures/test_sentencepiece_bpe.model')
FRAMEWORK = 'pt' if is_torch_available() else 'tf'
@require_sentencepiece
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = CamembertTokenizer
    rust_tokenizer_class = CamembertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp( self ):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = CamembertTokenizer(SAMPLE_VOCAB )
        tokenizer.save_pretrained(self.tmpdirname )
    def test_convert_token_and_id( self ):
        token = '<pad>'
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
    def test_get_vocab( self ):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , '''<s>NOTUSED''' )
        self.assertEqual(vocab_keys[1] , '''<pad>''' )
        self.assertEqual(vocab_keys[-1] , '''<mask>''' )
        self.assertEqual(len(vocab_keys ) , 10_04 )
    def test_vocab_size( self ):
        self.assertEqual(self.get_tokenizer().vocab_size , 10_05 )
    def test_rust_and_python_bpe_tokenizers( self ):
        tokenizer = CamembertTokenizer(SAMPLE_BPE_VOCAB )
        tokenizer.save_pretrained(self.tmpdirname )
        rust_tokenizer = CamembertTokenizerFast.from_pretrained(self.tmpdirname )
        sequence = 'I was born in 92000, and this is falsé.'
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        # <unk> tokens are not the same for `rust` than for `slow`.
        # Because spm gives back raw token instead of `unk` in EncodeAsPieces
        # tokens = tokenizer.tokenize(sequence)
        tokens = tokenizer.convert_ids_to_tokens(ids )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )
    def test_rust_and_python_full_tokenizers( self ):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = 'I was born in 92000, and this is falsé.'
        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
    @slow
    def test_tokenizer_integration( self ):
        # fmt: off
        expected_encoding = {'input_ids': [[5, 54, 71_96, 2_97, 30, 23, 7_76, 18, 11, 32_15, 37_05, 82_52, 22, 31_64, 11_81, 21_16, 29, 16, 8_13, 25, 7_91, 33_14, 20, 34_46, 38, 2_75_75, 1_20, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 4_68, 17, 11, 90_88, 20, 15_17, 8, 2_28_04, 1_88_18, 10, 38, 6_29, 6_07, 6_07, 1_42, 19, 71_96, 8_67, 56, 1_03_26, 24, 22_67, 20, 4_16, 50_72, 1_56_12, 2_33, 7_34, 7, 23_99, 27, 16, 30_15, 16_49, 7, 24, 20, 43_38, 23_99, 27, 13, 34_00, 14, 13, 61_89, 8, 9_30, 9, 6]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on
        # camembert is a french model. So we also use french texts.
        sequences = [
            'Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '
            'utilisé principalement dans le domaine du traitement automatique des langues (TAL).',
            'À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '
            'pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '
            'telles que la traduction et la synthèse de texte.',
        ]
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding , model_name='''camembert-base''' , revision='''3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf''' , sequences=sequences , )
 | 152 |
import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse('''1.11''')
def onnx_export ( model : Any , model_args : tuple , output_path : Path , ordered_input_names , output_names , dynamic_axes , opset , use_external_data_format=False , ):
    """simple docstring"""
    output_path.parent.mkdir(parents=True , exist_ok=True )
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model , model_args , f=output_path.as_posix() , input_names=ordered_input_names , output_names=output_names , dynamic_axes=dynamic_axes , do_constant_folding=True , use_external_data_format=use_external_data_format , enable_onnx_checker=True , opset_version=opset , )
    else:
        export(
            model , model_args , f=output_path.as_posix() , input_names=ordered_input_names , output_names=output_names , dynamic_axes=dynamic_axes , do_constant_folding=True , opset_version=opset , )
@torch.no_grad()
def convert_models ( model_path : str , output_path : str , opset : int , fp16 : bool = False ):
    """simple docstring"""
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = 'cuda'
    elif fp16 and not torch.cuda.is_available():
        raise ValueError('`float16` model export is only supported on GPUs with CUDA' )
    else:
        device = 'cpu'
    output_path = Path(output_path )
    # VAE DECODER
    vae_decoder = AutoencoderKL.from_pretrained(model_path + '/vae' )
    vae_latent_channels = vae_decoder.config.latent_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_decoder.decode
    onnx_export(
        vae_decoder , model_args=(
            torch.randn(1 , vae_latent_channels , 25 , 25 ).to(device=device , dtype=dtype ),
            False,
        ) , output_path=output_path / 'vae_decoder' / 'model.onnx' , ordered_input_names=['latent_sample', 'return_dict'] , output_names=['sample'] , dynamic_axes={
            'latent_sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
        } , opset=opset , )
    del vae_decoder
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_path''',
type=str,
required=True,
help='''Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).''',
)
parser.add_argument('''--output_path''', type=str, required=True, help='''Path to the output model.''')
parser.add_argument(
'''--opset''',
default=1_4,
type=int,
help='''The version of the ONNX operator set to use.''',
)
parser.add_argument('''--fp16''', action='''store_true''', default=False, help='''Export the models in `float16` mode''')
    args = parser.parse_args()
    print(args.output_path)
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
print('''SD: Done: ONNX''')
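# Example invocation of this script (script file name and paths are
# hypothetical; --fp16 requires a CUDA-capable GPU per the check in
# convert_models):
#
#     python convert_vae_to_onnx.py --model_path ./stable-diffusion-v1-4 --output_path ./sd_onnx --opset 14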
| 322 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__UpperCamelCase : Union[str, Any] = {
"configuration_roberta_prelayernorm": [
"ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP",
"RobertaPreLayerNormConfig",
"RobertaPreLayerNormOnnxConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : List[str] = [
"ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST",
"RobertaPreLayerNormForCausalLM",
"RobertaPreLayerNormForMaskedLM",
"RobertaPreLayerNormForMultipleChoice",
"RobertaPreLayerNormForQuestionAnswering",
"RobertaPreLayerNormForSequenceClassification",
"RobertaPreLayerNormForTokenClassification",
"RobertaPreLayerNormModel",
"RobertaPreLayerNormPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roberta_prelayernorm"] = [
"TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRobertaPreLayerNormForCausalLM",
"TFRobertaPreLayerNormForMaskedLM",
"TFRobertaPreLayerNormForMultipleChoice",
"TFRobertaPreLayerNormForQuestionAnswering",
"TFRobertaPreLayerNormForSequenceClassification",
"TFRobertaPreLayerNormForTokenClassification",
"TFRobertaPreLayerNormMainLayer",
"TFRobertaPreLayerNormModel",
"TFRobertaPreLayerNormPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roberta_prelayernorm"] = [
"FlaxRobertaPreLayerNormForCausalLM",
"FlaxRobertaPreLayerNormForMaskedLM",
"FlaxRobertaPreLayerNormForMultipleChoice",
"FlaxRobertaPreLayerNormForQuestionAnswering",
"FlaxRobertaPreLayerNormForSequenceClassification",
"FlaxRobertaPreLayerNormForTokenClassification",
"FlaxRobertaPreLayerNormModel",
"FlaxRobertaPreLayerNormPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
RobertaPreLayerNormConfig,
RobertaPreLayerNormOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaPreLayerNormForCausalLM,
RobertaPreLayerNormForMaskedLM,
RobertaPreLayerNormForMultipleChoice,
RobertaPreLayerNormForQuestionAnswering,
RobertaPreLayerNormForSequenceClassification,
RobertaPreLayerNormForTokenClassification,
RobertaPreLayerNormModel,
RobertaPreLayerNormPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta_prelayernorm import (
TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaPreLayerNormForCausalLM,
TFRobertaPreLayerNormForMaskedLM,
TFRobertaPreLayerNormForMultipleChoice,
TFRobertaPreLayerNormForQuestionAnswering,
TFRobertaPreLayerNormForSequenceClassification,
TFRobertaPreLayerNormForTokenClassification,
TFRobertaPreLayerNormMainLayer,
TFRobertaPreLayerNormModel,
TFRobertaPreLayerNormPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
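# Note on the lazy-import pattern above: importing this module is cheap because
# `_LazyModule` only resolves an attribute's backing module on first access, so
# e.g. `from transformers import RobertaPreLayerNormModel` is what actually
# triggers the torch-backed import.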
| 347 |
def max_product_subarray(numbers: list) -> int:
    if not numbers:
        return 0

    if not isinstance(numbers, (list, tuple)) or not all(
        isinstance(number, int) for number in numbers
    ):
        raise ValueError("numbers must be an iterable of integers")

    max_till_now = min_till_now = max_prod = numbers[0]

    for i in range(1, len(numbers)):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            max_till_now, min_till_now = min_till_now, max_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)

        # update the maximum product found till now
        max_prod = max(max_prod, max_till_now)

    return max_prod
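

if __name__ == "__main__":
    # Small demonstration of the sign-flip handling:
    # [2, 3, -2, 4] -> best subarray is [2, 3] with product 6
    print(max_product_subarray([2, 3, -2, 4]))  # 6
    # [-2, 0, -1] -> 0: any subarray joining the two negatives must include the 0
    print(max_product_subarray([-2, 0, -1]))  # 0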
| 347 | 1 |
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
SecondaryLearner,
collect_objective_set,
compute_perplexity,
generate_datasets,
load_gpta,
recopy_gpta,
set_seed,
train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPTaLMHeadModel
def generate_n_pairs( context_len=32 , max_steps=10 , size_objective_set=100 , min_len=1_026 , trim=True , data_file="data/tokenized_stories_train_wikitext103.jbl" , igf_data_file="igf_context_pairs.jbl" , ):
'''simple docstring'''
set_seed(3 )
# generate train_data and objective_set
__UpperCamelCase , __UpperCamelCase :Optional[Any] = generate_datasets(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , number=SCREAMING_SNAKE_CASE , min_len=1_026 , trim=SCREAMING_SNAKE_CASE )
# keeps model same across runs
set_seed(4 )
# model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
# can we train on GPU?
__UpperCamelCase :List[Any] = torch.device('''cuda:0''' if torch.cuda.is_available() else '''cpu''' )
# load pretrained model
__UpperCamelCase :str = load_gpta('''gpt2''' ).to(SCREAMING_SNAKE_CASE )
print('''computing perplexity on objective set''' )
__UpperCamelCase :List[str] = compute_perplexity(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).item()
print('''perplexity on objective set:''' , SCREAMING_SNAKE_CASE )
# collect igf pairs and save to file demo.jbl
collect_objective_set(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# clean up, delete model and data we don't need anymore
del model, train_data, objective_set
torch.cuda.empty_cache()
def training_secondary_learner( secondary_learner_train_data , secondary_learner_max_epochs=15 , secondary_learner_batch_size=128 , eval_freq=100 , igf_model_path="igf_model.pt" , ):
'''simple docstring'''
set_seed(42 )
# Load pre-trained model
__UpperCamelCase :str = GPTaLMHeadModel.from_pretrained('''gpt2''' )
# Initialize secondary learner to use embedding weights of model
__UpperCamelCase :List[str] = SecondaryLearner(SCREAMING_SNAKE_CASE )
# Train secondary learner
__UpperCamelCase :Tuple = train_secondary_learner(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , max_epochs=SCREAMING_SNAKE_CASE , batch_size=SCREAMING_SNAKE_CASE , eval_freq=100 , igf_model_path=SCREAMING_SNAKE_CASE , )
del model, secondary_learner_train_data
torch.cuda.empty_cache()
return secondary_learner
def finetune( model , train_dataset , test_dataset , context_len=32 , max_steps=1_000 , batch_size=16 , threshold=1.0 , recopy_model=recopy_gpta , secondary_learner=None , eval_interval=10 , finetuned_model_name="gpt2_finetuned.pt" , ):
'''simple docstring'''
__UpperCamelCase :List[Any] = torch.device('''cuda:0''' if torch.cuda.is_available() else '''cpu''' )
__UpperCamelCase :Tuple = RandomSampler(SCREAMING_SNAKE_CASE )
__UpperCamelCase :Union[str, Any] = DataLoader(SCREAMING_SNAKE_CASE , sampler=SCREAMING_SNAKE_CASE )
__UpperCamelCase :List[Any] = max_steps // (len(SCREAMING_SNAKE_CASE )) + 1
__UpperCamelCase :Optional[int] = 0
__UpperCamelCase :int = torch.zeros((1, context_len) , dtype=torch.long , device=SCREAMING_SNAKE_CASE )
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase :List[str] = recopy_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
model.train()
if secondary_learner is not None:
secondary_learner.to(SCREAMING_SNAKE_CASE )
secondary_learner.eval()
__UpperCamelCase :List[str] = []
__UpperCamelCase :str = 0
__UpperCamelCase :int = []
__UpperCamelCase :int = []
# Compute the performance of the transformer model at the beginning
__UpperCamelCase :List[str] = compute_perplexity(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
test_perps.append(SCREAMING_SNAKE_CASE )
print('''Test perplexity, step''' , SCREAMING_SNAKE_CASE , ''':''' , SCREAMING_SNAKE_CASE )
for epoch in range(int(SCREAMING_SNAKE_CASE ) ):
for step, example in enumerate(SCREAMING_SNAKE_CASE ):
torch.cuda.empty_cache()
__UpperCamelCase :Optional[Any] = random.randint(0 , example.size(2 ) - context_len - 1 )
__UpperCamelCase :Tuple = example[0, 0, start : start + context_len]
lm_optimizer.zero_grad()
__UpperCamelCase :List[str] = model(SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE )
__UpperCamelCase :Any = True
if secondary_learner is not None:
__UpperCamelCase :List[Any] = secondary_learner.forward(
torch.tensor(SCREAMING_SNAKE_CASE , dtype=torch.long , device=SCREAMING_SNAKE_CASE ).unsqueeze(0 ) )[0].item()
observed_qs.append(float(SCREAMING_SNAKE_CASE ) )
# Here we implement the simple non-constant threshold for the predicted IG(X) value
# We will decay the selectivity of our secondary learner filter from
# 1 standard deviation above average to 1 below average after 10 batches.
if global_step == 10:
__UpperCamelCase :List[Any] = -1
if predicted_q < threshold:
__UpperCamelCase :List[str] = False
# If we passed the filter, add the context to the batch!
if do_backprop:
contexts.append(np.array(context.cpu() ) )
__UpperCamelCase :int = outputs[0]
lm_loss.backward()
examples += 1
del outputs
# Once the batch is filled with enough contexts, backprop on the batch.
if examples == batch_size:
torch.cuda.empty_cache()
__UpperCamelCase :Any = 0
# Do LM backprop
torch.nn.utils.clip_grad_norm_(model.parameters() , 3.0 )
lm_optimizer.step()
lm_scheduler.step() # Update learning rate schedule
global_step += 1
# Compute the performance of the transformer model at this batch
if global_step % eval_interval == 0:
__UpperCamelCase :Tuple = compute_perplexity(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
test_perps.append(SCREAMING_SNAKE_CASE )
print('''Test perplexity, step''' , SCREAMING_SNAKE_CASE , ''':''' , SCREAMING_SNAKE_CASE )
# Break out of the loop after 60 batches
if max_steps > 0 and global_step > 60:
break
if max_steps > 0 and global_step > 60:
break
# save finetuned transformer model
torch.save(model.state_dict() , SCREAMING_SNAKE_CASE )
torch.cuda.empty_cache()
# Do some cleaning up so we can reinitialize for the next run of this function
del lm_optimizer
del lm_scheduler
return model
def main():
'''simple docstring'''
__UpperCamelCase :List[str] = argparse.ArgumentParser(description='''Fine-tune a transformer model with IGF on a language modeling task''' )
# Required parameters
parser.add_argument(
'''--data_dir''' , default=SCREAMING_SNAKE_CASE , type=SCREAMING_SNAKE_CASE , required=SCREAMING_SNAKE_CASE , help='''The input data dir. Should contain data files for WikiText.''' , )
parser.add_argument(
'''--model_name_or_path''' , default=SCREAMING_SNAKE_CASE , type=SCREAMING_SNAKE_CASE , required=SCREAMING_SNAKE_CASE , help='''Path to pretrained model or model identifier from huggingface.co/models''' , )
parser.add_argument(
'''--data_file''' , type=SCREAMING_SNAKE_CASE , default=SCREAMING_SNAKE_CASE , help=(
'''A jbl file containing tokenized data which can be split as objective dataset, '''
'''train_dataset and test_dataset.'''
) , )
parser.add_argument(
'''--igf_data_file''' , type=SCREAMING_SNAKE_CASE , default=SCREAMING_SNAKE_CASE , help='''A jbl file containing the context and information gain pairs to train secondary learner.''' , )
parser.add_argument(
'''--output_dir''' , default=SCREAMING_SNAKE_CASE , type=SCREAMING_SNAKE_CASE , required=SCREAMING_SNAKE_CASE , help='''The output directory where the final fine-tuned model is stored.''' , )
parser.add_argument(
'''--tokenizer_name''' , default=SCREAMING_SNAKE_CASE , type=SCREAMING_SNAKE_CASE , help='''Pretrained tokenizer name or path if not the same as model_name''' , )
parser.add_argument('''--seed''' , type=SCREAMING_SNAKE_CASE , default=SCREAMING_SNAKE_CASE , help='''A seed for reproducible training.''' )
parser.add_argument(
'''--context_len''' , default=32 , type=SCREAMING_SNAKE_CASE , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument(
'''--size_objective_set''' , default=100 , type=SCREAMING_SNAKE_CASE , help='''number of articles that are long enough to be used as our objective set''' , )
parser.add_argument(
'''--eval_freq''' , default=100 , type=SCREAMING_SNAKE_CASE , help='''secondary model evaluation is triggered at eval_freq''' )
parser.add_argument('''--max_steps''' , default=1_000 , type=SCREAMING_SNAKE_CASE , help='''To calculate training epochs''' )
parser.add_argument(
'''--secondary_learner_batch_size''' , default=128 , type=SCREAMING_SNAKE_CASE , help='''batch size of training data for secondary learner''' , )
parser.add_argument(
'''--batch_size''' , default=16 , type=SCREAMING_SNAKE_CASE , help='''batch size of training data of language model(gpt2) ''' )
parser.add_argument(
'''--eval_interval''' , default=10 , type=SCREAMING_SNAKE_CASE , help=(
'''decay the selectivity of our secondary learner filter from'''
'''1 standard deviation above average to 1 below average after 10 batches'''
) , )
parser.add_argument(
'''--number''' , default=100 , type=SCREAMING_SNAKE_CASE , help='''The number of examples split to be used as objective_set/test_data''' )
parser.add_argument(
'''--min_len''' , default=1_026 , type=SCREAMING_SNAKE_CASE , help='''The minimum length of the article to be used as objective set''' )
parser.add_argument(
'''--secondary_learner_max_epochs''' , default=15 , type=SCREAMING_SNAKE_CASE , help='''number of epochs to train secondary learner''' )
parser.add_argument('''--trim''' , default=SCREAMING_SNAKE_CASE , type=SCREAMING_SNAKE_CASE , help='''truncate the example if it exceeds context length''' )
parser.add_argument(
'''--threshold''' , default=1.0 , type=SCREAMING_SNAKE_CASE , help=(
'''The threshold value used by secondary learner to filter the train_data and allow only'''
''' informative data as input to the model'''
) , )
parser.add_argument('''--finetuned_model_name''' , default='''gpt2_finetuned.pt''' , type=SCREAMING_SNAKE_CASE , help='''finetuned_model_name''' )
parser.add_argument(
'''--recopy_model''' , default=SCREAMING_SNAKE_CASE , type=SCREAMING_SNAKE_CASE , help='''Reset the model to the original pretrained GPT-2 weights after each iteration''' , )
# function calls
# Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
generate_n_pairs(
context_len=32 , max_steps=10 , size_objective_set=100 , min_len=1_026 , trim=SCREAMING_SNAKE_CASE , data_file='''data/tokenized_stories_train_wikitext103.jbl''' , igf_data_file='''igf_context_pairs.jbl''' , )
# Load train data for secondary learner
__UpperCamelCase :Optional[Any] = joblib.load('''data/IGF_values.jbl''' )
# Train secondary learner
__UpperCamelCase :str = training_secondary_learner(
SCREAMING_SNAKE_CASE , secondary_learner_max_epochs=15 , secondary_learner_batch_size=128 , eval_freq=100 , igf_model_path='''igf_model.pt''' , )
# load pretrained gpt2 model
__UpperCamelCase :Union[str, Any] = GPTaLMHeadModel.from_pretrained('''gpt2''' )
set_seed(42 )
# Generate train and test data to train and evaluate gpt2 model
__UpperCamelCase , __UpperCamelCase :Dict = generate_datasets(
context_len=32 , file='''data/tokenized_stories_train_wikitext103.jbl''' , number=100 , min_len=1_026 , trim=SCREAMING_SNAKE_CASE )
# fine-tuning of the gpt2 model using igf (Information Gain Filtration)
finetune(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , context_len=32 , max_steps=1_000 , batch_size=16 , threshold=1.0 , recopy_model=SCREAMING_SNAKE_CASE , secondary_learner=SCREAMING_SNAKE_CASE , eval_interval=10 , finetuned_model_name='''gpt2_finetuned.pt''' , )
if __name__ == "__main__":
main()
| 43 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
RAG_CONFIG_DOC = r'''
[`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and
can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.
Args:
title_sep (`str`, *optional*, defaults to `" / "`):
Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].
doc_sep (`str`, *optional*, defaults to `" // "`):
Separator inserted between the text of the retrieved document and the original input when calling
[`RagRetriever`].
n_docs (`int`, *optional*, defaults to 5):
Number of documents to retrieve.
max_combined_length (`int`, *optional*, defaults to 300):
Max length of contextualized input returned by [`~RagRetriever.__call__`].
retrieval_vector_size (`int`, *optional*, defaults to 768):
Dimensionality of the document embeddings indexed by [`RagRetriever`].
retrieval_batch_size (`int`, *optional*, defaults to 8):
Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated
[`RagRetriever`].
dataset (`str`, *optional*, defaults to `"wiki_dpr"`):
A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids
using `datasets.list_datasets()`).
dataset_split (`str`, *optional*, defaults to `"train"`)
Which split of the `dataset` to load.
index_name (`str`, *optional*, defaults to `"compressed"`)
The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and
`"compressed"`.
index_path (`str`, *optional*)
The path to the serialized faiss index on disk.
passages_path (`str`, *optional*):
A path to text passages compatible with the faiss index. Required if using
[`~models.rag.retrieval_rag.LegacyIndex`]
use_dummy_dataset (`bool`, *optional*, defaults to `False`)
Whether to load a "dummy" variant of the dataset specified by `dataset`.
label_smoothing (`float`, *optional*, defaults to 0.0):
Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing
in the loss calculation. If set to 0, no label smoothing is performed.
do_marginalize (`bool`, *optional*, defaults to `False`):
If `True`, the logits are marginalized over all documents by making use of
`torch.nn.functional.log_softmax`.
reduce_loss (`bool`, *optional*, defaults to `False`):
Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.
do_deduplication (`bool`, *optional*, defaults to `True`):
Whether or not to deduplicate the generations from different context documents for a given input. Has to be
set to `False` if used while training with distributed backend.
exclude_bos_score (`bool`, *optional*, defaults to `False`):
Whether or not to disregard the BOS token when computing the loss.
output_retrieved(`bool`, *optional*, defaults to `False`):
If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and
`context_attention_mask` are returned. See returned tensors for more detail.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
forced_eos_token_id (`int`, *optional*):
The id of the token to force as the last generated token when `max_length` is reached. Usually set to
`eos_token_id`.
'''
@add_start_docstrings(RAG_CONFIG_DOC)
class RagConfig(PretrainedConfig):
    model_type = "rag"
    is_composition = True

    def __init__(
        self,
        vocab_size=None,
        is_encoder_decoder=True,
        prefix=None,
        bos_token_id=None,
        pad_token_id=None,
        eos_token_id=None,
        decoder_start_token_id=None,
        title_sep=" / ",
        doc_sep=" // ",
        n_docs=5,
        max_combined_length=300,
        retrieval_vector_size=768,
        retrieval_batch_size=8,
        dataset="wiki_dpr",
        dataset_split="train",
        index_name="compressed",
        index_path=None,
        passages_path=None,
        use_dummy_dataset=False,
        reduce_loss=False,
        label_smoothing=0.0,
        do_deduplication=True,
        exclude_bos_score=False,
        do_marginalize=False,
        output_retrieved=False,
        use_cache=True,
        forced_eos_token_id=None,
        **kwargs,
    ):
        super().__init__(
            bos_token_id=bos_token_id,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            prefix=prefix,
            vocab_size=vocab_size,
            **kwargs,
        )
        assert (
            "question_encoder" in kwargs and "generator" in kwargs
        ), "Config has to be initialized with question_encoder and generator config"
        question_encoder_config = kwargs.pop("question_encoder")
        question_encoder_model_type = question_encoder_config.pop("model_type")
        decoder_config = kwargs.pop("generator")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.question_encoder = AutoConfig.for_model(question_encoder_model_type, **question_encoder_config)
        self.generator = AutoConfig.for_model(decoder_model_type, **decoder_config)

        self.reduce_loss = reduce_loss
        self.label_smoothing = label_smoothing
        self.exclude_bos_score = exclude_bos_score
        self.do_marginalize = do_marginalize
        self.title_sep = title_sep
        self.doc_sep = doc_sep
        self.n_docs = n_docs
        self.max_combined_length = max_combined_length
        self.dataset = dataset
        self.dataset_split = dataset_split
        self.index_name = index_name
        self.retrieval_vector_size = retrieval_vector_size
        self.retrieval_batch_size = retrieval_batch_size
        self.passages_path = passages_path
        self.index_path = index_path
        self.use_dummy_dataset = use_dummy_dataset
        self.output_retrieved = output_retrieved
        self.do_deduplication = do_deduplication
        self.use_cache = use_cache

        if self.forced_eos_token_id is None:
            self.forced_eos_token_id = getattr(self.generator, "forced_eos_token_id", None)

    @classmethod
    def from_question_encoder_generator_configs(
        cls, question_encoder_config: PretrainedConfig, generator_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        return cls(question_encoder=question_encoder_config.to_dict(), generator=generator_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["question_encoder"] = self.question_encoder.to_dict()
        output["generator"] = self.generator.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
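

# Illustrative usage (as a comment, since this module is imported through the
# `transformers` package rather than run directly):
#
#     from transformers import AutoConfig, RagConfig
#
#     question_encoder = AutoConfig.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
#     generator = AutoConfig.from_pretrained("facebook/bart-large")
#     rag_config = RagConfig.from_question_encoder_generator_configs(
#         question_encoder, generator, n_docs=5
#     )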
| 103 | 0 |
def euclidean_gcd(a: int, b: int) -> int:
    while b:
        a, b = b, a % b
    return a


def euclidean_gcd_recursive(a: int, b: int) -> int:
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)


def main() -> None:
print(f"euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}" )
print(f"euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}" )
print(f"euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}" )
print(f"euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}" )
print(f"euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}" )
print(f"euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}" )
print(f"euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}" )
print(f"euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}" )
print(f"euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}" )
print(f"euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}" )
if __name__ == "__main__":
main() | 367 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_pegasus_x''': ['''PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''PegasusXConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_pegasus_x"] = [
'''PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PegasusXForConditionalGeneration''',
'''PegasusXModel''',
'''PegasusXPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 335 | 0 |
'''simple docstring'''
def decimal_to_binary(num: int) -> str:
    if isinstance(num, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if isinstance(num, str):
        raise TypeError("'str' object cannot be interpreted as an integer")
    if num == 0:
        return "0b0"
    negative = False
    if num < 0:
        negative = True
        num = -num
    binary = []
    while num > 0:
        binary.insert(0, num % 2)
        num >>= 1
    if negative:
        return "-0b" + "".join(str(e) for e in binary)
    return "0b" + "".join(str(e) for e in binary)
if __name__ == "__main__":
import doctest
doctest.testmod()
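
    # A few sample conversions:
    print(decimal_to_binary(0))  # 0b0
    print(decimal_to_binary(40))  # 0b101000
    print(decimal_to_binary(-40))  # -0b101000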
| 85 |
"""simple docstring"""
from __future__ import annotations
def print_distance(distance: list[float], src):
    print(f"Vertex\tShortest Distance from vertex {src}")
    for i, d in enumerate(distance):
        print(f"{i}\t\t{d}")


def check_negative_cycle(graph: list[dict[str, int]], distance: list[float], edge_count: int) -> bool:
    for j in range(edge_count):
        u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
        if distance[u] != float("inf") and distance[u] + w < distance[v]:
            return True
    return False


def bellman_ford(graph: list[dict[str, int]], vertex_count: int, edge_count: int, src: int) -> list[float]:
    distance = [float("inf")] * vertex_count
    distance[src] = 0.0

    for _ in range(vertex_count - 1):
        for j in range(edge_count):
            u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
            if distance[u] != float("inf") and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w

    negative_cycle_exists = check_negative_cycle(graph, distance, edge_count)
    if negative_cycle_exists:
        raise Exception("Negative cycle found")

    return distance
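

# Programmatic example (supplementing the interactive demo below): a tiny
# 3-vertex graph where the negative edge 1 -> 2 shortens the path to vertex 2.
def _bellman_ford_example() -> list[float]:
    example_graph = [
        {"src": 0, "dst": 1, "weight": 4},
        {"src": 0, "dst": 2, "weight": 5},
        {"src": 1, "dst": 2, "weight": -2},
    ]
    # distances from vertex 0 come out as [0.0, 4.0, 2.0]
    return bellman_ford(example_graph, 3, 3, 0)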
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    V = int(input("Enter number of vertices: ").strip())
    E = int(input("Enter number of edges: ").strip())

    graph: list[dict[str, int]] = [{} for _ in range(E)]

    for i in range(E):
        print("Edge ", i + 1)
        src, dest, weight = (
            int(x)
            for x in input("Enter source, destination, weight: ").strip().split(" ")
        )
        graph[i] = {"src": src, "dst": dest, "weight": weight}

    source = int(input("\nEnter shortest path source:").strip())
    shortest_distance = bellman_ford(graph, V, E, source)
    print_distance(shortest_distance, 0)
| 294 | 0 |
import os
import numpy
import onnx
def _is_equal_tensor_proto(a, b):
    name_a = a.name
    name_b = b.name
    a.name = ""
    b.name = ""
    res = a == b  # with the names stripped, this compares the tensor contents
    a.name = name_a
    b.name = name_b
    return res


def _node_replace_input_with(node_proto, name, new_name):
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)
    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)


def _graph_replace_input_with(graph_proto, name, new_name):
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)


def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i

        name_i = inits[i].name
        name_ref = inits[ref_i].name

        model_without_ext.graph.initializer.remove(inits[i])

        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)


def remove_dup_initializers(onnx_file_path):
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)

    model = onnx.load(os.path.join(model_file_folder, model_file_name))

    inits = list(model.graph.initializer)

    dup_set = set()
    dup_map = {}
    ind_to_replace = []

    total_reduced_size = 0

    for i in range(len(inits)):
        if i in dup_set:
            continue
        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)

                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:
                    mem_size *= 4
                elif dtype == 6:
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8
                else:
                    print("unexpected data type: ", dtype)
                total_reduced_size += mem_size

                name_i = inits[i].name
                name_j = inits[j].name
                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))

    print("total reduced size: ", total_reduced_size / 1024 / 1024 / 1024, "GB")

    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)

    new_model = "optimized_" + model_file_name
    new_model = os.path.join(model_file_folder, new_model)
    onnx.save(model, new_model)

    return new_model
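

# For reference, the magic numbers checked above are onnx.TensorProto data-type
# codes; the byte accounting reads:
#   1 (FLOAT)  -> 4 bytes    6 (INT32)  -> 4 bytes
#   7 (INT64)  -> 8 bytes    11 (DOUBLE) -> 8 bytes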
| 363 |
import json
import os
import torch
from diffusers import UNet1DModel
os.makedirs("hub/hopper-medium-v2/unet/hor32", exist_ok=True)
os.makedirs("hub/hopper-medium-v2/unet/hor128", exist_ok=True)
os.makedirs("hub/hopper-medium-v2/value_function", exist_ok=True)
def unet(hor):
    if hor == 128:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D")
    elif hor == 32:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 64, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D", "UpResnetBlock1D")
    model = torch.load(f"/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch")
    state_dict = model.state_dict()
    config = {
        "down_block_types": down_block_types,
        "block_out_channels": block_out_channels,
        "up_block_types": up_block_types,
        "layers_per_block": 1,
        "use_timestep_embedding": True,
        "out_block_type": "OutConv1DBlock",
        "norm_num_groups": 8,
        "downsample_each_block": False,
        "in_channels": 14,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "sample_size": 65536,
        "mid_block_type": "MidResTemporalBlock1D",
        "act_fn": "mish",
    }
    hf_value_function = UNet1DModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")
    mapping = dict(zip(model.state_dict().keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)
    hf_value_function.load_state_dict(state_dict)

    torch.save(hf_value_function.state_dict(), f"hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin")
    with open(f"hub/hopper-medium-v2/unet/hor{hor}/config.json", "w") as f:
        json.dump(config, f)
def value_function():
    config = {
        "in_channels": 14,
        "down_block_types": ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"),
        "up_block_types": (),
        "out_block_type": "ValueFunction",
        "mid_block_type": "ValueFunctionMidBlock1D",
        "block_out_channels": (32, 64, 128, 256),
        "layers_per_block": 1,
        "downsample_each_block": True,
        "sample_size": 65536,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "use_timestep_embedding": True,
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "norm_num_groups": 8,
        "act_fn": "mish",
    }

    model = torch.load("/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch")
    state_dict = model
    hf_value_function = UNet1DModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")
    mapping = dict(zip(state_dict.keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)
    hf_value_function.load_state_dict(state_dict)

    torch.save(hf_value_function.state_dict(), "hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin")
    with open("hub/hopper-medium-v2/value_function/config.json", "w") as f:
        json.dump(config, f)
if __name__ == "__main__":
unet(32)
# unet(128)
value_function()
| 87 | 0 |
"""simple docstring"""
import string
from math import log10


def term_frequency(term: str, document: str) -> int:
    document_without_punctuation = document.translate(
        str.maketrans("", "", string.punctuation)
    ).replace("\n", "")
    tokenize_document = document_without_punctuation.split(" ")  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()])


def document_frequency(term: str, corpus: str) -> tuple[int, int]:
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("", "", string.punctuation)
    )  # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split("\n")
    term = term.lower()
    return (len([doc for doc in docs if term in doc]), len(docs))


def inverse_document_frequency(df: int, n: int, smoothing: bool = False) -> float:
    if smoothing:
        if n == 0:
            raise ValueError("log10(0) is undefined.")
        return round(1 + log10(n / (1 + df)), 3)
    if df == 0:
        raise ZeroDivisionError("df must be > 0")
    elif n == 0:
        raise ValueError("log10(0) is undefined.")
    return round(log10(n / df), 3)


def tf_idf(tf: int, idf: int) -> float:
    return round(tf * idf, 3)
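

if __name__ == "__main__":
    # Quick demonstration of the three building blocks (values computed by hand):
    print(term_frequency("to", "To be, or not to be"))  # 2
    print(inverse_document_frequency(1, 3))  # 0.477
    print(tf_idf(2, 0.477))  # 0.954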
| 61 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_vit_msn": ["VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMSNConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit_msn"] = [
"""VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ViTMSNModel""",
"""ViTMSNForImageClassification""",
"""ViTMSNPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 96 | 0 |
from __future__ import annotations
def is_palindrome(n: int | str) -> bool:
    n = str(n)
    return n == n[::-1]


def solution(n: int = 1_000_000) -> int:
    total = 0
    for i in range(1, n):
        if is_palindrome(i) and is_palindrome(bin(i).split("b")[1]):
            total += i
    return total
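

# Sanity check: 585 = 0b1001001001 reads the same forwards and backwards in both bases.
assert is_palindrome(585) and is_palindrome(bin(585).split("b")[1])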
if __name__ == "__main__":
print(solution(int(str(input().strip()))))
| 350 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
def squared_euclidean_distance(a, b):
    b = b.T
    a2 = np.sum(np.square(a), axis=1)
    b2 = np.sum(np.square(b), axis=0)
    ab = np.matmul(a, b)
    d = a2[:, None] - 2 * ab + b2[None, :]
    return d


def color_quantize(x, clusters):
    x = x.reshape(-1, 3)
    d = squared_euclidean_distance(x, clusters)
    return np.argmin(d, axis=1)
class ImageGPTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]
def __init__( self :Dict ,__snake_case :Optional[Union[List[List[int]], np.ndarray]] = None ,__snake_case :bool = True ,__snake_case :Dict[str, int] = None ,__snake_case :PILImageResampling = PILImageResampling.BILINEAR ,__snake_case :bool = True ,__snake_case :bool = True ,**__snake_case :Optional[int] ,) -> None:
super().__init__(**__snake_case )
a__ = size if size is not None else {'height': 2_56, 'width': 2_56}
a__ = get_size_dict(__snake_case )
a__ = np.array(__snake_case ) if clusters is not None else None
a__ = do_resize
a__ = size
a__ = resample
a__ = do_normalize
a__ = do_color_quantize
def lowerCamelCase__( self :Union[str, Any] ,__snake_case :np.ndarray ,__snake_case :Dict[str, int] ,__snake_case :PILImageResampling = PILImageResampling.BILINEAR ,__snake_case :Optional[Union[str, ChannelDimension]] = None ,**__snake_case :Any ,) -> np.ndarray:
a__ = get_size_dict(__snake_case )
if "height" not in size or "width" not in size:
raise ValueError(F'Size dictionary must contain both height and width keys. Got {size.keys()}' )
return resize(
__snake_case ,size=(size['height'], size['width']) ,resample=__snake_case ,data_format=__snake_case ,**__snake_case )
def lowerCamelCase__( self :List[Any] ,__snake_case :np.ndarray ,__snake_case :Optional[Union[str, ChannelDimension]] = None ,) -> np.ndarray:
a__ = rescale(image=__snake_case ,scale=1 / 1_27.5 ,data_format=__snake_case )
a__ = image - 1
return image
def lowerCamelCase__( self :Optional[Any] ,__snake_case :ImageInput ,__snake_case :bool = None ,__snake_case :Dict[str, int] = None ,__snake_case :PILImageResampling = None ,__snake_case :bool = None ,__snake_case :Optional[bool] = None ,__snake_case :Optional[Union[List[List[int]], np.ndarray]] = None ,__snake_case :Optional[Union[str, TensorType]] = None ,__snake_case :Optional[Union[str, ChannelDimension]] = ChannelDimension.FIRST ,**__snake_case :List[str] ,) -> PIL.Image.Image:
a__ = do_resize if do_resize is not None else self.do_resize
a__ = size if size is not None else self.size
a__ = get_size_dict(__snake_case )
a__ = resample if resample is not None else self.resample
a__ = do_normalize if do_normalize is not None else self.do_normalize
a__ = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
a__ = clusters if clusters is not None else self.clusters
a__ = np.array(__snake_case )
a__ = make_list_of_images(__snake_case )
if not valid_images(__snake_case ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None or resample is None:
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_color_quantize and clusters is None:
raise ValueError('Clusters must be specified if do_color_quantize is True.' )
# All transformations expect numpy arrays.
a__ = [to_numpy_array(__snake_case ) for image in images]
if do_resize:
a__ = [self.resize(image=__snake_case ,size=__snake_case ,resample=__snake_case ) for image in images]
if do_normalize:
a__ = [self.normalize(image=__snake_case ) for image in images]
if do_color_quantize:
a__ = [to_channel_dimension_format(__snake_case ,ChannelDimension.LAST ) for image in images]
# color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
a__ = np.array(__snake_case )
a__ = color_quantize(__snake_case ,__snake_case ).reshape(images.shape[:-1] )
# flatten to (batch_size, height*width)
a__ = images.shape[0]
a__ = images.reshape(__snake_case ,-1 )
# We need to convert back to a list of images to keep consistent behaviour across processors.
a__ = list(__snake_case )
else:
a__ = [to_channel_dimension_format(__snake_case ,__snake_case ) for image in images]
a__ = {'input_ids': images}
return BatchFeature(data=__snake_case ,tensor_type=__snake_case )
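

# Worked example for the quantization helpers above (as a comment, since this
# module relies on package-relative imports and cannot be run directly):
#
#     pixels = np.array([[250.0, 250.0, 250.0], [10.0, 0.0, 5.0]])
#     palette = np.array([[0.0, 0.0, 0.0], [255.0, 255.0, 255.0]])
#     color_quantize(pixels, palette)  # -> array([1, 0]): white cluster, then black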
| 109 | 0 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class TvltProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "ZinengTang/tvlt-base"
        self.tmpdirname = tempfile.mkdtemp()

    def get_image_processor(self, **kwargs):
        return TvltImageProcessor.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return TvltFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        processor.save_pretrained(self.tmpdirname)
        processor = TvltProcessor.from_pretrained(self.tmpdirname)

        self.assertIsInstance(processor.feature_extractor, TvltFeatureExtractor)
        self.assertIsInstance(processor.image_processor, TvltImageProcessor)

    def test_feature_extractor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        audio = np.ones([12000])

        audio_dict = feature_extractor(audio, return_tensors="np")
        input_processor = processor(audio=audio, return_tensors="np")

        for key in audio_dict.keys():
            self.assertAlmostEqual(audio_dict[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        images = np.ones([3, 224, 224])

        image_dict = image_processor(images, return_tensors="np")
        input_processor = processor(images=images, return_tensors="np")

        for key in image_dict.keys():
            self.assertAlmostEqual(image_dict[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_processor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        audio = np.ones([12000])
        images = np.ones([3, 224, 224])

        inputs = processor(audio=audio, images=images)

        self.assertListEqual(list(inputs.keys()), ["audio_values", "audio_mask", "pixel_values", "pixel_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        self.assertListEqual(
            processor.model_input_names,
            image_processor.model_input_names + feature_extractor.model_input_names,
            msg="`processor` and `image_processor`+`feature_extractor` model input names do not match",
        )
| 123 |
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
# Load configuration defined in the metadata file
with open(__lowerCamelCase ) as metadata_file:
__snake_case : Tuple = json.load(__lowerCamelCase )
__snake_case : int = LukeConfig(use_entity_aware_attention=__lowerCamelCase , **metadata["model_config"] )
# Load in the weights from the checkpoint_path
__snake_case : Any = torch.load(__lowerCamelCase , map_location="cpu" )["module"]
# Load the entity vocab file
__snake_case : Any = load_original_entity_vocab(__lowerCamelCase )
# add an entry for [MASK2]
__snake_case : int = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
__snake_case : List[str] = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"] )
# Add special tokens to the token vocabulary for downstream tasks
__snake_case : List[Any] = AddedToken("<ent>" , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase )
__snake_case : str = AddedToken("<ent2>" , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase )
tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(F'Saving tokenizer to {pytorch_dump_folder_path}' )
tokenizer.save_pretrained(__lowerCamelCase )
with open(os.path.join(__lowerCamelCase , "tokenizer_config.json" ) , "r" ) as f:
__snake_case : int = json.load(__lowerCamelCase )
__snake_case : str = "MLukeTokenizer"
with open(os.path.join(__lowerCamelCase , "tokenizer_config.json" ) , "w" ) as f:
json.dump(__lowerCamelCase , __lowerCamelCase )
with open(os.path.join(__lowerCamelCase , MLukeTokenizer.vocab_files_names["entity_vocab_file"] ) , "w" ) as f:
json.dump(__lowerCamelCase , __lowerCamelCase )
__snake_case : Any = MLukeTokenizer.from_pretrained(__lowerCamelCase )
# Initialize the embeddings of the special tokens
__snake_case : Optional[int] = tokenizer.convert_tokens_to_ids(["@"] )[0]
__snake_case : Tuple = tokenizer.convert_tokens_to_ids(["#"] )[0]
__snake_case : Union[str, Any] = state_dict["embeddings.word_embeddings.weight"]
__snake_case : Tuple = word_emb[ent_init_index].unsqueeze(0 )
__snake_case : Tuple = word_emb[enta_init_index].unsqueeze(0 )
__snake_case : List[Any] = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
__snake_case : Optional[int] = state_dict[bias_name]
__snake_case : Any = decoder_bias[ent_init_index].unsqueeze(0 )
__snake_case : Union[str, Any] = decoder_bias[enta_init_index].unsqueeze(0 )
__snake_case : List[str] = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
__snake_case : Optional[int] = F'encoder.layer.{layer_index}.attention.self.'
__snake_case : int = state_dict[prefix + matrix_name]
__snake_case : Optional[Any] = state_dict[prefix + matrix_name]
__snake_case : Optional[int] = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
__snake_case : Union[str, Any] = state_dict["entity_embeddings.entity_embeddings.weight"]
__snake_case : Union[str, Any] = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0 )
__snake_case : Union[str, Any] = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
__snake_case : List[Any] = state_dict["entity_predictions.bias"]
__snake_case : str = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0 )
__snake_case : Optional[int] = torch.cat([entity_prediction_bias, entity_mask_bias] )
__snake_case : Any = LukeForMaskedLM(config=__lowerCamelCase ).eval()
state_dict.pop("entity_predictions.decoder.weight" )
state_dict.pop("lm_head.decoder.weight" )
state_dict.pop("lm_head.decoder.bias" )
__snake_case : Optional[int] = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith("lm_head" ) or key.startswith("entity_predictions" )):
__snake_case : Dict = state_dict[key]
else:
__snake_case : int = state_dict[key]
__snake_case , __snake_case : Union[str, Any] = model.load_state_dict(__lowerCamelCase , strict=__lowerCamelCase )
if set(__lowerCamelCase ) != {"luke.embeddings.position_ids"}:
raise ValueError(F'Unexpected unexpected_keys: {unexpected_keys}' )
if set(__lowerCamelCase ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(F'Unexpected missing_keys: {missing_keys}' )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
__snake_case : Union[str, Any] = MLukeTokenizer.from_pretrained(__lowerCamelCase , task="entity_classification" )
__snake_case : Optional[Any] = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
__snake_case : Tuple = (0, 9)
__snake_case : Dict = tokenizer(__lowerCamelCase , entity_spans=[span] , return_tensors="pt" )
__snake_case : Optional[Any] = model(**__lowerCamelCase )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
__snake_case : str = torch.Size((1, 3_3, 7_6_8) )
__snake_case : List[str] = torch.tensor([[0.0_8_9_2, 0.0_5_9_6, -0.2_8_1_9], [0.0_1_3_4, 0.1_1_9_9, 0.0_5_7_3], [-0.0_1_6_9, 0.0_9_2_7, 0.0_6_4_4]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F'Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}' )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , __lowerCamelCase , atol=1e-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
__snake_case : Union[str, Any] = torch.Size((1, 1, 7_6_8) )
__snake_case : Optional[int] = torch.tensor([[-0.1_4_8_2, 0.0_6_0_9, 0.0_3_2_2]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
F'Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'
F' {expected_shape}' )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , __lowerCamelCase , atol=1e-4 ):
raise ValueError
# Verify masked word/entity prediction
__snake_case : Union[str, Any] = MLukeTokenizer.from_pretrained(__lowerCamelCase )
__snake_case : List[Any] = "Tokyo is the capital of <mask>."
__snake_case : List[Any] = (2_4, 3_0)
__snake_case : Tuple = tokenizer(__lowerCamelCase , entity_spans=[span] , return_tensors="pt" )
__snake_case : Optional[Any] = model(**__lowerCamelCase )
__snake_case : Tuple = encoding["input_ids"][0].tolist()
__snake_case : Tuple = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>" ) )
__snake_case : Optional[int] = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(__lowerCamelCase )
__snake_case : Dict = outputs.entity_logits[0][0].argmax().item()
__snake_case : Dict = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith("en:" )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print("Saving PyTorch model to {}".format(__lowerCamelCase ) )
model.save_pretrained(__lowerCamelCase )
def load_original_entity_vocab(entity_vocab_path):
    SPECIAL_TOKENS = ["[MASK]", "[PAD]", "[UNK]"]

    data = [json.loads(line) for line in open(entity_vocab_path)]

    new_mapping = {}
    for entry in data:
        entity_id = entry["id"]
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                new_mapping[entity_name] = entity_id
                break
            new_mapping[f"{language}:{entity_name}"] = entity_id
    return new_mapping
if __name__ == "__main__":
_snake_case : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.")
parser.add_argument(
"--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration."
)
parser.add_argument(
"--entity_vocab_path",
default=None,
type=str,
help="Path to an entity_vocab.tsv file, containing the entity vocabulary.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model."
)
parser.add_argument(
"--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted."
)
_snake_case : int = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
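    # Example invocation (file names are placeholders for the original mLUKE dump;
    # the flags match the argparse options above):
    #   python convert_mluke_original_pytorch_checkpoint_to_pytorch.py \
    #       --checkpoint_path ./mluke/pytorch_model.bin \
    #       --metadata_path ./mluke/metadata.json \
    #       --entity_vocab_path ./mluke/entity_vocab.jsonl \
    #       --pytorch_dump_folder_path ./mluke-base-converted \
    #       --model_size base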
| 123 | 1 |
'''simple docstring'''
def counting_sort(collection):
    # if the collection is empty, returns empty
    if collection == []:
        return []

    # get some information about the collection
    coll_len = len(collection)
    coll_max = max(collection)
    coll_min = min(collection)

    # create the counting array
    counting_arr_length = coll_max + 1 - coll_min
    counting_arr = [0] * counting_arr_length

    # count how much a number appears in the collection
    for number in collection:
        counting_arr[number - coll_min] += 1

    # sum each position with it's predecessors. now, counting_arr[i] tells
    # us how many elements <= i has in the collection
    for i in range(1, counting_arr_length):
        counting_arr[i] = counting_arr[i] + counting_arr[i - 1]

    # create the output collection
    ordered = [0] * coll_len

    # place the elements in the output, respecting the original order (stable
    # sort) from end to begin, updating counting_arr
    for i in reversed(range(0, coll_len)):
        ordered[counting_arr[collection[i] - coll_min] - 1] = collection[i]
        counting_arr[collection[i] - coll_min] -= 1

    return ordered


def counting_sort_string(string):
    return "".join([chr(i) for i in counting_sort([ord(c) for c in string])])
if __name__ == "__main__":
# Test string sort
assert counting_sort_string("thisisthestring") == "eghhiiinrsssttt"
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(counting_sort(unsorted))
| 370 |
'''simple docstring'''
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)
__a = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
__a = {
"vocab_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
),
},
}
__a = {
"facebook/dpr-ctx_encoder-single-nq-base": 512,
"facebook/dpr-ctx_encoder-multiset-base": 512,
}
__a = {
"facebook/dpr-question_encoder-single-nq-base": 512,
"facebook/dpr-question_encoder-multiset-base": 512,
}
__a = {
"facebook/dpr-reader-single-nq-base": 512,
"facebook/dpr-reader-multiset-base": 512,
}
__a = {
"facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
__a = {
"facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
__a = {
"facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
"facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class UpperCAmelCase_ ( _a ):
"""simple docstring"""
lowercase = VOCAB_FILES_NAMES
lowercase = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
lowercase = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class UpperCAmelCase_ ( _a ):
"""simple docstring"""
lowercase = VOCAB_FILES_NAMES
lowercase = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
lowercase = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
__a = collections.namedtuple(
"DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)
__a = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
__a = R"\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n ```\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n ```\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `'tf'`: Return TensorFlow `tf.constant` objects.\n - `'pt'`: Return PyTorch `torch.Tensor` objects.\n - `'np'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer's default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Returns:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n "
@add_start_docstrings(_a )
class UpperCAmelCase_ :
"""simple docstring"""
def __call__( self : str , snake_case_ : Optional[Any] , snake_case_ : Optional[str] = None , snake_case_ : Optional[str] = None , snake_case_ : Union[bool, str] = False , snake_case_ : Union[bool, str] = False , snake_case_ : Optional[int] = None , snake_case_ : Optional[Union[str, TensorType]] = None , snake_case_ : Optional[bool] = None , **snake_case_ : Union[str, Any] , ):
if titles is None and texts is None:
return super().__call__(
snake_case_ , padding=snake_case_ , truncation=snake_case_ , max_length=snake_case_ , return_tensors=snake_case_ , return_attention_mask=snake_case_ , **snake_case_ , )
elif titles is None or texts is None:
snake_case__ : int = titles if texts is None else texts
return super().__call__(
snake_case_ , snake_case_ , padding=snake_case_ , truncation=snake_case_ , max_length=snake_case_ , return_tensors=snake_case_ , return_attention_mask=snake_case_ , **snake_case_ , )
snake_case__ : List[str] = titles if not isinstance(snake_case_ , snake_case_ ) else [titles]
snake_case__ : Union[str, Any] = texts if not isinstance(snake_case_ , snake_case_ ) else [texts]
snake_case__ : Dict = len(snake_case_ )
snake_case__ : Union[str, Any] = questions if not isinstance(snake_case_ , snake_case_ ) else [questions] * n_passages
if len(snake_case_ ) != len(snake_case_ ):
raise ValueError(
f"There should be as many titles as texts but got {len(snake_case_ )} titles and {len(snake_case_ )} texts." )
snake_case__ : int = super().__call__(snake_case_ , snake_case_ , padding=snake_case_ , truncation=snake_case_ )["""input_ids"""]
snake_case__ : Any = super().__call__(snake_case_ , add_special_tokens=snake_case_ , padding=snake_case_ , truncation=snake_case_ )["""input_ids"""]
snake_case__ : Dict = {
"""input_ids""": [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(snake_case_ , snake_case_ )
]
}
if return_attention_mask is not False:
snake_case__ : List[Any] = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
snake_case__ : Union[str, Any] = attention_mask
return self.pad(snake_case_ , padding=snake_case_ , max_length=snake_case_ , return_tensors=snake_case_ )
def lowerCamelCase ( self : Optional[int] , snake_case_ : BatchEncoding , snake_case_ : DPRReaderOutput , snake_case_ : int = 16 , snake_case_ : int = 64 , snake_case_ : int = 4 , ):
snake_case__ : Optional[int] = reader_input["""input_ids"""]
snake_case__ , snake_case__ , snake_case__ : List[str] = reader_output[:3]
snake_case__ : Union[str, Any] = len(snake_case_ )
snake_case__ : Tuple = sorted(range(snake_case_ ) , reverse=snake_case_ , key=relevance_logits.__getitem__ )
snake_case__ : List[DPRReaderOutput] = []
for doc_id in sorted_docs:
snake_case__ : Union[str, Any] = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
snake_case__ : Optional[Any] = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
snake_case__ : int = sequence_ids.index(self.pad_token_id )
else:
snake_case__ : int = len(snake_case_ )
snake_case__ : Optional[int] = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=snake_case_ , top_spans=snake_case_ , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=snake_case_ , start_index=snake_case_ , end_index=snake_case_ , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
if len(snake_case_ ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def lowerCamelCase ( self : str , snake_case_ : List[int] , snake_case_ : List[int] , snake_case_ : int , snake_case_ : int , ):
snake_case__ : List[str] = []
for start_index, start_score in enumerate(snake_case_ ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
snake_case__ : Any = sorted(snake_case_ , key=lambda snake_case_ : x[1] , reverse=snake_case_ )
snake_case__ : Optional[Any] = []
for (start_index, end_index), score in scores:
if start_index > end_index:
raise ValueError(f"Wrong span indices: [{start_index}:{end_index}]" )
snake_case__ : Union[str, Any] = end_index - start_index + 1
if length > max_answer_length:
raise ValueError(f"Span is too long: {length} > {max_answer_length}" )
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(snake_case_ ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(_a )
class UpperCAmelCase_ ( _a , _a ):
"""simple docstring"""
lowercase = VOCAB_FILES_NAMES
lowercase = READER_PRETRAINED_VOCAB_FILES_MAP
lowercase = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase = READER_PRETRAINED_INIT_CONFIGURATION
lowercase = ["input_ids", "attention_mask"]
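# A usage sketch against the upstream DPRReaderTokenizer API that this reader
# class mirrors (the checkpoint name is the real facebook one; output shapes
# depend on the padding/truncation settings):
#
#   from transformers import DPRReaderTokenizer
#
#   tokenizer = DPRReaderTokenizer.from_pretrained("facebook/dpr-reader-single-nq-base")
#   encoded = tokenizer(
#       questions=["What is love?"],
#       titles=["Haddaway"],
#       texts=["'What Is Love' is a song recorded by the artist Haddaway"],
#       padding=True,
#       return_tensors="pt",
#   )
#   # encoded["input_ids"] has shape (n_passages, sequence_length)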
| 43 | 0 |
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class lowercase_ ( UpperCAmelCase__ ):
__UpperCAmelCase = ['image_processor', 'tokenizer']
__UpperCAmelCase = 'LayoutLMv2ImageProcessor'
__UpperCAmelCase = ('LayoutXLMTokenizer', 'LayoutXLMTokenizerFast')
def __init__( self , a=None , a=None , **a ):
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , _UpperCAmelCase , )
UpperCamelCase__ = kwargs.pop("feature_extractor" )
UpperCamelCase__ = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(_UpperCAmelCase , _UpperCAmelCase )
def __call__( self , a , a = None , a = None , a = None , a = None , a = True , a = False , a = None , a = None , a = 0 , a = None , a = None , a = None , a = False , a = False , a = False , a = False , a = True , a = None , **a , ):
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
"You cannot provide bounding boxes "
"if you initialized the image processor with apply_ocr set to True." )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
"You cannot provide word labels if you initialized the image processor with apply_ocr set to True." )
if return_overflowing_tokens is True and return_offsets_mapping is False:
raise ValueError("You cannot return overflowing tokens without returning the offsets mapping." )
# first, apply the image processor
UpperCamelCase__ = self.image_processor(images=_UpperCAmelCase , return_tensors=_UpperCAmelCase )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
UpperCamelCase__ = [text] # add batch dimension (as the image processor always adds a batch dimension)
UpperCamelCase__ = features["words"]
UpperCamelCase__ = self.tokenizer(
text=text if text is not None else features["words"] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features["boxes"] , word_labels=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=_UpperCAmelCase , stride=_UpperCAmelCase , pad_to_multiple_of=_UpperCAmelCase , return_token_type_ids=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , return_overflowing_tokens=_UpperCAmelCase , return_special_tokens_mask=_UpperCAmelCase , return_offsets_mapping=_UpperCAmelCase , return_length=_UpperCAmelCase , verbose=_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase , )
# add pixel values
UpperCamelCase__ = features.pop("pixel_values" )
if return_overflowing_tokens is True:
UpperCamelCase__ = self.get_overflowing_images(_UpperCAmelCase , encoded_inputs["overflow_to_sample_mapping"] )
UpperCamelCase__ = images
return encoded_inputs
def __a ( self , a , a ):
UpperCamelCase__ = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(_UpperCAmelCase ) != len(_UpperCAmelCase ):
raise ValueError(
"Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
f''' {len(_UpperCAmelCase )} and {len(_UpperCAmelCase )}''' )
return images_with_overflow
def __a ( self , *a , **a ):
return self.tokenizer.batch_decode(*_UpperCAmelCase , **_UpperCAmelCase )
def __a ( self , *a , **a ):
return self.tokenizer.decode(*_UpperCAmelCase , **_UpperCAmelCase )
@property
def __a ( self ):
return ["input_ids", "bbox", "attention_mask", "image"]
@property
def __a ( self ):
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , _UpperCAmelCase , )
return self.image_processor_class
@property
def __a ( self ):
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , _UpperCAmelCase , )
return self.image_processor
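# A usage sketch for the processor above, assuming the upstream
# LayoutXLMProcessor checkpoint (apply_ocr=True is the image processor's
# default, so words and boxes come from Tesseract OCR; the file name is
# hypothetical):
#
#   from PIL import Image
#   from transformers import LayoutXLMProcessor
#
#   processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base")
#   image = Image.open("document.png").convert("RGB")
#   encoding = processor(image, return_tensors="pt")
#   # encoding carries input_ids, bbox, attention_mask and image tensors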
| 80 |
def harmonic_series(n_term: str) -> list:
    """simple docstring"""
    if n_term == "":
        return []
    series = []
    for temp in range(int(n_term)):
        series.append(f'1/{temp + 1}' if series else '1')
    return series
if __name__ == "__main__":
nth_term = input('Enter the last number (nth term) of the Harmonic Series')
print('Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n')
print(harmonic_series(nth_term))
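# A quick check of the helper above (the first term renders as "1", the rest
# as "1/k"):
#
#   assert harmonic_series("4") == ["1", "1/2", "1/3", "1/4"]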
| 305 | 0 |
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class _a ( unittest.TestCase ):
def lowerCamelCase_ ( self: Dict ) -> int:
"""simple docstring"""
lowercase__ = 0
def lowerCamelCase_ ( self: Optional[int] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = AutoImageProcessor.from_pretrained('''openai/clip-vit-base-patch32''' )
self.assertIsInstance(__lowercase , __lowercase )
def lowerCamelCase_ ( self: List[str] ) -> Optional[int]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase__ = Path(__lowercase ) / '''preprocessor_config.json'''
lowercase__ = Path(__lowercase ) / '''config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(__lowercase , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(__lowercase , '''w''' ) )
lowercase__ = AutoImageProcessor.from_pretrained(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
def lowerCamelCase_ ( self: Tuple ) -> Union[str, Any]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase__ = Path(__lowercase ) / '''preprocessor_config.json'''
lowercase__ = Path(__lowercase ) / '''config.json'''
json.dump(
{'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(__lowercase , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(__lowercase , '''w''' ) )
lowercase__ = AutoImageProcessor.from_pretrained(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
def lowerCamelCase_ ( self: List[str] ) -> Optional[int]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase__ = CLIPConfig()
# Create a dummy config file with image_processor_type
lowercase__ = Path(__lowercase ) / '''preprocessor_config.json'''
lowercase__ = Path(__lowercase ) / '''config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(__lowercase , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(__lowercase , '''w''' ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
lowercase__ = AutoImageProcessor.from_pretrained(__lowercase ).to_dict()
config_dict.pop('''image_processor_type''' )
lowercase__ = CLIPImageProcessor(**__lowercase )
# save in new folder
model_config.save_pretrained(__lowercase )
config.save_pretrained(__lowercase )
lowercase__ = AutoImageProcessor.from_pretrained(__lowercase )
# make sure private variable is not incorrectly saved
lowercase__ = json.loads(config.to_json_string() )
self.assertTrue('''_processor_class''' not in dict_as_saved )
self.assertIsInstance(__lowercase , __lowercase )
def lowerCamelCase_ ( self: str ) -> List[str]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase__ = Path(__lowercase ) / '''preprocessor_config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(__lowercase , '''w''' ) , )
lowercase__ = AutoImageProcessor.from_pretrained(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
def lowerCamelCase_ ( self: Tuple ) -> Optional[int]:
"""simple docstring"""
with self.assertRaisesRegex(
__lowercase , '''clip-base is not a local folder and is not a valid model identifier''' ):
lowercase__ = AutoImageProcessor.from_pretrained('''clip-base''' )
def lowerCamelCase_ ( self: str ) -> List[Any]:
"""simple docstring"""
with self.assertRaisesRegex(
__lowercase , r'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
lowercase__ = AutoImageProcessor.from_pretrained(__lowercase , revision='''aaaaaa''' )
def lowerCamelCase_ ( self: int ) -> List[str]:
"""simple docstring"""
with self.assertRaisesRegex(
__lowercase , '''hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.''' , ):
lowercase__ = AutoImageProcessor.from_pretrained('''hf-internal-testing/config-no-model''' )
def lowerCamelCase_ ( self: Optional[Any] ) -> str:
"""simple docstring"""
with self.assertRaises(__lowercase ):
lowercase__ = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__lowercase ):
lowercase__ = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=__lowercase )
lowercase__ = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=__lowercase )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(__lowercase )
lowercase__ = AutoImageProcessor.from_pretrained(__lowercase , trust_remote_code=__lowercase )
self.assertEqual(reloaded_image_processor.__class__.__name__ , '''NewImageProcessor''' )
def lowerCamelCase_ ( self: List[Any] ) -> Optional[Any]:
"""simple docstring"""
try:
AutoConfig.register('''custom''' , __lowercase )
AutoImageProcessor.register(__lowercase , __lowercase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__lowercase ):
AutoImageProcessor.register(__lowercase , __lowercase )
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase__ = Path(__lowercase ) / '''preprocessor_config.json'''
lowercase__ = Path(__lowercase ) / '''config.json'''
json.dump(
{'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(__lowercase , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(__lowercase , '''w''' ) )
lowercase__ = CustomImageProcessor.from_pretrained(__lowercase )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(__lowercase )
lowercase__ = AutoImageProcessor.from_pretrained(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def lowerCamelCase_ ( self: Any ) -> List[Any]:
"""simple docstring"""
class _a ( UpperCAmelCase_ ):
_lowercase : List[str] = True
try:
AutoConfig.register('''custom''' , __lowercase )
AutoImageProcessor.register(__lowercase , __lowercase )
# If remote code is not set, the default is to use local
lowercase__ = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(image_processor.is_local )
# If remote code is disabled, we load the local one.
lowercase__ = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=__lowercase )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(image_processor.is_local )
# If remote is enabled, we load from the Hub
lowercase__ = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=__lowercase )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(not hasattr(__lowercase , '''is_local''' ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
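# The registration pattern these tests exercise, as a minimal sketch
# (CustomConfig/CustomImageProcessor stand in for any user-defined pair):
#
#   from transformers import AutoConfig, AutoImageProcessor
#
#   AutoConfig.register("custom", CustomConfig)
#   AutoImageProcessor.register(CustomConfig, CustomImageProcessor)
#   # from here on, checkpoints saved with CustomImageProcessor round-trip
#   # through AutoImageProcessor.from_pretrained(...) like built-in ones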
| 366 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {
'unc-nlp/lxmert-base-uncased': 'https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json',
}
class _a ( UpperCamelCase__ ):
_lowercase : List[Any] = '''lxmert'''
_lowercase : Any = {}
def __init__( self: Any , UpperCamelCase_: List[Any]=30_522 , UpperCamelCase_: int=768 , UpperCamelCase_: Optional[Any]=12 , UpperCamelCase_: Dict=9_500 , UpperCamelCase_: List[Any]=1_600 , UpperCamelCase_: List[Any]=400 , UpperCamelCase_: List[str]=3_072 , UpperCamelCase_: int="gelu" , UpperCamelCase_: Any=0.1 , UpperCamelCase_: int=0.1 , UpperCamelCase_: List[str]=512 , UpperCamelCase_: Optional[int]=2 , UpperCamelCase_: Union[str, Any]=0.02 , UpperCamelCase_: Dict=1E-1_2 , UpperCamelCase_: List[Any]=9 , UpperCamelCase_: List[Any]=5 , UpperCamelCase_: Optional[Any]=5 , UpperCamelCase_: str=2_048 , UpperCamelCase_: Dict=4 , UpperCamelCase_: Any=6.67 , UpperCamelCase_: Dict=True , UpperCamelCase_: List[str]=True , UpperCamelCase_: Dict=True , UpperCamelCase_: Any=True , UpperCamelCase_: Tuple=True , UpperCamelCase_: Optional[Any]=True , UpperCamelCase_: Any=True , **UpperCamelCase_: Union[str, Any] , ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = vocab_size
lowercase__ = hidden_size
lowercase__ = num_attention_heads
lowercase__ = hidden_act
lowercase__ = intermediate_size
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = max_position_embeddings
lowercase__ = type_vocab_size
lowercase__ = initializer_range
lowercase__ = layer_norm_eps
lowercase__ = num_qa_labels
lowercase__ = num_object_labels
lowercase__ = num_attr_labels
lowercase__ = l_layers
lowercase__ = x_layers
lowercase__ = r_layers
lowercase__ = visual_feat_dim
lowercase__ = visual_pos_dim
lowercase__ = visual_loss_normalizer
lowercase__ = task_matched
lowercase__ = task_mask_lm
lowercase__ = task_obj_predict
lowercase__ = task_qa
lowercase__ = visual_obj_loss
lowercase__ = visual_attr_loss
lowercase__ = visual_feat_loss
lowercase__ = {'''vision''': r_layers, '''cross_encoder''': x_layers, '''language''': l_layers}
super().__init__(**UpperCamelCase_ )
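# A construction sketch for the config above (parameter names follow the
# upstream LxmertConfig; the values shown are its documented defaults):
#
#   from transformers import LxmertConfig
#
#   config = LxmertConfig(l_layers=9, x_layers=5, r_layers=5)
#   assert config.num_hidden_layers == {"vision": 5, "cross_encoder": 5, "language": 9}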
| 93 | 0 |
from typing import List, Optional, Union
import numpy as np
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
__snake_case :int = logging.get_logger(__name__)
class _A ( __UpperCAmelCase ):
UpperCamelCase__ : Optional[Any] = ['''input_values''', '''padding_mask''']
def __init__( self : Optional[int] , __SCREAMING_SNAKE_CASE : int = 1 , __SCREAMING_SNAKE_CASE : int = 24_000 , __SCREAMING_SNAKE_CASE : float = 0.0 , __SCREAMING_SNAKE_CASE : float = None , __SCREAMING_SNAKE_CASE : float = None , **__SCREAMING_SNAKE_CASE : Any , ):
'''simple docstring'''
super().__init__(feature_size=__SCREAMING_SNAKE_CASE , sampling_rate=__SCREAMING_SNAKE_CASE , padding_value=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE)
__a = chunk_length_s
__a = overlap
@property
def _lowerCamelCase ( self : int):
'''simple docstring'''
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate)
@property
def _lowerCamelCase ( self : Dict):
'''simple docstring'''
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length))
def __call__( self : int , __SCREAMING_SNAKE_CASE : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , __SCREAMING_SNAKE_CASE : Optional[Union[bool, str, PaddingStrategy]] = None , __SCREAMING_SNAKE_CASE : Optional[bool] = False , __SCREAMING_SNAKE_CASE : Optional[int] = None , __SCREAMING_SNAKE_CASE : Optional[Union[str, TensorType]] = None , __SCREAMING_SNAKE_CASE : Optional[int] = None , ):
'''simple docstring'''
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'The model corresponding to this feature extractor: {self} was trained using a sampling rate of'
F' {self.sampling_rate}. Please make sure that the provided audio input was sampled with'
F' {self.sampling_rate} and not {sampling_rate}.')
else:
logger.warning(
'''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''')
if padding and truncation:
raise ValueError('''Both padding and truncation were set. Make sure you only set one.''')
elif padding is None:
# by default let's pad the inputs
__a = True
__a = bool(
isinstance(__SCREAMING_SNAKE_CASE , (list, tuple)) and (isinstance(raw_audio[0] , (np.ndarray, tuple, list))))
if is_batched:
__a = [np.asarray(__SCREAMING_SNAKE_CASE , dtype=np.floataa).T for audio in raw_audio]
elif not is_batched and not isinstance(__SCREAMING_SNAKE_CASE , np.ndarray):
__a = np.asarray(__SCREAMING_SNAKE_CASE , dtype=np.floataa)
elif isinstance(__SCREAMING_SNAKE_CASE , np.ndarray) and raw_audio.dtype is np.dtype(np.floataa):
__a = raw_audio.astype(np.floataa)
# always return batch
if not is_batched:
__a = [np.asarray(__SCREAMING_SNAKE_CASE).T]
# verify inputs are valid
for idx, example in enumerate(__SCREAMING_SNAKE_CASE):
if example.ndim > 2:
raise ValueError(F'Expected input shape (channels, length) but got shape {example.shape}')
if self.feature_size == 1 and example.ndim != 1:
raise ValueError(F'Expected mono audio but example has {example.shape[-1]} channels')
if self.feature_size == 2 and example.shape[-1] != 2:
raise ValueError(F'Expected stereo audio but example has {example.shape[-1]} channels')
__a = None
__a = BatchFeature({'''input_values''': raw_audio})
if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
if truncation:
__a = min(array.shape[0] for array in raw_audio)
__a = int(np.floor(max_length / self.chunk_stride))
__a = (nb_step - 1) * self.chunk_stride + self.chunk_length
elif padding:
__a = max(array.shape[0] for array in raw_audio)
__a = int(np.ceil(max_length / self.chunk_stride))
__a = (nb_step - 1) * self.chunk_stride + self.chunk_length
__a = '''max_length'''
else:
__a = input_values
# normal padding on batch
if padded_inputs is None:
__a = self.pad(
__SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , return_attention_mask=__SCREAMING_SNAKE_CASE , )
if padding:
__a = padded_inputs.pop('''attention_mask''')
__a = []
for example in padded_inputs.pop('''input_values'''):
if self.feature_size == 1:
__a = example[..., None]
input_values.append(example.T)
__a = input_values
if return_tensors is not None:
__a = padded_inputs.convert_to_tensors(__SCREAMING_SNAKE_CASE)
return padded_inputs
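# A usage sketch for the feature extractor above, assuming the upstream
# EncodecFeatureExtractor defaults (mono audio at 24 kHz):
#
#   import numpy as np
#   from transformers import EncodecFeatureExtractor
#
#   feature_extractor = EncodecFeatureExtractor()
#   raw_audio = np.zeros(24_000, dtype=np.float32)  # one second of silence
#   inputs = feature_extractor(raw_audio, sampling_rate=24_000, return_tensors="pt")
#   # inputs["input_values"] has shape (batch, channels, samples)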
| 49 |
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, big_bird_config_file, pytorch_dump_path, is_trivia_qa):
    # Initialise PyTorch model
    config = BigBirdConfig.from_json_file(big_bird_config_file)
    print(f'Building PyTorch model from configuration: {config}')
    if is_trivia_qa:
        model = BigBirdForQuestionAnswering(config)
    else:
        model = BigBirdForPreTraining(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_big_bird(model, tf_checkpoint_path, is_trivia_qa=is_trivia_qa)
    # Save pytorch-model
    print(f'Save PyTorch model to {pytorch_dump_path}')
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--big_bird_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained BERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--is_trivia_qa''', action='''store_true''', help='''Whether to convert a model with a trivia_qa head.'''
)
args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
)
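# A usage sketch for the converter above (paths are hypothetical; the flag
# names come from the argparse setup; add --is_trivia_qa only when the
# checkpoint carries a TriviaQA head):
#
#   python convert_bigbird_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path bigbird_roberta_base/model.ckpt \
#       --big_bird_config_file bigbird_roberta_base/config.json \
#       --pytorch_dump_path ./bigbird-roberta-base-pt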
| 49 | 1 |
"""simple docstring"""
from typing import Any
class lowerCamelCase :
def __init__( self , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : Optional[int] = data
UpperCamelCase : Optional[Any] = None
def __repr__( self ):
return f'Node({self.data})'
class lowerCamelCase :
def __init__( self ):
UpperCamelCase : Dict = None
def __iter__( self ):
UpperCamelCase : int = self.head
while node:
yield node.data
UpperCamelCase : Union[str, Any] = node.next
def __len__( self ):
return sum(1 for _ in self )
def __repr__( self ):
return "->".join([str(SCREAMING_SNAKE_CASE_ ) for item in self] )
def __getitem__( self , SCREAMING_SNAKE_CASE_ ):
if not 0 <= index < len(self ):
raise ValueError("""list index out of range.""" )
for i, node in enumerate(self ):
if i == index:
return node
return None
def __setitem__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
if not 0 <= index < len(self ):
raise ValueError("""list index out of range.""" )
UpperCamelCase : List[Any] = self.head
for _ in range(SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : Any = current.next
UpperCamelCase : Optional[Any] = data
def a_ ( self , SCREAMING_SNAKE_CASE_ ):
self.insert_nth(len(self ) , SCREAMING_SNAKE_CASE_ )
def a_ ( self , SCREAMING_SNAKE_CASE_ ):
self.insert_nth(0 , SCREAMING_SNAKE_CASE_ )
def a_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
if not 0 <= index <= len(self ):
raise IndexError("""list index out of range""" )
UpperCamelCase : Optional[Any] = Node(SCREAMING_SNAKE_CASE_ )
if self.head is None:
UpperCamelCase : Dict = new_node
elif index == 0:
UpperCamelCase : Any = self.head # link new_node to head
UpperCamelCase : Any = new_node
else:
UpperCamelCase : Dict = self.head
for _ in range(index - 1 ):
UpperCamelCase : str = temp.next
UpperCamelCase : Any = temp.next
UpperCamelCase : Optional[Any] = new_node
def a_ ( self ): # print every node data
print(self )
def a_ ( self ):
return self.delete_nth(0 )
def a_ ( self ): # delete from tail
return self.delete_nth(len(self ) - 1 )
def a_ ( self , SCREAMING_SNAKE_CASE_ = 0 ):
if not 0 <= index <= len(self ) - 1: # test if index is valid
raise IndexError("""List index out of range.""" )
UpperCamelCase : Union[str, Any] = self.head # default first node
if index == 0:
UpperCamelCase : Optional[Any] = self.head.next
else:
UpperCamelCase : Dict = self.head
for _ in range(index - 1 ):
UpperCamelCase : int = temp.next
UpperCamelCase : Optional[Any] = temp.next
UpperCamelCase : Dict = temp.next.next
return delete_node.data
def a_ ( self ):
return self.head is None
def a_ ( self ):
UpperCamelCase : Optional[Any] = None
UpperCamelCase : Union[str, Any] = self.head
while current:
# Store the current node's next node.
UpperCamelCase : Optional[int] = current.next
# Make the current node's next point backwards
UpperCamelCase : Optional[Any] = prev
# Make the previous node be the current node
UpperCamelCase : int = current
# Make the current node the next node (to progress iteration)
UpperCamelCase : Optional[int] = next_node
# Return prev in order to put the head at the end
UpperCamelCase : Optional[int] = prev
def A_ ( ):
'''simple docstring'''
UpperCamelCase : int = LinkedList()
assert linked_list.is_empty() is True
assert str(snake_case_ ) == ""
try:
linked_list.delete_head()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
try:
linked_list.delete_tail()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
for i in range(1_0 ):
assert len(snake_case_ ) == i
linked_list.insert_nth(snake_case_ ,i + 1 )
assert str(snake_case_ ) == "->".join(str(snake_case_ ) for i in range(1 ,1_1 ) )
linked_list.insert_head(0 )
linked_list.insert_tail(1_1 )
assert str(snake_case_ ) == "->".join(str(snake_case_ ) for i in range(0 ,1_2 ) )
assert linked_list.delete_head() == 0
assert linked_list.delete_nth(9 ) == 1_0
assert linked_list.delete_tail() == 1_1
assert len(snake_case_ ) == 9
assert str(snake_case_ ) == "->".join(str(snake_case_ ) for i in range(1 ,1_0 ) )
assert all(linked_list[i] == i + 1 for i in range(0 ,9 ) ) is True
for i in range(0 ,9 ):
UpperCamelCase : Optional[Any] = -i
assert all(linked_list[i] == -i for i in range(0 ,9 ) ) is True
linked_list.reverse()
assert str(snake_case_ ) == "->".join(str(snake_case_ ) for i in range(-8 ,1 ) )
def A_ ( ):
'''simple docstring'''
UpperCamelCase : int = [
-9,
1_0_0,
Node(7_7_3_4_5_1_1_2 ),
"""dlrow olleH""",
7,
5_5_5_5,
0,
-192.55555,
"""Hello, world!""",
77.9,
Node(1_0 ),
None,
None,
12.20,
]
UpperCamelCase : List[Any] = LinkedList()
for i in test_input:
linked_list.insert_tail(snake_case_ )
# Check if it's empty or not
assert linked_list.is_empty() is False
assert (
str(snake_case_ ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
"-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the head
UpperCamelCase : Dict = linked_list.delete_head()
assert result == -9
assert (
str(snake_case_ ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the tail
UpperCamelCase : int = linked_list.delete_tail()
assert result == 12.2
assert (
str(snake_case_ ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None"
)
# Delete a node in specific location in linked list
UpperCamelCase : Optional[Any] = linked_list.delete_nth(1_0 )
assert result is None
assert (
str(snake_case_ ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None"
)
# Add a Node instance to its head
linked_list.insert_head(Node("""Hello again, world!""" ) )
assert (
str(snake_case_ )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
)
# Add None to its tail
linked_list.insert_tail(snake_case_ )
assert (
str(snake_case_ )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
)
# Reverse the linked list
linked_list.reverse()
assert (
str(snake_case_ )
== "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
"7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
)
def A_ ( ):
'''simple docstring'''
from doctest import testmod
testmod()
UpperCamelCase : List[Any] = LinkedList()
linked_list.insert_head(input("""Inserting 1st at head """ ).strip() )
linked_list.insert_head(input("""Inserting 2nd at head """ ).strip() )
print("""\nPrint list:""" )
linked_list.print_list()
linked_list.insert_tail(input("""\nInserting 1st at tail """ ).strip() )
linked_list.insert_tail(input("""Inserting 2nd at tail """ ).strip() )
print("""\nPrint list:""" )
linked_list.print_list()
print("""\nDelete head""" )
linked_list.delete_head()
print("""Delete tail""" )
linked_list.delete_tail()
print("""\nPrint list:""" )
linked_list.print_list()
print("""\nReverse linked list""" )
linked_list.reverse()
print("""\nPrint list:""" )
linked_list.print_list()
print("""\nString representation of linked list:""" )
print(snake_case_ )
print("""\nReading/changing Node data using indexing:""" )
print(f'Element at Position 1: {linked_list[1]}' )
UpperCamelCase : List[Any] = input("""Enter New Value: """ ).strip()
print("""New list:""" )
print(snake_case_ )
print(f'length of linked_list is : {len(snake_case_ )}' )
if __name__ == "__main__":
main()
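# A minimal standalone check of the list defined above. Both classes here are
# named lowerCamelCase, so this assumes the second one is the LinkedList that
# the test helpers call; indexing and deletion are O(n) traversals:
#
#   ll = LinkedList()
#   for value in (1, 2, 3):
#       ll.insert_tail(value)
#   ll.reverse()
#   assert str(ll) == "3->2->1"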
| 358 |
"""simple docstring"""
import requests
from bs4 import BeautifulSoup


def world_covid19_stats(url: str = "https://www.worldometers.info/coronavirus") -> dict:
    '''simple docstring'''
    soup = BeautifulSoup(requests.get(url).text, """html.parser""")
    keys = soup.findAll("""h1""")
    values = soup.findAll("""div""", {"""class""": """maincounter-number"""})
    keys += soup.findAll("""span""", {"""class""": """panel-title"""})
    values += soup.findAll("""div""", {"""class""": """number-table-main"""})
    return {key.text.strip(): value.text.strip() for key, value in zip(keys, values)}
if __name__ == "__main__":
print('''\033[1m''' + '''COVID-19 Status of the World''' + '''\033[0m\n''')
for key, value in world_covid19_stats().items():
print(F'''{key}\n{value}\n''')
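# The scrape depends on worldometers' markup staying stable; the key/value
# pairing itself is plain zip() over two equally ordered lists, e.g. with
# placeholder data:
#
#   keys = ["Coronavirus Cases:", "Deaths:", "Recovered:"]
#   values = ["a", "b", "c"]
#   stats = {k.strip(): v.strip() for k, v in zip(keys, values)}
#   # {'Coronavirus Cases:': 'a', 'Deaths:': 'b', 'Recovered:': 'c'}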
| 27 | 0 |
"""simple docstring"""
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class _A ( _a ,_a ,unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase : Optional[Any] = VQModel
UpperCAmelCase : Optional[int] = """sample"""
@property
def __snake_case ( self : Tuple , __UpperCAmelCase : Tuple=(32, 32)):
a : List[str] = 4
a : List[Any] = 3
a : Dict = floats_tensor((batch_size, num_channels) + sizes).to(__UpperCAmelCase)
return {"sample": image}
@property
def __snake_case ( self : Dict):
return (3, 32, 32)
@property
def __snake_case ( self : Tuple):
return (3, 32, 32)
def __snake_case ( self : List[Any]):
a : int = {
"block_out_channels": [32, 64],
"in_channels": 3,
"out_channels": 3,
"down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
"up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
"latent_channels": 3,
}
a : Tuple = self.dummy_input
return init_dict, inputs_dict
def __snake_case ( self : List[Any]):
pass
def __snake_case ( self : str):
pass
def __snake_case ( self : Dict):
a , a : str = VQModel.from_pretrained("fusing/vqgan-dummy" , output_loading_info=__UpperCAmelCase)
self.assertIsNotNone(__UpperCAmelCase)
self.assertEqual(len(loading_info["missing_keys"]) , 0)
model.to(__UpperCAmelCase)
a : Tuple = model(**self.dummy_input)
assert image is not None, "Make sure output is not None"
def __snake_case ( self : List[str]):
a : Any = VQModel.from_pretrained("fusing/vqgan-dummy")
model.to(__UpperCAmelCase).eval()
torch.manual_seed(0)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(0)
a : Optional[int] = torch.randn(1 , model.config.in_channels , model.config.sample_size , model.config.sample_size)
a : Optional[Any] = image.to(__UpperCAmelCase)
with torch.no_grad():
a : List[str] = model(__UpperCAmelCase).sample
a : Dict = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
a : Union[str, Any] = torch.tensor([-0.0_153, -0.4_044, -0.1_880, -0.5_161, -0.2_418, -0.4_072, -0.1_612, -0.0_633, -0.0_143])
# fmt: on
self.assertTrue(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1e-3))
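# A forward-pass sketch matching the dummy config used above (diffusers'
# VQModel; output access via .sample mirrors the integration test):
#
#   import torch
#   from diffusers import VQModel
#
#   model = VQModel(
#       block_out_channels=[32, 64],
#       in_channels=3,
#       out_channels=3,
#       down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
#       up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
#       latent_channels=3,
#   )
#   sample = torch.randn(1, 3, 32, 32)
#   reconstruction = model(sample).sample  # same shape as the input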
| 40 |
'''simple docstring'''
import baseaa
import io
import json
import os
from copy import deepcopy
from ..optimizer import AcceleratedOptimizer
from ..scheduler import AcceleratedScheduler
class a__:
def __init__( self : List[Any] , __snake_case : Union[str, Any] ):
if isinstance(__snake_case , __snake_case ):
# Don't modify user's data should they want to reuse it (e.g. in tests), because once we
# modified it, it will not be accepted here again, since `auto` values would have been overridden
a : str = deepcopy(__snake_case )
elif os.path.exists(__snake_case ):
with io.open(__snake_case , 'r' , encoding='utf-8' ) as f:
a : Optional[Any] = json.load(__snake_case )
else:
try:
a : Any = baseaa.urlsafe_baadecode(__snake_case ).decode('utf-8' )
a : Union[str, Any] = json.loads(__snake_case )
except (UnicodeDecodeError, AttributeError, ValueError):
raise ValueError(
F"""Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}""" )
a : List[str] = config
self.set_stage_and_offload()
def lowercase_ ( self : List[str] ):
# zero stage - this is done as early as possible, before model is created, to allow
# ``is_deepspeed_zero3_enabled`` query and getting to the early deepspeed config object
# during ``zero.Init()`` which needs to know the dtype, and some other hparams.
a : Dict = self.get_value('zero_optimization.stage' , -1 )
# offload
a : str = False
if self.is_zeroa() or self.is_zeroa():
a : Union[str, Any] = set(['cpu', 'nvme'] )
a : Optional[Any] = set(
[
self.get_value('zero_optimization.offload_optimizer.device' ),
self.get_value('zero_optimization.offload_param.device' ),
] )
if len(offload_devices & offload_devices_valid ) > 0:
a : List[str] = True
def lowercase_ ( self : Optional[Any] , __snake_case : Optional[Any] ):
a : str = self.config
# find the config node of interest if it exists
a : List[str] = ds_key_long.split('.' )
a : Dict = nodes.pop()
for node in nodes:
a : List[Any] = config.get(__snake_case )
if config is None:
return None, ds_key
return config, ds_key
def lowercase_ ( self : Union[str, Any] , __snake_case : Optional[int] , __snake_case : Any=None ):
a , a : List[Any] = self.find_config_node(__snake_case )
if config is None:
return default
return config.get(__snake_case , __snake_case )
def lowercase_ ( self : int , __snake_case : Optional[Any] , __snake_case : List[str]=False ):
a : Optional[Any] = self.config
# find the config node of interest if it exists
a : List[str] = ds_key_long.split('.' )
for node in nodes:
a : str = config
a : Dict = config.get(__snake_case )
if config is None:
if must_exist:
raise ValueError(F"""Can't find {ds_key_long} entry in the config: {self.config}""" )
else:
return
# if found remove it
if parent_config is not None:
parent_config.pop(__snake_case )
def lowercase_ ( self : Union[str, Any] , __snake_case : Optional[int] ):
a : Union[str, Any] = self.get_value(__snake_case )
return False if value is None else bool(__snake_case )
def lowercase_ ( self : Union[str, Any] , __snake_case : str ):
a : Optional[Any] = self.get_value(__snake_case )
return False if value is None else not bool(__snake_case )
def lowercase_ ( self : Optional[Any] ):
return self._stage == 2
def lowercase_ ( self : Union[str, Any] ):
return self._stage == 3
def lowercase_ ( self : str ):
return self._offload
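# A query sketch for the class above (upstream it is accelerate's
# HfDeepSpeedConfig; method names follow the internal self.get_value(...)
# calls that the mangled defs hide):
#
#   ds_config = {"zero_optimization": {"stage": 3, "offload_param": {"device": "cpu"}}}
#   config = HfDeepSpeedConfig(ds_config)
#   assert config.get_value("zero_optimization.stage") == 3
#   assert config.is_zero3() and config.is_offload()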
class a__:
def __init__( self : Tuple , __snake_case : str ):
a : Optional[Any] = engine
def lowercase_ ( self : Union[str, Any] , __snake_case : str , **__snake_case : Tuple ):
# runs backpropagation and handles mixed precision
self.engine.backward(__snake_case , **__snake_case )
# Deepspeed's `engine.step` performs the following operations:
# - gradient accumulation check
# - gradient clipping
# - optimizer step
# - zero grad
# - checking overflow
# - lr_scheduler step (only if engine.lr_scheduler is not None)
self.engine.step()
# and this plugin overrides the above calls with no-ops when Accelerate runs under
# Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple
# training loop that works transparently under many training regimes.
class a__( lowerCamelCase__ ):
def __init__( self : str , __snake_case : List[str] ):
super().__init__(__snake_case , device_placement=__snake_case , scaler=__snake_case )
a : Optional[Any] = hasattr(self.optimizer , 'overflow' )
def lowercase_ ( self : Dict , __snake_case : Dict=None ):
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
def lowercase_ ( self : Optional[Any] ):
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
@property
def lowercase_ ( self : Tuple ):
if self.__has_overflow__:
return self.optimizer.overflow
return False
class a__( lowerCamelCase__ ):
def __init__( self : List[Any] , __snake_case : List[Any] , __snake_case : Optional[Any] ):
super().__init__(__snake_case , __snake_case )
def lowercase_ ( self : Any ):
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
class a__:
def __init__( self : List[Any] , __snake_case : str , __snake_case : Dict=0.001 , __snake_case : Union[str, Any]=0 , **__snake_case : List[Any] ):
a : Optional[Any] = params
a : str = lr
a : List[str] = weight_decay
a : str = kwargs
class a__:
def __init__( self : str , __snake_case : Optional[Any] , __snake_case : List[str]=None , __snake_case : Tuple=0 , **__snake_case : Any ):
a : Union[str, Any] = optimizer
a : Any = total_num_steps
a : List[str] = warmup_num_steps
a : int = kwargs
| 297 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__A : Optional[Any] = {"configuration_unispeech": ["UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP", "UniSpeechConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Any = [
"UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST",
"UniSpeechForCTC",
"UniSpeechForPreTraining",
"UniSpeechForSequenceClassification",
"UniSpeechModel",
"UniSpeechPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
__A : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 368 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class __UpperCamelCase ( lowercase__ , lowercase__ , lowercase__ , unittest.TestCase ):
lowercase : Dict = StableDiffusionInpaintPipeline
lowercase : str = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
lowercase : Union[str, Any] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
lowercase : Dict = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
lowercase : Optional[int] = frozenset([] )
def a__ ( self :Any ):
torch.manual_seed(0 )
snake_case_ : Optional[int] = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) ,layers_per_block=2 ,sample_size=3_2 ,in_channels=9 ,out_channels=4 ,down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") ,up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") ,cross_attention_dim=3_2 ,attention_head_dim=(2, 4) ,use_linear_projection=_UpperCamelCase ,)
snake_case_ : Tuple = PNDMScheduler(skip_prk_steps=_UpperCamelCase )
torch.manual_seed(0 )
snake_case_ : List[str] = AutoencoderKL(
block_out_channels=[3_2, 6_4] ,in_channels=3 ,out_channels=3 ,down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] ,up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] ,latent_channels=4 ,sample_size=1_2_8 ,)
torch.manual_seed(0 )
snake_case_ : Optional[int] = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=3_2 ,intermediate_size=3_7 ,layer_norm_eps=1E-0_5 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1_0_0_0 ,hidden_act="""gelu""" ,projection_dim=5_1_2 ,)
snake_case_ : Tuple = CLIPTextModel(_UpperCamelCase )
snake_case_ : Optional[int] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
snake_case_ : str = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def a__ ( self :str ,_UpperCamelCase :Optional[int] ,_UpperCamelCase :Union[str, Any]=0 ):
# TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
snake_case_ : List[Any] = floats_tensor((1, 3, 3_2, 3_2) ,rng=random.Random(_UpperCamelCase ) ).to(_UpperCamelCase )
snake_case_ : int = image.cpu().permute(0 ,2 ,3 ,1 )[0]
snake_case_ : List[str] = Image.fromarray(np.uinta(_UpperCamelCase ) ).convert("""RGB""" ).resize((6_4, 6_4) )
snake_case_ : Optional[Any] = Image.fromarray(np.uinta(image + 4 ) ).convert("""RGB""" ).resize((6_4, 6_4) )
if str(_UpperCamelCase ).startswith("""mps""" ):
snake_case_ : Optional[Any] = torch.manual_seed(_UpperCamelCase )
else:
snake_case_ : Optional[int] = torch.Generator(device=_UpperCamelCase ).manual_seed(_UpperCamelCase )
snake_case_ : int = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": init_image,
"""mask_image""": mask_image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def a__ ( self :Any ):
snake_case_ : Union[str, Any] = """cpu""" # ensure determinism for the device-dependent torch.Generator
snake_case_ : Optional[Any] = self.get_dummy_components()
snake_case_ : Dict = StableDiffusionInpaintPipeline(**_UpperCamelCase )
snake_case_ : List[str] = sd_pipe.to(_UpperCamelCase )
sd_pipe.set_progress_bar_config(disable=_UpperCamelCase )
snake_case_ : Union[str, Any] = self.get_dummy_inputs(_UpperCamelCase )
snake_case_ : Tuple = sd_pipe(**_UpperCamelCase ).images
snake_case_ : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
snake_case_ : Dict = np.array([0.47_27, 0.57_35, 0.39_41, 0.54_46, 0.59_26, 0.43_94, 0.50_62, 0.46_54, 0.44_76] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def a__ ( self :Any ):
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
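# A minimal inference sketch for the pipeline exercised by the slow tests
# below (checkpoint and call signature as used there; init_image/mask_image
# are PIL inputs, white mask pixels mark the region to repaint):
#
#   pipe = StableDiffusionInpaintPipeline.from_pretrained(
#       "stabilityai/stable-diffusion-2-inpainting"
#   )
#   result = pipe(
#       prompt="Face of a yellow cat, high resolution, sitting on a park bench",
#       image=init_image,
#       mask_image=mask_image,
#   ).images[0]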
@slow
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
def a__ ( self :List[Any] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def test_stable_diffusion_inpaint_pipeline( self ):
init_image = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-inpaint/init_image.png""" )
mask_image = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
expected_image = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"""
"""/yellow_cat_sitting_on_a_park_bench.npy""" )
model_id = """stabilityai/stable-diffusion-2-inpainting"""
pipe = StableDiffusionInpaintPipeline.from_pretrained(model_id ,safety_checker=None )
pipe.to(torch_device )
pipe.set_progress_bar_config(disable=None )
pipe.enable_attention_slicing()
prompt = """Face of a yellow cat, high resolution, sitting on a park bench"""
generator = torch.manual_seed(0 )
output = pipe(
prompt=prompt ,image=init_image ,mask_image=mask_image ,generator=generator ,output_type="""np""" ,)
image = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert np.abs(expected_image - image ).max() < 9E-3
def test_stable_diffusion_inpaint_pipeline_fp16( self ):
init_image = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-inpaint/init_image.png""" )
mask_image = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
expected_image = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"""
"""/yellow_cat_sitting_on_a_park_bench_fp16.npy""" )
model_id = """stabilityai/stable-diffusion-2-inpainting"""
pipe = StableDiffusionInpaintPipeline.from_pretrained(
model_id ,torch_dtype=torch.float16 ,safety_checker=None ,)
pipe.to(torch_device )
pipe.set_progress_bar_config(disable=None )
pipe.enable_attention_slicing()
prompt = """Face of a yellow cat, high resolution, sitting on a park bench"""
generator = torch.manual_seed(0 )
output = pipe(
prompt=prompt ,image=init_image ,mask_image=mask_image ,generator=generator ,output_type="""np""" ,)
image = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert np.abs(expected_image - image ).max() < 5E-1
def test_stable_diffusion_pipeline_with_sequential_cpu_offloading( self ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
init_image = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-inpaint/init_image.png""" )
mask_image = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
model_id = """stabilityai/stable-diffusion-2-inpainting"""
pndm = PNDMScheduler.from_pretrained(model_id ,subfolder="""scheduler""" )
pipe = StableDiffusionInpaintPipeline.from_pretrained(
model_id ,safety_checker=None ,scheduler=pndm ,torch_dtype=torch.float16 ,)
pipe.to(torch_device )
pipe.set_progress_bar_config(disable=None )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
prompt = """Face of a yellow cat, high resolution, sitting on a park bench"""
generator = torch.manual_seed(0 )
_ = pipe(
prompt=prompt ,image=init_image ,mask_image=mask_image ,generator=generator ,num_inference_steps=2 ,output_type="""np""" ,)
mem_bytes = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.65 * 1_0**9 | 8 | 0 |
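The final assertion relies on torch's CUDA peak-memory counters; here is the measurement pattern in isolation, as a minimal sketch (the pipeline call is a placeholder for whatever workload is under test):
import torch
torch.cuda.empty_cache()
torch.cuda.reset_peak_memory_stats()
# ... run the offloaded pipeline here ...
mem_bytes = torch.cuda.max_memory_allocated()
assert mem_bytes < 2.65 * 10**9 # under 2.65 GB peak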
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'''configuration_mobilenet_v2''': [
'''MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''MobileNetV2Config''',
'''MobileNetV2OnnxConfig''',
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''feature_extraction_mobilenet_v2'''] = ['''MobileNetV2FeatureExtractor''']
_import_structure['''image_processing_mobilenet_v2'''] = ['''MobileNetV2ImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''modeling_mobilenet_v2'''] = [
'''MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MobileNetV2ForImageClassification''',
'''MobileNetV2ForSemanticSegmentation''',
'''MobileNetV2Model''',
'''MobileNetV2PreTrainedModel''',
'''load_tf_weights_in_mobilenet_v2''',
]
if TYPE_CHECKING:
from .configuration_mobilenet_v2 import (
MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileNetV2Config,
MobileNetV2OnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilenet_v2 import MobileNetV2FeatureExtractor
from .image_processing_mobilenet_v2 import MobileNetV2ImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilenet_v2 import (
MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileNetV2ForImageClassification,
MobileNetV2ForSemanticSegmentation,
MobileNetV2Model,
MobileNetV2PreTrainedModel,
load_tf_weights_in_mobilenet_v2,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
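The `_import_structure` dict above drives transformers' lazy-import machinery: heavy submodules are only imported when one of their names is first accessed. A minimal sketch of the idea (simplified; the real `_LazyModule` also handles `__dir__`, pickling, and missing backends):
import importlib
import types
class MiniLazyModule(types.ModuleType):
def __init__(self, name, import_structure):
super().__init__(name)
# attribute name -> submodule that defines it
self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}
def __getattr__(self, attr):
if attr not in self._attr_to_module:
raise AttributeError(attr)
module = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
return getattr(module, attr)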
| 340 | '''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
__a = logging.get_logger(__name__)
class OwlViTFeatureExtractor( OwlViTImageProcessor ):
"""simple docstring"""
def __init__( self , *args , **kwargs ) -> None:
"""simple docstring"""
warnings.warn(
"The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use OwlViTImageProcessor instead." , FutureWarning , )
super().__init__(*args , **kwargs ) | 145 | 0 |
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def printflock( *msgs ) -> None:
# lock on the script's own file so prints from concurrent ranks don't interleave
with open(__file__ , '''r''' ) as fh:
fcntl.flock(fh , fcntl.LOCK_EX )
try:
print(*msgs )
finally:
fcntl.flock(fh , fcntl.LOCK_UN )
local_rank = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
device = torch.device("cuda", local_rank)
hostname = socket.gethostname()
gpu = f"""[{hostname}-{local_rank}]"""
try:
# test distributed
dist.init_process_group("nccl")
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
rank = dist.get_rank()
world_size = dist.get_world_size()
printflock(f"""{gpu} is OK (global rank: {rank}/{world_size})""")
dist.barrier()
if rank == 0:
printflock(f"""pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}""")
except Exception:
printflock(f"""{gpu} is broken""")
raise
| 369 |
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = OrderedDict(
[
("audio-spectrogram-transformer", "ASTFeatureExtractor"),
("beit", "BeitFeatureExtractor"),
("chinese_clip", "ChineseCLIPFeatureExtractor"),
("clap", "ClapFeatureExtractor"),
("clip", "CLIPFeatureExtractor"),
("clipseg", "ViTFeatureExtractor"),
("conditional_detr", "ConditionalDetrFeatureExtractor"),
("convnext", "ConvNextFeatureExtractor"),
("cvt", "ConvNextFeatureExtractor"),
("data2vec-audio", "Wav2Vec2FeatureExtractor"),
("data2vec-vision", "BeitFeatureExtractor"),
("deformable_detr", "DeformableDetrFeatureExtractor"),
("deit", "DeiTFeatureExtractor"),
("detr", "DetrFeatureExtractor"),
("dinat", "ViTFeatureExtractor"),
("donut-swin", "DonutFeatureExtractor"),
("dpt", "DPTFeatureExtractor"),
("encodec", "EncodecFeatureExtractor"),
("flava", "FlavaFeatureExtractor"),
("glpn", "GLPNFeatureExtractor"),
("groupvit", "CLIPFeatureExtractor"),
("hubert", "Wav2Vec2FeatureExtractor"),
("imagegpt", "ImageGPTFeatureExtractor"),
("layoutlmv2", "LayoutLMv2FeatureExtractor"),
("layoutlmv3", "LayoutLMv3FeatureExtractor"),
("levit", "LevitFeatureExtractor"),
("maskformer", "MaskFormerFeatureExtractor"),
("mctct", "MCTCTFeatureExtractor"),
("mobilenet_v1", "MobileNetV1FeatureExtractor"),
("mobilenet_v2", "MobileNetV2FeatureExtractor"),
("mobilevit", "MobileViTFeatureExtractor"),
("nat", "ViTFeatureExtractor"),
("owlvit", "OwlViTFeatureExtractor"),
("perceiver", "PerceiverFeatureExtractor"),
("poolformer", "PoolFormerFeatureExtractor"),
("regnet", "ConvNextFeatureExtractor"),
("resnet", "ConvNextFeatureExtractor"),
("segformer", "SegformerFeatureExtractor"),
("sew", "Wav2Vec2FeatureExtractor"),
("sew-d", "Wav2Vec2FeatureExtractor"),
("speech_to_text", "Speech2TextFeatureExtractor"),
("speecht5", "SpeechT5FeatureExtractor"),
("swiftformer", "ViTFeatureExtractor"),
("swin", "ViTFeatureExtractor"),
("swinv2", "ViTFeatureExtractor"),
("table-transformer", "DetrFeatureExtractor"),
("timesformer", "VideoMAEFeatureExtractor"),
("tvlt", "TvltFeatureExtractor"),
("unispeech", "Wav2Vec2FeatureExtractor"),
("unispeech-sat", "Wav2Vec2FeatureExtractor"),
("van", "ConvNextFeatureExtractor"),
("videomae", "VideoMAEFeatureExtractor"),
("vilt", "ViltFeatureExtractor"),
("vit", "ViTFeatureExtractor"),
("vit_mae", "ViTFeatureExtractor"),
("vit_msn", "ViTFeatureExtractor"),
("wav2vec2", "Wav2Vec2FeatureExtractor"),
("wav2vec2-conformer", "Wav2Vec2FeatureExtractor"),
("wavlm", "Wav2Vec2FeatureExtractor"),
("whisper", "WhisperFeatureExtractor"),
("xclip", "CLIPFeatureExtractor"),
("yolos", "YolosFeatureExtractor"),
]
)
UpperCAmelCase__ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def feature_extractor_class_from_name( class_name :str ) -> Any:
for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
if class_name in extractors:
module_name = model_type_to_module_name(module_name )
module = importlib.import_module(F""".{module_name}""" , '''transformers.models''' )
try:
return getattr(module , class_name )
except AttributeError:
continue
for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
if getattr(extractor , '''__name__''' , None ) == class_name:
return extractor
# We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
# init and we return the proper dummy to get an appropriate error message.
main_module = importlib.import_module('''transformers''' )
if hasattr(main_module , class_name ):
return getattr(main_module , class_name )
return None
def get_feature_extractor_config( pretrained_model_name_or_path :Union[str, os.PathLike] , cache_dir :Optional[Union[str, os.PathLike]] = None , force_download :bool = False , resume_download :bool = False , proxies :Optional[Dict[str, str]] = None , use_auth_token :Optional[Union[bool, str]] = None , revision :Optional[str] = None , local_files_only :bool = False , **kwargs , ) -> Tuple:
resolved_config_file = get_file_from_repo(
pretrained_model_name_or_path , FEATURE_EXTRACTOR_NAME , cache_dir=cache_dir , force_download=force_download , resume_download=resume_download , proxies=proxies , use_auth_token=use_auth_token , revision=revision , local_files_only=local_files_only , )
if resolved_config_file is None:
logger.info(
'''Could not locate the feature extractor configuration file, will try to use the model config instead.''' )
return {}
with open(resolved_config_file , encoding='''utf-8''' ) as reader:
return json.load(reader )
class AutoFeatureExtractor:
'''simple docstring'''
def __init__( self : Tuple ) ->int:
"""simple docstring"""
raise EnvironmentError(
'''AutoFeatureExtractor is designed to be instantiated '''
'''using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method.''' )
@classmethod
@replace_list_option_in_docstrings(FEATURE_EXTRACTOR_MAPPING_NAMES )
def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ) ->List[Any]:
"""simple docstring"""
config = kwargs.pop('''config''' , None )
trust_remote_code = kwargs.pop('''trust_remote_code''' , None )
kwargs['''_from_auto'''] = True
config_dict , _ = FeatureExtractionMixin.get_feature_extractor_dict(pretrained_model_name_or_path , **kwargs )
feature_extractor_class = config_dict.get('''feature_extractor_type''' , None )
feature_extractor_auto_map = None
if "AutoFeatureExtractor" in config_dict.get('''auto_map''' , {} ):
feature_extractor_auto_map = config_dict['''auto_map''']['''AutoFeatureExtractor''']
# If we don't find the feature extractor class in the feature extractor config, let's try the model config.
if feature_extractor_class is None and feature_extractor_auto_map is None:
if not isinstance(config , PretrainedConfig ):
config = AutoConfig.from_pretrained(pretrained_model_name_or_path , **kwargs )
# It could be in `config.feature_extractor_type``
feature_extractor_class = getattr(config , '''feature_extractor_type''' , None )
if hasattr(config , '''auto_map''' ) and "AutoFeatureExtractor" in config.auto_map:
feature_extractor_auto_map = config.auto_map['''AutoFeatureExtractor''']
if feature_extractor_class is not None:
feature_extractor_class = feature_extractor_class_from_name(feature_extractor_class )
has_remote_code = feature_extractor_auto_map is not None
has_local_code = feature_extractor_class is not None or type(config ) in FEATURE_EXTRACTOR_MAPPING
trust_remote_code = resolve_trust_remote_code(
trust_remote_code , pretrained_model_name_or_path , has_local_code , has_remote_code )
if has_remote_code and trust_remote_code:
feature_extractor_class = get_class_from_dynamic_module(
feature_extractor_auto_map , pretrained_model_name_or_path , **kwargs )
_ = kwargs.pop('''code_revision''' , None )
if os.path.isdir(pretrained_model_name_or_path ):
feature_extractor_class.register_for_auto_class()
return feature_extractor_class.from_dict(config_dict , **kwargs )
elif feature_extractor_class is not None:
return feature_extractor_class.from_dict(config_dict , **kwargs )
# Last try: we use the FEATURE_EXTRACTOR_MAPPING.
elif type(config ) in FEATURE_EXTRACTOR_MAPPING:
feature_extractor_class = FEATURE_EXTRACTOR_MAPPING[type(config )]
return feature_extractor_class.from_dict(config_dict , **kwargs )
raise ValueError(
F"""Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a """
F"""`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following """
F"""`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys() )}""" )
@staticmethod
def register( config_class , feature_extractor_class ) ->Optional[int]:
"""simple docstring"""
FEATURE_EXTRACTOR_MAPPING.register(config_class , feature_extractor_class )
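For reference, the resolution logic above is what backs the public one-liner (real transformers API; the checkpoint is just an example):
from transformers import AutoFeatureExtractor
# "facebook/wav2vec2-base-960h" resolves through its config to Wav2Vec2FeatureExtractor
feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")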
| 26 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_altclip': [
'ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'AltCLIPConfig',
'AltCLIPTextConfig',
'AltCLIPVisionConfig',
],
'processing_altclip': ['AltCLIPProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_altclip'] = [
'ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'AltCLIPPreTrainedModel',
'AltCLIPModel',
'AltCLIPTextModel',
'AltCLIPVisionModel',
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 11 | from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''YituTech/conv-bert-base''': '''https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json''',
'''YituTech/conv-bert-medium-small''': (
'''https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json'''
),
'''YituTech/conv-bert-small''': '''https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json''',
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class ConvBertConfig( PretrainedConfig ):
model_type = "convbert"
def __init__( self , vocab_size=3_05_22 , hidden_size=7_68 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=30_72 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , embedding_size=7_68 , head_ratio=2 , conv_kernel_size=9 , num_groups=1 , classifier_dropout=None , **kwargs , ):
super().__init__(
pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs , )
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.embedding_size = embedding_size
self.head_ratio = head_ratio
self.conv_kernel_size = conv_kernel_size
self.num_groups = num_groups
self.classifier_dropout = classifier_dropout
class ConvBertOnnxConfig( OnnxConfig ):
@property
def inputs( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
else:
dynamic_axis = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("token_type_ids", dynamic_axis),
] )
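A hedged sketch of how such an OnnxConfig is typically consumed (the `transformers.onnx.export` signature shown matches older 4.x releases; treat the opset and file name as illustrative, not prescribed by this file):
from pathlib import Path
from transformers import AutoModel, AutoTokenizer
from transformers.onnx import export
tokenizer = AutoTokenizer.from_pretrained("YituTech/conv-bert-base")
model = AutoModel.from_pretrained("YituTech/conv-bert-base")
onnx_config = ConvBertOnnxConfig(model.config)
export(tokenizer, model, onnx_config, opset=13, output=Path("convbert.onnx"))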
| 87 | 0 |
"""simple docstring"""
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class HfFileSystem( AbstractFileSystem ):
'''simple docstring'''
root_marker = ''''''
protocol = '''hf-legacy''' # "hf://"" is reserved for hffs
def __init__( self , repo_info = None , token = None , **kwargs , ):
super().__init__(self , **kwargs )
self.repo_info = repo_info
self.token = token
self.dir_cache = None
def _get_dirs( self ):
if self.dir_cache is None:
self.dir_cache = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
self.dir_cache[hf_file.rfilename] = {
'''name''': hf_file.rfilename,
'''size''': None,
'''type''': '''file''',
}
self.dir_cache.update(
{
str(d ): {'''name''': str(d ), '''size''': None, '''type''': '''directory'''}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
def _open( self , path , mode = "rb" , **kwargs , ):
if not isinstance(self.repo_info , DatasetInfo ):
raise NotImplementedError(f"""Open is only implemented for dataset repositories, but got {self.repo_info}""" )
url = hf_hub_url(self.repo_info.id , path , revision=self.repo_info.sha )
return fsspec.open(
url , mode=mode , headers=get_authentication_headers_for_url(url , use_auth_token=self.token ) , client_kwargs={'''trust_env''': True} , ).open()
def info( self , path , **kwargs ):
self._get_dirs()
path = self._strip_protocol(path )
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(path )
def ls( self , path , detail=False , **kwargs ):
self._get_dirs()
path = PurePosixPath(path.strip('''/''' ) )
paths = {}
for p, f in self.dir_cache.items():
p = PurePosixPath(p.strip('''/''' ) )
root = p.parent
if root == path:
paths[str(p )] = f
out = list(paths.values() )
if detail:
return out
else:
return sorted(f['''name'''] for f in out ) | 188 |
"""simple docstring"""
def dodecahedron_surface_area( edge: float ) -> float:
if not isinstance(edge , (int, float) ) or edge <= 0:
raise ValueError('''Edge length must be a positive number.''')
return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)
def dodecahedron_volume( edge: float ) -> float:
if not isinstance(edge , (int, float) ) or edge <= 0:
raise ValueError('''Edge length must be a positive number.''')
return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)
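Quick numeric check for a unit edge (the constants follow directly from the closed forms above: 3*sqrt(25 + 10*sqrt(5)) ≈ 20.6457288 and (15 + 7*sqrt(5))/4 ≈ 7.6631190):
assert abs(dodecahedron_surface_area(1) - 20.6457288) < 1e-5
assert abs(dodecahedron_volume(1) - 7.6631190) < 1e-5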
if __name__ == "__main__":
import doctest
doctest.testmod() | 188 | 1 |
"""simple docstring"""
import requests
def send_slack_message(message_body : str , slack_url : str ) -> None:
headers = {'Content-Type': 'application/json'}
response = requests.post(slack_url , json={'text': message_body} , headers=headers )
if response.status_code != 2_00:
error_message = (
'Request to slack returned an error '
F'{response.status_code}, the response is:\n{response.text}'
)
raise ValueError(error_message )
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message('''<YOUR MESSAGE BODY>''', '''<SLACK CHANNEL URL>''')
| 113 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'''configuration_blip''': [
'''BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BlipConfig''',
'''BlipTextConfig''',
'''BlipVisionConfig''',
],
'''processing_blip''': ['''BlipProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''image_processing_blip'''] = ['''BlipImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''modeling_blip'''] = [
'''BLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BlipModel''',
'''BlipPreTrainedModel''',
'''BlipForConditionalGeneration''',
'''BlipForQuestionAnswering''',
'''BlipVisionModel''',
'''BlipTextModel''',
'''BlipForImageTextRetrieval''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''modeling_tf_blip'''] = [
'''TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFBlipModel''',
'''TFBlipPreTrainedModel''',
'''TFBlipForConditionalGeneration''',
'''TFBlipForQuestionAnswering''',
'''TFBlipVisionModel''',
'''TFBlipTextModel''',
'''TFBlipForImageTextRetrieval''',
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
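Typical downstream usage of the classes exported above (illustrative; the checkpoint name is a real public one, the PIL image is assumed to exist):
from transformers import BlipForConditionalGeneration, BlipProcessor
processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base")
# inputs = processor(images=pil_image, return_tensors="pt")
# caption = processor.decode(model.generate(**inputs)[0], skip_special_tokens=True)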
| 113 | 1 |
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_TRANSFORMERS = os.path.join(git_repo_path, """src""", """transformers""")
SCREAMING_SNAKE_CASE_ = """
{0} = None
"""
SCREAMING_SNAKE_CASE_ = """
class {0}(metaclass=DummyObject):
_backends = {1}
def __init__(self, *args, **kwargs):
requires_backends(self, {1})
"""
SCREAMING_SNAKE_CASE_ = """
def {0}(*args, **kwargs):
requires_backends({0}, {1})
"""
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = find_backend(""" _import_structure[\"models.albert\"].append(\"AlbertTokenizerFast\")""" )
self.assertIsNone(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = find_backend(""" if not is_tokenizers_available():""" )
self.assertEqual(lowerCamelCase__ ,"""tokenizers""" )
SCREAMING_SNAKE_CASE = find_backend(""" if not is_tensorflow_text_available():""" )
self.assertEqual(lowerCamelCase__ ,"""tensorflow_text""" )
SCREAMING_SNAKE_CASE = find_backend(""" if not (is_sentencepiece_available() and is_tokenizers_available()):""" )
self.assertEqual(lowerCamelCase__ ,"""sentencepiece_and_tokenizers""" )
SCREAMING_SNAKE_CASE = find_backend(
""" if not (is_sentencepiece_available() and is_tensorflow_text_available()):""" )
self.assertEqual(lowerCamelCase__ ,"""sentencepiece_and_tensorflow_text""" )
SCREAMING_SNAKE_CASE = find_backend(
""" if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):""" )
self.assertEqual(lowerCamelCase__ ,"""sentencepiece_and_tokenizers_and_vision""" )
def SCREAMING_SNAKE_CASE__ ( self : str ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = read_init()
# We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
self.assertIn("""torch""" ,lowerCamelCase__ )
self.assertIn("""tensorflow_text""" ,lowerCamelCase__ )
self.assertIn("""sentencepiece_and_tokenizers""" ,lowerCamelCase__ )
# Likewise, we can't assert on the exact content of a key
self.assertIn("""BertModel""" ,objects["""torch"""] )
self.assertIn("""TFBertModel""" ,objects["""tf"""] )
self.assertIn("""FlaxBertModel""" ,objects["""flax"""] )
self.assertIn("""BertModel""" ,objects["""torch"""] )
self.assertIn("""TFBertTokenizer""" ,objects["""tensorflow_text"""] )
self.assertIn("""convert_slow_tokenizer""" ,objects["""sentencepiece_and_tokenizers"""] )
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = create_dummy_object("""CONSTANT""" ,"""'torch'""" )
self.assertEqual(lowerCamelCase__ ,"""\nCONSTANT = None\n""" )
SCREAMING_SNAKE_CASE = create_dummy_object("""function""" ,"""'torch'""" )
self.assertEqual(
lowerCamelCase__ ,"""\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n""" )
SCREAMING_SNAKE_CASE = """
class FakeClass(metaclass=DummyObject):
_backends = 'torch'
def __init__(self, *args, **kwargs):
requires_backends(self, 'torch')
"""
SCREAMING_SNAKE_CASE = create_dummy_object("""FakeClass""" ,"""'torch'""" )
self.assertEqual(lowerCamelCase__ ,lowerCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self : int ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE = """# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
CONSTANT = None
def function(*args, **kwargs):
requires_backends(function, [\"torch\"])
class FakeClass(metaclass=DummyObject):
_backends = [\"torch\"]
def __init__(self, *args, **kwargs):
requires_backends(self, [\"torch\"])
"""
SCREAMING_SNAKE_CASE = create_dummy_files({"""torch""": ["""CONSTANT""", """function""", """FakeClass"""]} )
self.assertEqual(dummy_files["""torch"""] ,lowerCamelCase__ )
| 193 |
from __future__ import annotations
import math
def is_prime( number: int ) -> bool:
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def list_truncated_nums( n: int ) -> list[int]:
'''simple docstring'''
str_num = str(n )
list_nums = [n]
for i in range(1 , len(str_num ) ):
list_nums.append(int(str_num[i:] ) )
list_nums.append(int(str_num[:-i] ) )
return list_nums
def validate( n: int ) -> bool:
'''simple docstring'''
if len(str(n ) ) > 3:
if not is_prime(int(str(n )[-3:] ) ) or not is_prime(int(str(n )[:3] ) ):
return False
return True
def compute_truncated_primes( count: int = 11 ) -> list[int]:
'''simple docstring'''
list_truncated_primes: list[int] = []
num = 13
while len(list_truncated_primes ) != count:
if validate(num ):
list_nums = list_truncated_nums(num )
if all(is_prime(i ) for i in list_nums ):
list_truncated_primes.append(num )
num += 2
return list_truncated_primes
def solution( ) -> int:
'''simple docstring'''
return sum(compute_truncated_primes(11 ) )
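For example, 3797 is two-sided truncatable: left truncations give 797, 97, 7 and right truncations give 379, 37, 3, all prime. A quick check against the helpers above:
assert sorted(list_truncated_nums(3797)) == [3, 7, 37, 97, 379, 797, 3797]
assert all(is_prime(i) for i in list_truncated_nums(3797))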
if __name__ == "__main__":
print(F'''{sum(compute_truncated_primes(1_1)) = }''')
| 193 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_mobilebert''': [
'''MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''MobileBertConfig''',
'''MobileBertOnnxConfig''',
],
'''tokenization_mobilebert''': ['''MobileBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''tokenization_mobilebert_fast'''] = ['''MobileBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''modeling_mobilebert'''] = [
'''MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MobileBertForMaskedLM''',
'''MobileBertForMultipleChoice''',
'''MobileBertForNextSentencePrediction''',
'''MobileBertForPreTraining''',
'''MobileBertForQuestionAnswering''',
'''MobileBertForSequenceClassification''',
'''MobileBertForTokenClassification''',
'''MobileBertLayer''',
'''MobileBertModel''',
'''MobileBertPreTrainedModel''',
'''load_tf_weights_in_mobilebert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''modeling_tf_mobilebert'''] = [
'''TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFMobileBertForMaskedLM''',
'''TFMobileBertForMultipleChoice''',
'''TFMobileBertForNextSentencePrediction''',
'''TFMobileBertForPreTraining''',
'''TFMobileBertForQuestionAnswering''',
'''TFMobileBertForSequenceClassification''',
'''TFMobileBertForTokenClassification''',
'''TFMobileBertMainLayer''',
'''TFMobileBertModel''',
'''TFMobileBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 19 | import numpy as np
def power_iteration( input_matrix , vector , error_tol = 1e-12 , max_iterations = 100 , ):
'''simple docstring'''
assert np.shape(input_matrix )[0] == np.shape(input_matrix )[1]
# Ensure proper dimensionality.
assert np.shape(input_matrix )[0] == np.shape(vector )[0]
# Ensure inputs are either both complex or both real
assert np.iscomplexobj(input_matrix ) == np.iscomplexobj(vector )
is_complex = np.iscomplexobj(input_matrix )
if is_complex:
# Ensure complex input_matrix is Hermitian
assert np.array_equal(input_matrix , input_matrix.conj().T )
# Set convergence to False. Will define convergence when we exceed max_iterations
# or when we have small changes from one iteration to next.
convergence = False
lambda_previous = 0
iterations = 0
error = 1e12
while not convergence:
# Multiply matrix by the vector.
w = np.dot(input_matrix , vector )
# Normalize the resulting output vector.
vector = w / np.linalg.norm(w )
# Find rayleigh quotient
# (faster than usual b/c we know vector is normalized already)
vector_h = vector.conj().T if is_complex else vector.T
lambda_ = np.dot(vector_h , np.dot(input_matrix , vector ) )
# Check convergence.
error = np.abs(lambda_ - lambda_previous ) / lambda_
iterations += 1
if error <= error_tol or iterations >= max_iterations:
convergence = True
lambda_previous = lambda_
if is_complex:
lambda_ = np.real(lambda_ )
return lambda_, vector
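A tiny sanity check of the routine above; a diagonal matrix makes the dominant eigenpair obvious:
# diag(2, 1) has dominant eigenvalue 2 with eigenvector e1
eigen_value, eigen_vector = power_iteration(np.diag([2.0, 1.0]), np.array([1.0, 1.0]))
assert abs(eigen_value - 2.0) < 1e-6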
def test_power_iteration( ):
'''simple docstring'''
real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]] )
real_vector = np.array([41, 4, 20] )
complex_input_matrix = real_input_matrix.astype(np.complex128 )
imag_matrix = np.triu(1j * complex_input_matrix , 1 )
complex_input_matrix += imag_matrix
complex_input_matrix += -1 * imag_matrix.T
complex_vector = np.array([41, 4, 20] ).astype(np.complex128 )
for problem_type in ["real", "complex"]:
if problem_type == "real":
input_matrix = real_input_matrix
vector = real_vector
elif problem_type == "complex":
input_matrix = complex_input_matrix
vector = complex_vector
# Our implementation.
eigen_value, eigen_vector = power_iteration(input_matrix , vector )
# Numpy implementation.
# Get eigenvalues and eigenvectors using built-in numpy
# eigh (eigh used for symmetric or hermetian matrices).
eigen_values, eigen_vectors = np.linalg.eigh(input_matrix )
# Last eigenvalue is the maximum one.
eigen_value_max = eigen_values[-1]
# Last column in this matrix is eigenvector corresponding to largest eigenvalue.
eigen_vector_max = eigen_vectors[:, -1]
# Check our implementation and numpy gives close answers.
assert np.abs(eigen_value - eigen_value_max ) <= 1e-6
# Take absolute values element wise of each eigenvector.
# as they are only unique to a minus sign.
assert np.linalg.norm(np.abs(eigen_vector ) - np.abs(eigen_vector_max ) ) <= 1e-6
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration()
| 43 | 0 |
'''simple docstring'''
import math
def main( ) -> None:
message = input('''Enter message: ''' )
key = int(input(F"""Enter key [2-{len(message ) - 1}]: """ ) )
mode = input('''Encryption/Decryption [e/d]: ''' )
if mode.lower().startswith('''e''' ):
text = encrypt_message(key , message )
elif mode.lower().startswith('''d''' ):
text = decrypt_message(key , message )
# Append pipe symbol (vertical bar) to identify spaces at the end.
print(F"""Output:\n{text + "|"}""" )
def encrypt_message( key: int , message: str ) -> str:
cipher_text = [''''''] * key
for col in range(key ):
pointer = col
while pointer < len(message ):
cipher_text[col] += message[pointer]
pointer += key
return "".join(cipher_text )
def decrypt_message( key: int , message: str ) -> str:
num_cols = math.ceil(len(message ) / key )
num_rows = key
num_shaded_boxes = (num_cols * num_rows) - len(message )
plain_text = [''''''] * num_cols
col = 0
row = 0
for symbol in message:
plain_text[col] += symbol
col += 1
if (
(col == num_cols)
or (col == num_cols - 1)
and (row >= num_rows - num_shaded_boxes)
):
col = 0
row += 1
return "".join(plain_text )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 299 |
'''simple docstring'''
from __future__ import annotations
import math
def minimax( depth: int , node_index: int , is_max: bool , scores: list[int] , height: float ) -> int:
if depth < 0:
raise ValueError('''Depth cannot be less than 0''' )
if not scores:
raise ValueError('''Scores cannot be empty''' )
if depth == height:
return scores[node_index]
return (
max(
minimax(depth + 1 , node_index * 2 , False , scores , height ) , minimax(depth + 1 , node_index * 2 + 1 , False , scores , height ) , )
if is_max
else min(
minimax(depth + 1 , node_index * 2 , True , scores , height ) , minimax(depth + 1 , node_index * 2 + 1 , True , scores , height ) , )
)
def main( ) -> None:
scores = [90, 23, 6, 33, 21, 65, 1_23, 3_44_23]
height = math.log(len(scores ) , 2 )
print(F"""Optimal value : {minimax(0 , 0 , True , scores , height )}""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 299 | 1 |
"""simple docstring"""
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
a_ = "\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n"
a_ = "\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metrics is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n"
a_ = "\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large' Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: \"c\" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric('mauve')\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Mauve( datasets.Metric ):
"""simple docstring"""
def _info( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''https://github.com/krishnap25/mauve''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/krishnap25/mauve'''] , reference_urls=[
'''https://arxiv.org/abs/2102.01454''',
'''https://github.com/krishnap25/mauve''',
] , )
def _compute( self , predictions , references , p_features=None , q_features=None , p_tokens=None , q_tokens=None , num_buckets="auto" , pca_max_data=-1 , kmeans_explained_var=0.9 , kmeans_num_redo=5 , kmeans_max_iter=500 , featurize_model_name="gpt2-large" , device_id=-1 , max_text_length=1024 , divergence_curve_discretization_size=25 , mauve_scaling_factor=5 , verbose=True , seed=25 , ):
'''simple docstring'''
out = compute_mauve(
p_text=predictions , q_text=references , p_features=p_features , q_features=q_features , p_tokens=p_tokens , q_tokens=q_tokens , num_buckets=num_buckets , pca_max_data=pca_max_data , kmeans_explained_var=kmeans_explained_var , kmeans_num_redo=kmeans_num_redo , kmeans_max_iter=kmeans_max_iter , featurize_model_name=featurize_model_name , device_id=device_id , max_text_length=max_text_length , divergence_curve_discretization_size=divergence_curve_discretization_size , mauve_scaling_factor=mauve_scaling_factor , verbose=verbose , seed=seed , )
return out
| 179 |
'''simple docstring'''
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
_lowercase : List[str] = ["text", "image", "audio"]
def create_inputs( input_types: List[str] ):
"""simple docstring"""
inputs = []
for input_type in input_types:
if input_type == "text":
inputs.append('''Text input''' )
elif input_type == "image":
inputs.append(
Image.open(Path(get_tests_dir('''fixtures/tests_samples/COCO''' ) ) / '''000000039769.png''' ).resize((512, 512) ) )
elif input_type == "audio":
inputs.append(torch.ones(3000 ) )
elif isinstance(input_type , list ):
inputs.append(create_inputs(input_type ) )
else:
raise ValueError(F'''Invalid type requested: {input_type}''' )
return inputs
def output_types( outputs: List ):
"""simple docstring"""
output_types = []
for output in outputs:
if isinstance(output , (str, AgentText) ):
output_types.append('''text''' )
elif isinstance(output , (Image.Image, AgentImage) ):
output_types.append('''image''' )
elif isinstance(output , (torch.Tensor, AgentAudio) ):
output_types.append('''audio''' )
else:
raise ValueError(F'''Invalid output: {output}''' )
return output_types
@is_tool_test
class ToolTesterMixin:
def test_inputs_outputs( self ):
"""simple docstring"""
self.assertTrue(hasattr(self.tool , '''inputs''' ) )
self.assertTrue(hasattr(self.tool , '''outputs''' ) )
inputs = self.tool.inputs
for _input in inputs:
if isinstance(_input , list ):
for __input in _input:
self.assertTrue(__input in authorized_types )
else:
self.assertTrue(_input in authorized_types )
outputs = self.tool.outputs
for _output in outputs:
self.assertTrue(_output in authorized_types )
def test_call( self ):
"""simple docstring"""
inputs = create_inputs(self.tool.inputs )
outputs = self.tool(*inputs )
# There is a single output
if len(self.tool.outputs ) == 1:
outputs = [outputs]
self.assertListEqual(output_types(outputs ) , self.tool.outputs )
def test_common_attributes( self ):
"""simple docstring"""
self.assertTrue(hasattr(self.tool , '''description''' ) )
self.assertTrue(hasattr(self.tool , '''default_checkpoint''' ) )
self.assertTrue(self.tool.description.startswith('''This is a tool that''' ) )
def test_agent_types_outputs( self ):
"""simple docstring"""
inputs = create_inputs(self.tool.inputs )
outputs = self.tool(*inputs )
if not isinstance(outputs , list ):
outputs = [outputs]
self.assertEqual(len(outputs ) , len(self.tool.outputs ) )
for output, output_type in zip(outputs , self.tool.outputs ):
agent_type = AGENT_TYPE_MAPPING[output_type]
self.assertTrue(isinstance(output , agent_type ) )
def test_agent_types_inputs( self ):
"""simple docstring"""
inputs = create_inputs(self.tool.inputs )
_inputs = []
for _input, input_type in zip(inputs , self.tool.inputs ):
if isinstance(input_type , list ):
_inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] )
else:
_inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) )
# Should not raise an error
outputs = self.tool(*_inputs )
if not isinstance(outputs , list ):
outputs = [outputs]
self.assertEqual(len(outputs ) , len(self.tool.outputs ) )
| 93 | 0 |
'''simple docstring'''
def solution( n: int = 1_00 ) -> int:
sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
square_of_sum = (n * (n + 1) / 2) ** 2
return int(square_of_sum - sum_of_squares )
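Worked arithmetic: for n = 10 the sum of squares is 385 and the square of the sum is 3025, so the difference is 2640; for the default n = 100 the result is 25164150.
assert solution(10) == 2640
assert solution() == 25164150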
if __name__ == "__main__":
print(f'''{solution() = }''')
| 280 |
'''simple docstring'''
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_text_dataset( dataset , expected_features ):
assert isinstance(dataset , Dataset )
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory' , [False, True] )
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
UpperCAmelCase : int = tmp_path / 'cache'
UpperCAmelCase : List[str] = {'text': 'string'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
UpperCAmelCase : Tuple = TextDatasetReader(UpperCAmelCase_ , cache_dir=UpperCAmelCase_ , keep_in_memory=UpperCAmelCase_ ).read()
_check_text_dataset(UpperCAmelCase_ , UpperCAmelCase_ )
@pytest.mark.parametrize(
'features' , [
None,
{'text': 'string'},
{'text': 'int32'},
{'text': 'float32'},
] , )
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
UpperCAmelCase : Optional[int] = tmp_path / 'cache'
UpperCAmelCase : List[str] = {'text': 'string'}
UpperCAmelCase : Optional[int] = features.copy() if features else default_expected_features
UpperCAmelCase : int = (
Features({feature: Value(UpperCAmelCase_ ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCAmelCase : Union[str, Any] = TextDatasetReader(UpperCAmelCase_ , features=UpperCAmelCase_ , cache_dir=UpperCAmelCase_ ).read()
_check_text_dataset(UpperCAmelCase_ , UpperCAmelCase_ )
@pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] )
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
UpperCAmelCase : Optional[int] = tmp_path / 'cache'
UpperCAmelCase : Tuple = {'text': 'string'}
UpperCAmelCase : List[str] = TextDatasetReader(UpperCAmelCase_ , cache_dir=UpperCAmelCase_ , split=UpperCAmelCase_ ).read()
_check_text_dataset(UpperCAmelCase_ , UpperCAmelCase_ )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('path_type' , [str, list] )
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
if issubclass(UpperCAmelCase_ , UpperCAmelCase_ ):
UpperCAmelCase : Tuple = text_path
elif issubclass(UpperCAmelCase_ , UpperCAmelCase_ ):
UpperCAmelCase : Optional[Any] = [text_path]
UpperCAmelCase : List[Any] = tmp_path / 'cache'
UpperCAmelCase : Union[str, Any] = {'text': 'string'}
UpperCAmelCase : List[Any] = TextDatasetReader(UpperCAmelCase_ , cache_dir=UpperCAmelCase_ ).read()
_check_text_dataset(UpperCAmelCase_ , UpperCAmelCase_ )
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_=("train",) ):
assert isinstance(UpperCAmelCase_ , UpperCAmelCase_ )
for split in splits:
UpperCAmelCase : Union[str, Any] = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory' , [False, True] )
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
UpperCAmelCase : Any = tmp_path / 'cache'
UpperCAmelCase : List[str] = {'text': 'string'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
UpperCAmelCase : int = TextDatasetReader({'train': text_path} , cache_dir=UpperCAmelCase_ , keep_in_memory=UpperCAmelCase_ ).read()
_check_text_datasetdict(UpperCAmelCase_ , UpperCAmelCase_ )
@pytest.mark.parametrize(
'features' , [
None,
{'text': 'string'},
{'text': 'int32'},
{'text': 'float32'},
] , )
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
UpperCAmelCase : Union[str, Any] = tmp_path / 'cache'
# CSV file loses col_1 string dtype information: default now is "int64" instead of "string"
UpperCAmelCase : Tuple = {'text': 'string'}
UpperCAmelCase : Union[str, Any] = features.copy() if features else default_expected_features
UpperCAmelCase : int = (
Features({feature: Value(UpperCAmelCase_ ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCAmelCase : List[Any] = TextDatasetReader({'train': text_path} , features=UpperCAmelCase_ , cache_dir=UpperCAmelCase_ ).read()
_check_text_datasetdict(UpperCAmelCase_ , UpperCAmelCase_ )
@pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] )
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
if split:
UpperCAmelCase : int = {split: text_path}
else:
UpperCAmelCase : int = 'train'
UpperCAmelCase : Any = {'train': text_path, 'test': text_path}
UpperCAmelCase : Dict = tmp_path / 'cache'
UpperCAmelCase : Any = {'text': 'string'}
UpperCAmelCase : List[str] = TextDatasetReader(UpperCAmelCase_ , cache_dir=UpperCAmelCase_ ).read()
_check_text_datasetdict(UpperCAmelCase_ , UpperCAmelCase_ , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
| 280 | 1 |
from typing import Any
class _lowercase :
"""simple docstring"""
def __init__(self , lowerCamelCase_ ):
"""simple docstring"""
a = data
a = None
def __repr__(self ):
"""simple docstring"""
return F'''Node({self.data})'''
class LinkedList:
    def __init__(self):
        self.head = None
    def __iter__(self):
        node = self.head
        while node:
            yield node.data
            node = node.next
    def __len__(self):
        return sum(1 for _ in self)
    def __repr__(self):
        return "->".join([str(item) for item in self])
    def __getitem__(self, index: int):
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        for i, node in enumerate(self):
            if i == index:
                return node
        return None
    def __setitem__(self, index: int, data: Any):
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        current = self.head
        for _ in range(index):
            current = current.next
        current.data = data
    def insert_tail(self, data: Any):
        self.insert_nth(len(self), data)
    def insert_head(self, data: Any):
        self.insert_nth(0, data)
    def insert_nth(self, index: int, data: Any):
        if not 0 <= index <= len(self):
            raise IndexError("list index out of range")
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
    def print_list(self):  # print every node data
        print(self)
    def delete_head(self):
        return self.delete_nth(0)
    def delete_tail(self):  # delete from tail
        return self.delete_nth(len(self) - 1)
    def delete_nth(self, index: int = 0):
        if not 0 <= index <= len(self) - 1:  # test if index is valid
            raise IndexError("List index out of range.")
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data
    def is_empty(self):
        return self.head is None
    def reverse(self):
        prev = None
        current = self.head
        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # Return prev in order to put the head at the end
        self.head = prev
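# Self-tests: the first exercises integer data, the second mixed data types
# (strings, floats, nested Node objects and None).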
def test_singly_linked_list() -> None:
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list) == ""
    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.
    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.
    for i in range(10):
        assert len(linked_list) == i
        linked_list.insert_nth(i, i + 1)
    assert str(linked_list) == "->".join(str(i) for i in range(1, 11))
    linked_list.insert_head(0)
    linked_list.insert_tail(11)
    assert str(linked_list) == "->".join(str(i) for i in range(0, 12))
    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9) == 10
    assert linked_list.delete_tail() == 11
    assert len(linked_list) == 9
    assert str(linked_list) == "->".join(str(i) for i in range(1, 10))
    assert all(linked_list[i] == i + 1 for i in range(0, 9)) is True
    for i in range(0, 9):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0, 9)) is True
    linked_list.reverse()
    assert str(linked_list) == "->".join(str(i) for i in range(-8, 1))
def test_singly_linked_list_2() -> None:
    test_input = [
        -9,
        100,
        Node(77345112),
        "dlrow olleH",
        7,
        5555,
        0,
        -192.55555,
        "Hello, world!",
        77.9,
        Node(10),
        None,
        None,
        12.20,
    ]
    linked_list = LinkedList()
    for i in test_input:
        linked_list.insert_tail(i)
    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )
    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )
    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )
    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(10)
    assert result is None
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )
    # Add a Node instance to its head
    linked_list.insert_head(Node("Hello again, world!"))
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )
    # Add None to its tail
    linked_list.insert_tail(None)
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )
    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list)
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )
def main() -> None:
    from doctest import testmod
    testmod()
    linked_list = LinkedList()
    linked_list.insert_head(input("Inserting 1st at head ").strip())
    linked_list.insert_head(input("Inserting 2nd at head ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    linked_list.insert_tail(input("\nInserting 1st at tail ").strip())
    linked_list.insert_tail(input("Inserting 2nd at tail ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    print("\nDelete head")
    linked_list.delete_head()
    print("Delete tail")
    linked_list.delete_tail()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nReverse linked list")
    linked_list.reverse()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nString representation of linked list:")
    print(linked_list)
    print("\nReading/changing Node data using indexing:")
    print(f"Element at Position 1: {linked_list[1]}")
    linked_list[1] = input("Enter New Value: ").strip()
    print("New list:")
    print(linked_list)
    print(f"length of linked_list is : {len(linked_list)}")
if __name__ == "__main__":
    main()
| 227 |
"""Thin wrappers around the OpenWeatherMap REST API (https://api.openweathermap.org/data/2.5/)."""
import requests
APPID = ""  # <-- Put your OpenWeatherMap appid here!
URL_BASE = "https://api.openweathermap.org/data/2.5/"
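# NOTE: each wrapper below forwards locals() as the query string, so the
# parameter names must match the OpenWeatherMap query keys (q, lat, lon, appid).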
def current_weather(q: str = "Chicago", appid: str = APPID) -> dict:
    return requests.get(URL_BASE + "weather", params=locals()).json()
def weather_forecast(q: str = "Kolkata, India", appid: str = APPID) -> dict:
    return requests.get(URL_BASE + "forecast", params=locals()).json()
def weather_onecall(lat: float = 55.68, lon: float = 12.57, appid: str = APPID) -> dict:
    return requests.get(URL_BASE + "onecall", params=locals()).json()
if __name__ == "__main__":
from pprint import pprint
while True:
        location = input("Enter a location:").strip()
if location:
pprint(current_weather(location))
else:
break
| 27 | 0 |
"""simple docstring"""
def equation(x: float) -> float:
    return 10 - x * x
def bisection(a: float, b: float) -> float:
    # Bolzano's theorem: a sign change of the function on [a, b] guarantees a root in between.
    if equation(a) * equation(b) >= 0:
        raise ValueError("Wrong space!")
    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c) * equation(a) < 0:
            b = c
        else:
            a = c
    return c
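# Sanity check: 10 - x*x has roots at +/- sqrt(10) ~= +/- 3.1623, so both
# bracketing intervals below contain the positive root and should print ~3.16.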
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
| 357 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVision2Seq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
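# PipelineTool subclass wrapping a BLIP captioning checkpoint: encode() preprocesses
# the image, forward() runs generation, decode() converts token ids back to text.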
class ImageCaptioningTool(PipelineTool):
    default_checkpoint = "Salesforce/blip-image-captioning-base"
    description = (
        "This is a tool that generates a description of an image. It takes an input named `image` which should be the "
        "image to caption, and returns a text that contains the description in English."
    )
    name = "image_captioner"
    model_class = AutoModelForVision2Seq
    inputs = ["image"]
    outputs = ["text"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)
    def encode(self, image: "Image"):
        return self.pre_processor(images=image, return_tensors="pt")
    def forward(self, inputs):
        return self.model.generate(**inputs)
    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
| 296 | 0 |
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'''},
'''merges_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'''},
'''tokenizer_config_file''': {
'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'''
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'''facebook/blenderbot-3B''': 128}
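# Fast (Rust-backed) Blenderbot tokenizer; mirrors RobertaTokenizerFast, except that
# build_inputs_with_special_tokens only appends a single EOS token.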
class BlenderbotTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BlenderbotTokenizer
    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, trim_offsets=True, **kwargs):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets, **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])
            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
    @property
    # Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)
    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. includes the space before it.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)
    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        return token_ids_0 + [self.eos_token_id]
    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(" " + text)
            else:
                # Generated responses should contain them already.
                inputs.append(text)
        full_string = " ".join(inputs)
        input_ids = self.encode(full_string)
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens.")
        return input_ids
| 41 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'''configuration_opt''': ['''OPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''OPTConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_opt'''] = [
'''OPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''OPTForCausalLM''',
'''OPTModel''',
'''OPTPreTrainedModel''',
'''OPTForSequenceClassification''',
'''OPTForQuestionAnswering''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_opt'''] = ['''TFOPTForCausalLM''', '''TFOPTModel''', '''TFOPTPreTrainedModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_opt'''] = [
'''FlaxOPTForCausalLM''',
'''FlaxOPTModel''',
'''FlaxOPTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 8 | 0 |
import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LxmertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LxmertTokenizer
    rust_tokenizer_class = LxmertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    def setUp(self):
        super().setUp()
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "I was born in 92000, and this is falsé."
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
| 365 |
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe.model")
class BartphoTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BartphoTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp(self):
        super().setUp()
        vocab = ["▁This", "▁is", "▁a", "▁t", "est"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.monolingual_vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["monolingual_vocab_file"])
        with open(self.monolingual_vocab_file, "w", encoding="utf-8") as fp:
            for token in vocab_tokens:
                fp.write(f"{token} {vocab_tokens[token]}\n")
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        tokenizer.save_pretrained(self.tmpdirname)
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BartphoTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "This is a là test"
        output_text = "This is a<unk><unk> test"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        text = "This is a là test"
        bpe_tokens = "▁This ▁is ▁a ▁l à ▁t est".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 5, 6, 3, 3, 7, 8, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
| 281 | 0 |
import requests
def send_slack_message(message_body: str, slack_url: str) -> None:
    headers = {"Content-Type": "application/json"}
    response = requests.post(slack_url, json={"text": message_body}, headers=headers)
    if response.status_code != 200:
        msg = (
            "Request to slack returned an error "
            f"{response.status_code}, the response is:\n{response.text}"
        )
        raise ValueError(msg)
if __name__ == "__main__":
    # Set the slack url to the one provided by Slack when you create the webhook at
    # https://my.slack.com/services/new/incoming-webhook/
    send_slack_message("<YOUR MESSAGE BODY>", "<SLACK CHANNEL URL>")
| 121 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = "\n    Examples:\n    ```py\n    >>> from PIL import Image\n    >>> import torch\n    >>> from diffusers import DiffusionPipeline\n    >>> from diffusers.utils import export_to_gif, load_image\n\n    >>> device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n    >>> repo = \"openai/shap-e-img2img\"\n    >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n    >>> pipe = pipe.to(device)\n\n    >>> guidance_scale = 3.0\n    >>> image_url = \"https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png\"\n    >>> image = load_image(image_url).convert(\"RGB\")\n\n    >>> images = pipe(\n    ...     image,\n    ...     guidance_scale=guidance_scale,\n    ...     num_inference_steps=64,\n    ...     frame_size=256,\n    ... ).images\n\n    >>> gif_path = export_to_gif(images[0], \"corgi_3d.gif\")\n    ```\n"
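# Output container for the img2img pipeline below; holds the rendered views
# produced for each generated latent.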
@dataclass
class ShapEPipelineOutput(BaseOutput):
    images: Union[PIL.Image.Image, np.ndarray]
class ShapEImg2ImgPipeline(DiffusionPipeline):
    def __init__(self, prior: PriorTransformer, image_encoder: CLIPVisionModel, image_processor: CLIPImageProcessor, scheduler: HeunDiscreteScheduler, renderer: ShapERenderer):
        super().__init__()
        self.register_modules(
            prior=prior, image_encoder=image_encoder, image_processor=image_processor, scheduler=scheduler, renderer=renderer,
        )
    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)
        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")
        device = torch.device(f"cuda:{gpu_id}")
        models = [self.image_encoder, self.prior]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
@property
    def _execution_device(self):
        if self.device != torch.device("meta") or not hasattr(self.image_encoder, "_hf_hook"):
            return self.device
        for module in self.image_encoder.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    def _encode_image(self, image, device, num_images_per_prompt, do_classifier_free_guidance):
        if isinstance(image, list) and isinstance(image[0], torch.Tensor):
            image = torch.cat(image, axis=0) if image[0].ndim == 4 else torch.stack(image, axis=0)
        if not isinstance(image, torch.Tensor):
            image = self.image_processor(image, return_tensors="pt").pixel_values[0].unsqueeze(0)
        image = image.to(dtype=self.image_encoder.dtype, device=device)
        image_embeds = self.image_encoder(image)["last_hidden_state"]
        image_embeds = image_embeds[:, 1:, :].contiguous()  # batch_size, dim, 256
        image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
        if do_classifier_free_guidance:
            negative_image_embeds = torch.zeros_like(image_embeds)
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            image_embeds = torch.cat([negative_image_embeds, image_embeds])
        return image_embeds
@torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(self, image, num_images_per_prompt: int = 1, num_inference_steps: int = 25, generator=None, latents=None, guidance_scale: float = 4.0, frame_size: int = 64, output_type: str = "pil", return_dict: bool = True):
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        elif isinstance(image, list) and isinstance(image[0], (torch.Tensor, PIL.Image.Image)):
            batch_size = len(image)
        else:
            raise ValueError(
                f"`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(image)}")
        device = self._execution_device
        batch_size = batch_size * num_images_per_prompt
        do_classifier_free_guidance = guidance_scale > 1.0
        image_embeds = self._encode_image(image, device, num_images_per_prompt, do_classifier_free_guidance)
        # prior
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps
        num_embeddings = self.prior.config.num_embeddings
        embedding_dim = self.prior.config.embedding_dim
        latents = self.prepare_latents(
            (batch_size, num_embeddings * embedding_dim), image_embeds.dtype, device, generator, latents, self.scheduler)
        # YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
        latents = latents.reshape(latents.shape[0], num_embeddings, embedding_dim)
        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            scaled_model_input = self.scheduler.scale_model_input(latent_model_input, t)
            noise_pred = self.prior(
                scaled_model_input, timestep=t, proj_embedding=image_embeds).predicted_image_embedding
            # remove the variance
            noise_pred, _ = noise_pred.split(
                scaled_model_input.shape[2], dim=2)  # batch_size, num_embeddings, embedding_dim
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
            latents = self.scheduler.step(
                noise_pred, timestep=t, sample=latents).prev_sample
        if output_type == "latent":
            return ShapEPipelineOutput(images=latents)
        images = []
        for i, latent in enumerate(latents):
            print()
            image = self.renderer.decode(
                latent[None, :], device, size=frame_size, ray_batch_size=4096, n_coarse_samples=64, n_fine_samples=128)
            images.append(image)
        images = torch.stack(images)
        if output_type not in ["np", "pil"]:
            raise ValueError(f"Only the output types `pil` and `np` are supported not output_type={output_type}")
        images = images.cpu().numpy()
        if output_type == "pil":
            images = [self.numpy_to_pil(image) for image in images]
        # Offload last model to CPU
        if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
            self.final_offload_hook.offload()
        if not return_dict:
            return (images,)
        return ShapEPipelineOutput(images=images)
| 26 | 0 |
'''simple docstring'''
from __future__ import annotations
from fractions import Fraction
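# Project Euler problem 33: find the "digit cancelling" fractions (e.g. 49/98 = 4/8
# after naively cancelling the 9s); solution() returns the denominator of their
# product expressed in lowest terms.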
def is_digit_cancelling(num: int, den: int) -> bool:
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )
def fraction_list(digit_len: int) -> list[str]:
    solutions = []
    den = 11
    last_digit = int("1" + "0" * digit_len)
    for num in range(den, last_digit):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num, den):
                    solutions.append(f"{num}/{den}")
            den += 1
        num += 1
        den = 10
    return solutions
def solution(n: int = 2) -> int:
    result = 1.0
    for fraction in fraction_list(n):
        frac = Fraction(fraction)
        result *= frac.denominator / frac.numerator
    return int(result)
if __name__ == "__main__":
    print(solution())
| 270 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, Transformer2DModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DiTPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DiTPipeline
    params = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "latents",
        "num_images_per_prompt",
        "callback",
        "callback_steps",
    }
    batch_params = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
    test_cpu_offload = False  # NOTE: this flag's original name was garbled in extraction; `test_cpu_offload` is a plausible reconstruction
    def get_dummy_components(self):
        torch.manual_seed(0)
        transformer = Transformer2DModel(
            sample_size=16, num_layers=2, patch_size=4, attention_head_dim=8, num_attention_heads=2, in_channels=4, out_channels=8, attention_bias=True, activation_fn="gelu-approximate", num_embeds_ada_norm=1000, norm_type="ada_norm_zero", norm_elementwise_affine=False,
        )
        vae = AutoencoderKL()
        scheduler = DDIMScheduler()
        components = {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler}
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "class_labels": [1],
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def test_inference(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape, (1, 16, 16, 3))
        expected_slice = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(relax_max_difference=True, expected_max_diff=1e-3)
    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
@require_torch_gpu
@slow
class DiTPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_dit_256(self):
        generator = torch.manual_seed(0)
        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
        pipe.to("cuda")
        words = ["vase", "umbrella", "white shark", "white wolf"]
        ids = pipe.get_label_ids(words)
        images = pipe(ids, generator=generator, num_inference_steps=40, output_type="np").images
        for word, image in zip(words, images):
            expected_image = load_numpy(
                f"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy")
            assert np.abs((expected_image - image).max()) < 1e-2
    def test_dit_512(self):
        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-512")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.to("cuda")
        words = ["vase", "umbrella"]
        ids = pipe.get_label_ids(words)
        generator = torch.manual_seed(0)
        images = pipe(ids, generator=generator, num_inference_steps=25, output_type="np").images
        for word, image in zip(words, images):
            expected_image = load_numpy(
                "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                f"/dit/{word}_512.npy")
            assert np.abs((expected_image - image).max()) < 1e-1
| 270 | 1 |
from __future__ import annotations
import math
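# Newton's forward-difference interpolation: build a forward difference table from
# equally spaced samples, then evaluate the polynomial at the target point via
# u = (value - x[0]) / h, summing ucal(u, i) * y[0][i] / i!.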
def ucal(u: float, p: int) -> float:
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp
def main() -> None:
    n = int(input("enter the numbers of values: "))
    y: list[list[float]] = []
    for _ in range(n):
        y.append([])
    for i in range(n):
        for j in range(n):
            y[i].append(j)
            y[i][j] = 0
    print("enter the values of parameters in a list: ")
    x = list(map(int, input().split()))
    print("enter the values of corresponding parameters: ")
    for i in range(n):
        y[i][0] = float(input())
    value = int(input("enter the value to interpolate: "))
    u = (value - x[0]) / (x[1] - x[0])
    # for calculating forward difference table
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]
    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)
    print(f"the value at {value} is {summ}")
if __name__ == "__main__":
    main()
| 146 |
def power(base: int, exponent: int) -> float:
    return base * power(base, (exponent - 1)) if exponent else 1
if __name__ == "__main__":
    print("Raise base to the power of exponent using recursion...")
    base = int(input("Enter the base: ").strip())
    exponent = int(input("Enter the exponent: ").strip())
    result = power(base, abs(exponent))
    if exponent < 0:  # power() does not properly deal w/ negative exponents
        result = 1 / result
    print(f"{base} to the power of {exponent} is {result}")
| 146 | 1 |
"""simple docstring"""
def decimal_to_fraction(decimal):
    try:
        decimal = float(decimal)
    except ValueError:
        raise ValueError("Please enter a valid number")
    fractional_part = decimal - int(decimal)
    if fractional_part == 0:
        return int(decimal), 1
    else:
        number_of_frac_digits = len(str(decimal).split(".")[1])
        numerator = int(decimal * (10**number_of_frac_digits))
        denominator = 10**number_of_frac_digits
        # reduce via Euclid's algorithm
        divisor, dividend = denominator, numerator
        while True:
            remainder = dividend % divisor
            if remainder == 0:
                break
            dividend, divisor = divisor, remainder
        numerator, denominator = numerator / divisor, denominator / divisor
        return int(numerator), int(denominator)
if __name__ == "__main__":
print(F'''{decimal_to_fraction(2) = }''')
print(F'''{decimal_to_fraction(89.0) = }''')
print(F'''{decimal_to_fraction("67") = }''')
print(F'''{decimal_to_fraction("45.0") = }''')
print(F'''{decimal_to_fraction(1.5) = }''')
print(F'''{decimal_to_fraction("6.25") = }''')
print(F'''{decimal_to_fraction("78td") = }''')
| 30 |
"""simple docstring"""
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
logger = logging.get_logger(__name__)
logging.set_verbosity_info()
def convert_prophetnet_checkpoint_to_pytorch(prophetnet_checkpoint_path: str, pytorch_dump_folder_path: str):
    if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )
    special_keys = ["key_proj", "value_proj", "query_proj"]
    mapping = {
"""self_attn""": """ngram_self_attn""",
"""cross_attn""": """encoder_attn""",
"""cross_attn_layer_norm""": """encoder_attn_layer_norm""",
"""feed_forward_layer_norm""": """final_layer_norm""",
"""feed_forward""": """""",
"""intermediate""": """fc1""",
"""output""": """fc2""",
"""key_proj""": """k_proj""",
"""query_proj""": """q_proj""",
"""value_proj""": """v_proj""",
"""word_embeddings""": """embed_tokens""",
"""embeddings_layer_norm""": """emb_layer_norm""",
"""relative_pos_embeddings""": """relative_linear""",
"""ngram_embeddings""": """ngram_input_embed""",
"""position_embeddings""": """embed_positions""",
}
for key in loading_info["missing_keys"]:
        attributes = key.split(".")
        if attributes[0] == "lm_head":
            model = prophet
            old_model = prophet_old
        else:
            model = prophet.prophetnet
            old_model = prophet_old.model
        is_key_init = False
        for attribute in attributes:
            if attribute in mapping:
                old_attribute = mapping[attribute]
            if not hasattr(old_model, old_attribute) and len(old_attribute) > 0:
                old_attribute = attribute
            elif hasattr(old_model, attribute):
                old_attribute = attribute
            if attribute == "weight":
                assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
                model.weight = old_model.weight
                logger.info(f"{attribute} is initialized.")
                is_key_init = True
                break
            elif attribute == "bias":
                assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
                model.bias = old_model.bias
                logger.info(f"{attribute} is initialized")
                is_key_init = True
                break
            elif attribute in special_keys and hasattr(old_model, "in_proj_weight"):
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model, attribute)
                # NOTE: these two checks were bare expressions in the extracted source; asserts are the evident intent.
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
                if attribute == "query_proj":
                    param.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :])
                    param.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim])
                elif attribute == "key_proj":
                    param.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :])
                    param.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim])
                elif attribute == "value_proj":
                    param.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :])
                    param.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :])
                is_key_init = True
                break
            elif attribute == "position_embeddings":
                assert (
                    model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
                ), "Hidden size has to match"
                assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
                model.position_embeddings.weight = nn.Parameter(old_model.embed_positions.weight[:512, :])
                is_key_init = True
                break
            if attribute.isdigit():
                model = model[int(attribute)]
                old_model = old_model[int(attribute)]
            else:
                model = getattr(model, attribute)
                if old_attribute == "":
                    old_model = old_model
                else:
                    if not hasattr(old_model, old_attribute):
                        raise ValueError(f"{old_model} does not have {old_attribute}")
                    old_model = getattr(old_model, old_attribute)
        if not is_key_init:
            raise ValueError(f"{key} was not correctly initialized!")
    print(f"Saving model to {pytorch_dump_folder_path}")
    prophet.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--prophetnet_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
| 30 | 1 |
'''simple docstring'''
from __future__ import annotations
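# Dynamic programming: matrix[i][j] becomes the cheapest cost of reaching cell (i, j)
# when moving only right or down; the answer is the bottom-right cell.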
def min_path_sum(matrix: list[list[int]]) -> int:
    # preprocessing the first row
    for i in range(1, len(matrix[0])):
        matrix[0][i] += matrix[0][i - 1]
    # preprocessing the first column
    for i in range(1, len(matrix)):
        matrix[i][0] += matrix[i - 1][0]
    # updating the path cost for current position
    for i in range(1, len(matrix)):
        for j in range(1, len(matrix[0])):
            matrix[i][j] += min(matrix[i - 1][j], matrix[i][j - 1])
    return matrix[-1][-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 161 |
'''simple docstring'''
from pathlib import Path
import cv2
import numpy as np
from matplotlib import pyplot as plt
def get_rotation(img: np.ndarray, pt1: np.ndarray, pt2: np.ndarray, rows: int, cols: int) -> np.ndarray:
    rot_mat = cv2.getAffineTransform(pt1, pt2)
    return cv2.warpAffine(img, rot_mat, (rows, cols))
if __name__ == "__main__":
    # read original image
    image = cv2.imread(
        str(Path(__file__).resolve().parent.parent / "image_data" / "lena.jpg")
    )
    # turn image in gray scale value
    gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # get image shape
    img_rows, img_cols = gray_img.shape
    # set different points to rotate image
    pts1 = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
    pts2 = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
    pts3 = np.array([[50, 50], [150, 50], [120, 200]], np.float32)
    pts4 = np.array([[10, 100], [80, 50], [180, 250]], np.float32)
    # add all rotated images in a list
    # NOTE: the original point-set pairing was lost in extraction; the pairing below is a plausible reconstruction.
    images = [
        gray_img,
        get_rotation(gray_img, pts1, pts2, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts3, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts4, img_rows, img_cols),
    ]
    # plot different image rotations
    fig = plt.figure(1)
    titles = ["Original", "Rotation 1", "Rotation 2", "Rotation 3"]
    for i, image in enumerate(images):
        plt.subplot(2, 2, i + 1), plt.imshow(image, "gray")
        plt.title(titles[i])
        plt.axis("off")
    plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
    plt.show()
| 161 | 1 |
# Logistic Regression from scratch
# In[62]:
# In[63]:
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
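# Plain batch gradient descent on the logistic (cross-entropy) loss:
# no regularization, fixed learning rate alpha, weights initialized to zero.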
def sigmoid_function(z):
    return 1 / (1 + np.exp(-z))
def cost_function(h, y):
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()
def log_likelihood(x, y, weights):
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))
# here alpha is the learning rate, x is the feature matrix, y is the target matrix
def logistic_reg(alpha, x, y, max_iterations=70000):
    theta = np.zeros(x.shape[1])
    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        j = cost_function(h, y)
        if iterations % 100 == 0:
            print(f"loss: {j} \t")  # printing the loss after every 100 iterations
    return theta
# In[68]:
if __name__ == "__main__":
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1
    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=70000)
    print("theta: ", theta)  # printing the theta i.e our weights vector
    def predict_prob(x):
        return sigmoid_function(
            np.dot(x, theta)
        )  # predicting the value of probability from the logistic regression algorithm
    plt.figure(figsize=(10, 6))
    plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="b", label="0")
    plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="r", label="1")
    (x1_min, x1_max) = (x[:, 0].min(), x[:, 0].max())
    (x2_min, x2_max) = (x[:, 1].min(), x[:, 1].max())
    (xx1, xx2) = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max))
    grid = np.c_[xx1.ravel(), xx2.ravel()]
    probs = predict_prob(grid).reshape(xx1.shape)
    plt.contour(xx1, xx2, probs, [0.5], linewidths=1, colors="black")
    plt.legend()
    plt.show()
| 365 |
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/maskformer-swin-base-ade""": (
"""https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json"""
)
# See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}
logger = logging.get_logger(__name__)
class MaskFormerConfig(PretrainedConfig):
    model_type = "maskformer"
    attribute_map = {"hidden_size": "mask_feature_size"}
    backbones_supported = ["resnet", "swin"]
    decoders_supported = ["detr"]
    def __init__(self, fpn_feature_size: int = 256, mask_feature_size: int = 256, no_object_weight: float = 0.1, use_auxiliary_loss: bool = False, backbone_config: Optional[Dict] = None, decoder_config: Optional[Dict] = None, init_std: float = 0.02, init_xavier_std: float = 1.0, dice_weight: float = 1.0, cross_entropy_weight: float = 1.0, mask_weight: float = 20.0, output_auxiliary_logits: Optional[bool] = None, **kwargs):
        if backbone_config is None:
            # fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
            backbone_config = SwinConfig(
                image_size=384, in_channels=3, patch_size=4, embed_dim=128, depths=[2, 2, 18, 2], num_heads=[4, 8, 16, 32], window_size=12, drop_path_rate=0.3, out_features=["stage1", "stage2", "stage3", "stage4"], )
        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)
        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. "
                f"Supported model types: {','.join(self.backbones_supported)}")
        if decoder_config is None:
            # fall back to https://huggingface.co/facebook/detr-resnet-50
            decoder_config = DetrConfig()
        else:
            # verify that the decoder is supported
            decoder_type = (
                decoder_config.pop("model_type") if isinstance(decoder_config, dict) else decoder_config.model_type
            )
            if decoder_type not in self.decoders_supported:
                raise ValueError(
                    f"Transformer Decoder {decoder_type} not supported, please use one of"
                    f" {','.join(self.decoders_supported)}")
            if isinstance(decoder_config, dict):
                config_class = CONFIG_MAPPING[decoder_type]
                decoder_config = config_class.from_dict(decoder_config)
        self.backbone_config = backbone_config
        self.decoder_config = decoder_config
        # main feature dimension for the model
        self.fpn_feature_size = fpn_feature_size
        self.mask_feature_size = mask_feature_size
        # initializer
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        # Hungarian matcher && loss
        self.cross_entropy_weight = cross_entropy_weight
        self.dice_weight = dice_weight
        self.mask_weight = mask_weight
        self.use_auxiliary_loss = use_auxiliary_loss
        self.no_object_weight = no_object_weight
        self.output_auxiliary_logits = output_auxiliary_logits
        self.num_attention_heads = self.decoder_config.encoder_attention_heads
        self.num_hidden_layers = self.decoder_config.num_hidden_layers
        super().__init__(**kwargs)
    @classmethod
    def from_backbone_and_decoder_configs(cls, backbone_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs):
        return cls(
            backbone_config=backbone_config, decoder_config=decoder_config, **kwargs, )
    def to_dict(self) -> Dict[str, any]:
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["decoder_config"] = self.decoder_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 35 | 0 |
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_2d_blocks_flax import (
    FlaxCrossAttnDownBlock2D,
    FlaxCrossAttnUpBlock2D,
    FlaxDownBlock2D,
    FlaxUNetMidBlock2DCrossAttn,
    FlaxUpBlock2D,
)
@flax.struct.dataclass
class FlaxUNet2DConditionOutput(BaseOutput):
    sample: jnp.ndarray
@flax_register_to_config
class FlaxUNet2DConditionModel(nn.Module, FlaxModelMixin, ConfigMixin):
    sample_size: int = 32
    in_channels: int = 4
    out_channels: int = 4
    down_block_types: Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    up_block_types: Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int] = (320, 640, 1_280, 1_280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1_280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    use_memory_efficient_attention: bool = False
    def init_weights(self, rng: jax.random.KeyArray) -> FrozenDict:
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape, dtype=jnp.float32)
        timesteps = jnp.ones((1,), dtype=jnp.int32)
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)
        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}
        return self.init(rngs, sample, timesteps, encoder_hidden_states)["params"]
    def setup(self) -> None:
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4
        if self.num_attention_heads is not None:
            raise ValueError(
                "At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19.")
        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim
        # input
        self.conv_in = nn.Conv(
            block_out_channels[0], kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype, )
        # time
        self.time_proj = FlaxTimesteps(
            block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift)
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype)
        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention, bool):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types)
        if isinstance(num_attention_heads, int):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types)
        # down
        down_blocks = []
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(self.down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1
            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlock2D(
                    in_channels=input_channel, out_channels=output_channel, dropout=self.dropout, num_layers=self.layers_per_block, num_attention_heads=num_attention_heads[i], add_downsample=not is_final_block, use_linear_projection=self.use_linear_projection, only_cross_attention=only_cross_attention[i], use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype, )
            else:
                down_block = FlaxDownBlock2D(
                    in_channels=input_channel, out_channels=output_channel, dropout=self.dropout, num_layers=self.layers_per_block, add_downsample=not is_final_block, dtype=self.dtype, )
            down_blocks.append(down_block)
        self.down_blocks = down_blocks
        # mid
        self.mid_block = FlaxUNetMidBlock2DCrossAttn(
            in_channels=block_out_channels[-1], dropout=self.dropout, num_attention_heads=num_attention_heads[-1], use_linear_projection=self.use_linear_projection, use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype, )
        # up
        up_blocks = []
        reversed_block_out_channels = list(reversed(block_out_channels))
        reversed_num_attention_heads = list(reversed(num_attention_heads))
        only_cross_attention = list(reversed(only_cross_attention))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(self.up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]
            is_final_block = i == len(block_out_channels) - 1
            if up_block_type == "CrossAttnUpBlock2D":
                up_block = FlaxCrossAttnUpBlock2D(
                    in_channels=input_channel, out_channels=output_channel, prev_output_channel=prev_output_channel, num_layers=self.layers_per_block + 1, num_attention_heads=reversed_num_attention_heads[i], add_upsample=not is_final_block, dropout=self.dropout, use_linear_projection=self.use_linear_projection, only_cross_attention=only_cross_attention[i], use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype, )
            else:
                up_block = FlaxUpBlock2D(
                    in_channels=input_channel, out_channels=output_channel, prev_output_channel=prev_output_channel, num_layers=self.layers_per_block + 1, add_upsample=not is_final_block, dropout=self.dropout, dtype=self.dtype, )
            up_blocks.append(up_block)
            prev_output_channel = output_channel
        self.up_blocks = up_blocks
        # out
        self.conv_norm_out = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv_out = nn.Conv(
            self.out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype, )
    def __call__(self, sample, timesteps, encoder_hidden_states, down_block_additional_residuals=None, mid_block_additional_residual=None, return_dict: bool = True, train: bool = False) -> Union[FlaxUNet2DConditionOutput, Tuple]:
        # 1. time
        if not isinstance(timesteps, jnp.ndarray):
            timesteps = jnp.array([timesteps], dtype=jnp.int32)
        elif isinstance(timesteps, jnp.ndarray) and len(timesteps.shape) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32)
            timesteps = jnp.expand_dims(timesteps, 0)

        t_emb = self.time_proj(timesteps)
        t_emb = self.time_embedding(t_emb)

        # 2. pre-process
        sample = jnp.transpose(sample, (0, 2, 3, 1))
        sample = self.conv_in(sample)

        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block, FlaxCrossAttnDownBlock2D):
                sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
            else:
                sample, res_samples = down_block(sample, t_emb, deterministic=not train)
            down_block_res_samples += res_samples

        if down_block_additional_residuals is not None:
            new_down_block_res_samples = ()

            for down_block_res_sample, down_block_additional_residual in zip(
                down_block_res_samples, down_block_additional_residuals
            ):
                down_block_res_sample += down_block_additional_residual
                new_down_block_res_samples += (down_block_res_sample,)

            down_block_res_samples = new_down_block_res_samples

        # 4. mid
        sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train)

        if mid_block_additional_residual is not None:
            sample += mid_block_additional_residual

        # 5. up
        for up_block in self.up_blocks:
            res_samples = down_block_res_samples[-(self.layers_per_block + 1) :]
            down_block_res_samples = down_block_res_samples[: -(self.layers_per_block + 1)]
            if isinstance(up_block, FlaxCrossAttnUpBlock2D):
                sample = up_block(
                    sample, temb=t_emb, encoder_hidden_states=encoder_hidden_states, res_hidden_states_tuple=res_samples, deterministic=not train,
                )
            else:
                sample = up_block(sample, temb=t_emb, res_hidden_states_tuple=res_samples, deterministic=not train)

        # 6. post-process
        sample = self.conv_norm_out(sample)
        sample = nn.silu(sample)
        sample = self.conv_out(sample)
        sample = jnp.transpose(sample, (0, 3, 1, 2))

        if not return_dict:
            return (sample,)

        return FlaxUNet2DConditionOutput(sample=sample)
| 299 |
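The Flax module above follows the standard Flax pattern: parameters do not exist until `init` traces the model once with dummy inputs, and `apply` runs the forward pass with those parameters. A minimal sketch of the same init/apply round trip on a toy conditional module (the `TinyConditionalModel` below is an illustration, not the UNet itself):

import jax
import jax.numpy as jnp
import flax.linen as nn

class TinyConditionalModel(nn.Module):
    features: int = 8

    @nn.compact
    def __call__(self, sample, timesteps, encoder_hidden_states):
        # Project the timestep and a pooled context embedding onto the sample.
        t_emb = nn.Dense(self.features)(timesteps[:, None].astype(jnp.float32))
        ctx = nn.Dense(self.features)(encoder_hidden_states.mean(axis=1))
        return nn.Dense(self.features)(sample) + t_emb + ctx

model = TinyConditionalModel()
rng = jax.random.PRNGKey(0)
sample = jnp.zeros((1, 8))
timesteps = jnp.ones((1,), dtype=jnp.int32)
encoder_hidden_states = jnp.zeros((1, 4, 8))
params = model.init(rng, sample, timesteps, encoder_hidden_states)["params"]
out = model.apply({"params": params}, sample, timesteps, encoder_hidden_states)
print(out.shape)  # (1, 8)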
# limitations under the License.
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipeline_utils import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
"pipelines_utils",
"0.22.0",
"Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.",
standard_warn=False,
stacklevel=3,
)
| 299 | 1 |
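The shim above re-exports the moved symbols and fires a warning once at import time. A minimal sketch of such a helper (`deprecate_import` is a hypothetical stand-in, not diffusers' actual `deprecate` signature):

import warnings

def deprecate_import(old_path: str, new_path: str, remove_in: str) -> None:
    # stacklevel=3 makes the warning point at the user's import line
    # rather than at this helper or the shim module itself.
    warnings.warn(
        f"Importing from `{old_path}` is deprecated and will be removed in {remove_in}. "
        f"Please import from `{new_path}` instead.",
        FutureWarning,
        stacklevel=3,
    )

deprecate_import("diffusers.pipeline_utils", "diffusers.pipelines.pipeline_utils", "0.22.0")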
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BIG_BIRD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''google/bigbird-roberta-base''': '''https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json''',
'''google/bigbird-roberta-large''': '''https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json''',
'''google/bigbird-base-trivia-itc''': '''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json''',
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class BigBirdConfig(PretrainedConfig):
    model_type = "big_bird"

    def __init__(self, vocab_size=5_0358, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu_new", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=4096, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, use_cache=True, pad_token_id=0, bos_token_id=1, eos_token_id=2, sep_token_id=66, attention_type="block_sparse", use_bias=True, rescale_embeddings=False, block_size=64, num_random_blocks=3, classifier_dropout=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, sep_token_id=sep_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
        self.classifier_dropout = classifier_dropout
class BigBirdOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 368 |
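The `inputs` mapping above tells an ONNX exporter which tensor axes stay symbolic. A minimal sketch of how such a mapping can feed `torch.onnx.export`, using a toy embedding model rather than BigBird:

from collections import OrderedDict
import torch

dynamic_axis = {0: "batch", 1: "sequence"}
onnx_inputs = OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)])

class TinyModel(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.embed = torch.nn.Embedding(100, 16)

    def forward(self, input_ids, attention_mask):
        return self.embed(input_ids) * attention_mask.unsqueeze(-1)

model = TinyModel().eval()
dummy = (torch.ones(2, 8, dtype=torch.long), torch.ones(2, 8, dtype=torch.long))
torch.onnx.export(
    model,
    dummy,
    "tiny.onnx",
    input_names=list(onnx_inputs.keys()),
    dynamic_axes=dict(onnx_inputs),  # batch and sequence lengths stay symbolic in the graph
)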
"""simple docstring"""
from collections.abc import Callable
def A_ ( snake_case_ : Callable[[float], float] ,snake_case_ : float ,snake_case_ : float ):
'''simple docstring'''
UpperCamelCase : float = a
UpperCamelCase : float = b
if function(snake_case_ ) == 0: # one of the a or b is a root for the function
return a
elif function(snake_case_ ) == 0:
return b
elif (
function(snake_case_ ) * function(snake_case_ ) > 0
): # if none of these are root and they are both positive or negative,
# then this algorithm can't find the root
raise ValueError("""could not find root in given interval.""" )
else:
UpperCamelCase : float = start + (end - start) / 2.0
while abs(start - mid ) > 1_0**-7: # until precisely equals to 10^-7
if function(snake_case_ ) == 0:
return mid
elif function(snake_case_ ) * function(snake_case_ ) < 0:
UpperCamelCase : Dict = mid
else:
UpperCamelCase : List[str] = mid
UpperCamelCase : Tuple = start + (end - start) / 2.0
return mid
def A_ ( snake_case_ : float ):
'''simple docstring'''
return x**3 - 2 * x - 5
if __name__ == "__main__":
print(bisection(f, 1, 1000))
import doctest
doctest.testmod()
| 27 | 0 |
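A quick usage check for the bisection routine above, assuming it is in scope: finding sqrt(2) as the positive root of x^2 - 2 on [1, 2].

import math

root = bisection(lambda x: x * x - 2, 1, 2)
print(root, abs(root - math.sqrt(2)) < 1e-6)  # ~1.4142135, True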
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")

CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
'''DecisionTransformerConfig''',
'''EncoderDecoderConfig''',
'''MusicgenConfig''',
'''RagConfig''',
'''SpeechEncoderDecoderConfig''',
'''TimmBackboneConfig''',
'''VisionEncoderDecoderConfig''',
'''VisionTextDualEncoderConfig''',
'''LlamaConfig''',
}
def get_checkpoint_from_config_class(config_class):
    checkpoint = None

    # source code of `config_class`
    config_source = inspect.getsource(config_class)
    checkpoints = _re_checkpoint.findall(config_source)

    # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
    # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
    for ckpt_name, ckpt_link in checkpoints:
        # allow the link to end with `/`
        if ckpt_link.endswith("/"):
            ckpt_link = ckpt_link[:-1]

        # verify the checkpoint name corresponds to the checkpoint link
        ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
        if ckpt_link == ckpt_link_from_name:
            checkpoint = ckpt_name
            break

    return checkpoint
def check_config_docstrings_have_checkpoints():
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in config_class.__module__:
            continue
        checkpoint = get_checkpoint_from_config_class(config_class)

        name = config_class.__name__
        if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
| 280 |
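The checkpoint regex above pairs a markdown link label with a Hub URL. A self-contained demonstration on a sample docstring:

import re

_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")

docstring = (
    "Instantiating a configuration with the defaults will yield a similar "
    "configuration to that of the BERT [bert-base-uncased]"
    "(https://huggingface.co/bert-base-uncased) architecture."
)
for ckpt_name, ckpt_link in _re_checkpoint.findall(docstring):
    print(ckpt_name, "->", ckpt_link)
# bert-base-uncased -> https://huggingface.co/bert-base-uncased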
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
logger = logging.get_logger(__name__)


def copy_layers(src_layers: nn.ModuleList, dest_layers: nn.ModuleList, layers_to_copy: List[int]) -> None:
    layers_to_copy = nn.ModuleList([src_layers[i] for i in layers_to_copy])
    assert len(dest_layers) == len(layers_to_copy), f"{len(dest_layers)} != {len(layers_to_copy)}"
    dest_layers.load_state_dict(layers_to_copy.state_dict())
LAYERS_TO_COPY = {
# maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
# 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
12: {
1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
2: [0, 6],
3: [0, 6, 11],
4: [0, 4, 8, 11],
6: [0, 2, 4, 7, 9, 11],
9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
12: list(range(12)),
},
16: { # maps num layers in student -> which teacher layers to copy
1: [0],
2: [0, 15],
3: [0, 8, 15],
4: [0, 5, 10, 15],
6: [0, 3, 6, 9, 12, 15],
8: [0, 2, 4, 6, 8, 10, 12, 15],
9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
16: list(range(16)),
},
6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
LAYERS_TO_SUPERVISE = {
# maps num layers in student -> which teacher layers to copy.
6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}
def pick_layers_to_copy(n_student, n_teacher):
    try:
        val = LAYERS_TO_COPY[n_teacher][n_student]
        return val
    except KeyError:
        if n_student != n_teacher:
            warnings.warn(
                f"no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first"
                f" {n_student}"
            )
        return list(range(n_student))
def get_layers_to_supervise(n_student, n_teacher) -> List[int]:
    if n_student > n_teacher:
        raise ValueError(f"Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}")
    elif n_teacher == n_student:
        return list(range(n_teacher))
    elif n_student == 1:
        return [n_teacher - 1]
    else:
        return LAYERS_TO_SUPERVISE[n_teacher][n_student]
def create_student_by_copying_alternating_layers(teacher, save_path="student", e=None, d=None, copy_first_teacher_layers=False, e_layers_to_copy=None, d_layers_to_copy=None, **extra_config_kwargs) -> Tuple[PreTrainedModel, List[int], List[int]]:
    _msg = "encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher."
    assert (e is not None) or (d is not None), _msg
    if isinstance(teacher, str):
        AutoTokenizer.from_pretrained(teacher).save_pretrained(save_path)  # purely for convenience
        teacher = AutoModelForSeq2SeqLM.from_pretrained(teacher).eval()
    else:
        assert isinstance(teacher, PreTrainedModel), f"teacher must be a model or string got type {type(teacher)}"
    init_kwargs = teacher.config.to_diff_dict()

    try:
        teacher_e, teacher_d = teacher.config.encoder_layers, teacher.config.decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
        init_kwargs.update({"encoder_layers": e, "decoder_layers": d})
    except AttributeError:  # T5
        if hasattr(teacher.config, "num_encoder_layers"):
            teacher_e, teacher_d = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
        else:
            teacher_e, teacher_d = teacher.config.num_layers, teacher.config.num_decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
        if hasattr(teacher.config, "num_encoder_layers"):
            init_kwargs.update({"num_encoder_layers": e, "num_decoder_layers": d})
        else:
            init_kwargs.update({"num_layers": e, "num_decoder_layers": d})

    # Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
    init_kwargs.update(extra_config_kwargs)

    # Copy weights
    student_cfg = teacher.config_class(**init_kwargs)
    student = AutoModelForSeq2SeqLM.from_config(student_cfg)
    # Start by copying the full teacher state dict this will copy the first N teacher layers to the student.
    info = student.load_state_dict(teacher.state_dict(), strict=False)
    assert info.missing_keys == [], info.missing_keys  # every student key should have a teacher keys.

    if copy_first_teacher_layers:  # Our copying is done. We just log and save
        e_layers_to_copy, d_layers_to_copy = list(range(e)), list(range(d))
        logger.info(
            f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to"
            f" {save_path}"
        )
        student.save_pretrained(save_path)
        return student, e_layers_to_copy, d_layers_to_copy

    # Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
    if e_layers_to_copy is None:
        e_layers_to_copy: List[int] = pick_layers_to_copy(e, teacher_e)
    if d_layers_to_copy is None:
        d_layers_to_copy: List[int] = pick_layers_to_copy(d, teacher_d)

    try:
        if hasattr(
            teacher, "prophetnet"
        ):  # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
            copy_layers(teacher.prophetnet.encoder.layers, student.prophetnet.encoder.layers, e_layers_to_copy)
            copy_layers(teacher.prophetnet.decoder.layers, student.prophetnet.decoder.layers, d_layers_to_copy)
        else:
            copy_layers(teacher.model.encoder.layers, student.model.encoder.layers, e_layers_to_copy)
            copy_layers(teacher.model.decoder.layers, student.model.decoder.layers, d_layers_to_copy)
    except AttributeError:  # For t5, student.model.encoder.layers is called student.encoder.block
        copy_layers(teacher.encoder.block, student.encoder.block, e_layers_to_copy)
        copy_layers(teacher.decoder.block, student.decoder.block, d_layers_to_copy)
    logger.info(
        f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}"
    )
    student.config.init_metadata = {
        "teacher_type": teacher.config.model_type,
        "copied_encoder_layers": e_layers_to_copy,
        "copied_decoder_layers": d_layers_to_copy,
    }
    student.save_pretrained(save_path)
    # Save information about copying for easier reproducibility
    return student, e_layers_to_copy, d_layers_to_copy
if __name__ == "__main__":
fire.Fire(create_student_by_copying_alternating_layers)
| 280 | 1 |
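The layer-copy mechanics above reduce to building a ModuleList view of the selected teacher layers and loading its state dict into the student. A minimal sketch with toy Linear layers; the indices mirror the 12-to-3 entry of LAYERS_TO_COPY:

import torch
import torch.nn as nn

teacher_layers = nn.ModuleList([nn.Linear(4, 4) for _ in range(12)])
student_layers = nn.ModuleList([nn.Linear(4, 4) for _ in range(3)])

layers_to_copy = [0, 6, 11]  # keep first, middle, and last teacher layers
picked = nn.ModuleList([teacher_layers[i] for i in layers_to_copy])
student_layers.load_state_dict(picked.state_dict())

assert torch.equal(student_layers[2].weight, teacher_layers[11].weight)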
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_x_clip""": [
"""XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""XCLIPConfig""",
"""XCLIPTextConfig""",
"""XCLIPVisionConfig""",
],
"""processing_x_clip""": ["""XCLIPProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_x_clip"] = [
"""XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XCLIPModel""",
"""XCLIPPreTrainedModel""",
"""XCLIPTextModel""",
"""XCLIPVisionModel""",
]
if TYPE_CHECKING:
from .configuration_x_clip import (
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
XCLIPConfig,
XCLIPTextConfig,
XCLIPVisionConfig,
)
from .processing_x_clip import XCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_x_clip import (
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
XCLIPModel,
XCLIPPreTrainedModel,
XCLIPTextModel,
XCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 122 |
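The `_LazyModule` indirection defers the heavy torch import until an attribute is first touched. A simplified sketch of the same idea (this is an illustration, not transformers' actual implementation):

import importlib
import types

class LazyModule(types.ModuleType):
    """Resolve attributes to submodule imports on first access."""

    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        self._import_structure = import_structure
        # attribute name -> submodule that defines it
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr: str):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        module = importlib.import_module(f"{self.__name__}.{self._attr_to_module[attr]}")
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so the submodule import runs only once
        return value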
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
__snake_case : Any = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE ( __lowercase):
_SCREAMING_SNAKE_CASE : List[Any] = ['''pixel_values''']
def __init__( self , _UpperCamelCase = True , _UpperCamelCase = None , _UpperCamelCase = PILImageResampling.BILINEAR , _UpperCamelCase = True , _UpperCamelCase = None , _UpperCamelCase = True , _UpperCamelCase = 1 / 2_55 , _UpperCamelCase = True , _UpperCamelCase = None , _UpperCamelCase = None , **_UpperCamelCase , ):
"""simple docstring"""
super().__init__(**_UpperCamelCase )
lowerCAmelCase__ = size if size is not None else {'shortest_edge': 2_56}
lowerCAmelCase__ = get_size_dict(_UpperCamelCase , default_to_square=_UpperCamelCase )
lowerCAmelCase__ = crop_size if crop_size is not None else {'height': 2_24, 'width': 2_24}
lowerCAmelCase__ = get_size_dict(_UpperCamelCase )
lowerCAmelCase__ = do_resize
lowerCAmelCase__ = size
lowerCAmelCase__ = resample
lowerCAmelCase__ = do_center_crop
lowerCAmelCase__ = crop_size
lowerCAmelCase__ = do_rescale
lowerCAmelCase__ = rescale_factor
lowerCAmelCase__ = do_normalize
lowerCAmelCase__ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowerCAmelCase__ = image_std if image_std is not None else IMAGENET_STANDARD_STD
def UpperCamelCase__ ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = PILImageResampling.BICUBIC , _UpperCamelCase = None , **_UpperCamelCase , ):
"""simple docstring"""
lowerCAmelCase__ = get_size_dict(_UpperCamelCase , default_to_square=_UpperCamelCase )
if "shortest_edge" not in size:
raise ValueError(F"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}" )
lowerCAmelCase__ = get_resize_output_image_size(_UpperCamelCase , size=size['shortest_edge'] , default_to_square=_UpperCamelCase )
return resize(_UpperCamelCase , size=_UpperCamelCase , resample=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase )
def UpperCamelCase__ ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = None , **_UpperCamelCase , ):
"""simple docstring"""
lowerCAmelCase__ = get_size_dict(_UpperCamelCase )
return center_crop(_UpperCamelCase , size=(size['height'], size['width']) , data_format=_UpperCamelCase , **_UpperCamelCase )
def UpperCamelCase__ ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = None , **_UpperCamelCase ):
"""simple docstring"""
return rescale(_UpperCamelCase , scale=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase )
def UpperCamelCase__ ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = None , **_UpperCamelCase , ):
"""simple docstring"""
return normalize(_UpperCamelCase , mean=_UpperCamelCase , std=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase )
def UpperCamelCase__ ( self , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = ChannelDimension.FIRST , **_UpperCamelCase , ):
"""simple docstring"""
lowerCAmelCase__ = do_resize if do_resize is not None else self.do_resize
lowerCAmelCase__ = size if size is not None else self.size
lowerCAmelCase__ = get_size_dict(_UpperCamelCase , default_to_square=_UpperCamelCase )
lowerCAmelCase__ = resample if resample is not None else self.resample
lowerCAmelCase__ = do_center_crop if do_center_crop is not None else self.do_center_crop
lowerCAmelCase__ = crop_size if crop_size is not None else self.crop_size
lowerCAmelCase__ = get_size_dict(_UpperCamelCase )
lowerCAmelCase__ = do_rescale if do_rescale is not None else self.do_rescale
lowerCAmelCase__ = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCAmelCase__ = do_normalize if do_normalize is not None else self.do_normalize
lowerCAmelCase__ = image_mean if image_mean is not None else self.image_mean
lowerCAmelCase__ = image_std if image_std is not None else self.image_std
lowerCAmelCase__ = make_list_of_images(_UpperCamelCase )
if not valid_images(_UpperCamelCase ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
lowerCAmelCase__ = [to_numpy_array(_UpperCamelCase ) for image in images]
if do_resize:
lowerCAmelCase__ = [self.resize(image=_UpperCamelCase , size=_UpperCamelCase , resample=_UpperCamelCase ) for image in images]
if do_center_crop:
lowerCAmelCase__ = [self.center_crop(image=_UpperCamelCase , size=_UpperCamelCase ) for image in images]
if do_rescale:
lowerCAmelCase__ = [self.rescale(image=_UpperCamelCase , scale=_UpperCamelCase ) for image in images]
if do_normalize:
lowerCAmelCase__ = [self.normalize(image=_UpperCamelCase , mean=_UpperCamelCase , std=_UpperCamelCase ) for image in images]
lowerCAmelCase__ = [to_channel_dimension_format(_UpperCamelCase , _UpperCamelCase ) for image in images]
lowerCAmelCase__ = {'pixel_values': images}
return BatchFeature(data=_UpperCamelCase , tensor_type=_UpperCamelCase )
| 122 | 1 |
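The `shortest_edge` resize above rescales so the smaller side hits the target while preserving aspect ratio, after which the fixed center crop fits. A small sketch of that size arithmetic:

def shortest_edge_size(height: int, width: int, shortest_edge: int) -> tuple[int, int]:
    # Scale so the smaller side equals `shortest_edge`, preserving aspect ratio.
    short, long = (height, width) if height <= width else (width, height)
    new_long = int(long * shortest_edge / short)
    return (shortest_edge, new_long) if height <= width else (new_long, shortest_edge)

print(shortest_edge_size(480, 640, 256))  # (256, 341): resize, then a 224x224 center crop fits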
'''simple docstring'''
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class _A ( __SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE : Tuple = ["image_processor", "tokenizer"]
_SCREAMING_SNAKE_CASE : List[Any] = "AutoImageProcessor"
_SCREAMING_SNAKE_CASE : int = "AutoTokenizer"
def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
__UpperCAmelCase : Dict = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , __UpperCAmelCase , )
__UpperCAmelCase : Dict = kwargs.pop("""feature_extractor""" )
__UpperCAmelCase : Dict = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(__UpperCAmelCase , __UpperCAmelCase )
__UpperCAmelCase : Union[str, Any] = self.image_processor
__UpperCAmelCase : List[str] = False
def __call__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> int:
'''simple docstring'''
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*__UpperCAmelCase , **__UpperCAmelCase )
__UpperCAmelCase : List[Any] = kwargs.pop("""images""" , __UpperCAmelCase )
__UpperCAmelCase : int = kwargs.pop("""text""" , __UpperCAmelCase )
if len(__UpperCAmelCase ) > 0:
__UpperCAmelCase : List[str] = args[0]
__UpperCAmelCase : Optional[Any] = args[1:]
if images is None and text is None:
raise ValueError("""You need to specify either an `images` or `text` input to process.""" )
if images is not None:
__UpperCAmelCase : Union[str, Any] = self.image_processor(__UpperCAmelCase , *__UpperCAmelCase , **__UpperCAmelCase )
if text is not None:
__UpperCAmelCase : int = self.tokenizer(__UpperCAmelCase , **__UpperCAmelCase )
if text is None:
return inputs
elif images is None:
return encodings
else:
__UpperCAmelCase : Tuple = encodings["""input_ids"""]
return inputs
def __A ( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Any:
'''simple docstring'''
return self.tokenizer.batch_decode(*__UpperCAmelCase , **__UpperCAmelCase )
def __A ( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Tuple:
'''simple docstring'''
return self.tokenizer.decode(*__UpperCAmelCase , **__UpperCAmelCase )
@contextmanager
def __A ( self ) -> List[Any]:
'''simple docstring'''
warnings.warn(
"""`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your """
"""labels by using the argument `text` of the regular `__call__` method (either in the same call as """
"""your images inputs, or in a separate call.""" )
__UpperCAmelCase : List[Any] = True
__UpperCAmelCase : Union[str, Any] = self.tokenizer
yield
__UpperCAmelCase : str = self.image_processor
__UpperCAmelCase : Optional[Any] = False
    def token2json(self, tokens, is_inner_value=False, added_vocab=None) -> Dict:
'''simple docstring'''
if added_vocab is None:
__UpperCAmelCase : List[Any] = self.tokenizer.get_added_vocab()
__UpperCAmelCase : Union[str, Any] = {}
while tokens:
__UpperCAmelCase : List[str] = re.search(r"""<s_(.*?)>""" , __UpperCAmelCase , re.IGNORECASE )
if start_token is None:
break
__UpperCAmelCase : Any = start_token.group(1 )
__UpperCAmelCase : Optional[int] = re.search(rf'</s_{key}>' , __UpperCAmelCase , re.IGNORECASE )
__UpperCAmelCase : Any = start_token.group()
if end_token is None:
__UpperCAmelCase : Optional[int] = tokens.replace(__UpperCAmelCase , """""" )
else:
__UpperCAmelCase : str = end_token.group()
__UpperCAmelCase : str = re.escape(__UpperCAmelCase )
__UpperCAmelCase : Optional[int] = re.escape(__UpperCAmelCase )
__UpperCAmelCase : int = re.search(f'{start_token_escaped}(.*?){end_token_escaped}' , __UpperCAmelCase , re.IGNORECASE )
if content is not None:
__UpperCAmelCase : str = content.group(1 ).strip()
if r"<s_" in content and r"</s_" in content: # non-leaf node
                        value = self.token2json(content, is_inner_value=True, added_vocab=added_vocab)
if value:
if len(__UpperCAmelCase ) == 1:
__UpperCAmelCase : str = value[0]
__UpperCAmelCase : Optional[int] = value
else: # leaf nodes
__UpperCAmelCase : str = []
for leaf in content.split(r"""<sep/>""" ):
__UpperCAmelCase : Any = leaf.strip()
if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
__UpperCAmelCase : Optional[int] = leaf[1:-2] # for categorical special tokens
output[key].append(__UpperCAmelCase )
if len(output[key] ) == 1:
__UpperCAmelCase : Optional[int] = output[key][0]
__UpperCAmelCase : Optional[Any] = tokens[tokens.find(__UpperCAmelCase ) + len(__UpperCAmelCase ) :].strip()
if tokens[:6] == r"<sep/>": # non-leaf nodes
            return [output] + self.token2json(tokens[6:], is_inner_value=True, added_vocab=added_vocab)
if len(__UpperCAmelCase ):
return [output] if is_inner_value else output
else:
return [] if is_inner_value else {"text_sequence": tokens}
@property
def __A ( self ) -> Optional[Any]:
'''simple docstring'''
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , __UpperCAmelCase , )
return self.image_processor_class
@property
def __A ( self ) -> Optional[Any]:
'''simple docstring'''
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , __UpperCAmelCase , )
return self.image_processor
| 254 |
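The `token2json` logic above walks `<s_key>...</s_key>` spans recursively. A simplified, self-contained sketch of the same parsing idea (no `<sep/>` or added-vocab handling, and no support for repeated nested keys):

import re

def tags_to_dict(tokens: str) -> dict:
    """Parse Donut-style '<s_key>...</s_key>' sequences into a dict."""
    output = {}
    while True:
        start = re.search(r"<s_(.*?)>", tokens)
        if start is None:
            break
        key = start.group(1)
        end = re.search(rf"</s_{key}>", tokens)
        if end is None:
            break
        content = tokens[start.end() : end.start()]
        output[key] = tags_to_dict(content) if "<s_" in content else content.strip()
        tokens = tokens[end.end() :]
    return output

print(tags_to_dict("<s_menu><s_name>latte</s_name><s_price>4.50</s_price></s_menu>"))
# {'menu': {'name': 'latte', 'price': '4.50'}}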
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
_UpperCamelCase = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
_UpperCamelCase = {
'''vocab_file''': {
'''unc-nlp/lxmert-base-uncased''': '''https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt''',
},
'''tokenizer_file''': {
'''unc-nlp/lxmert-base-uncased''': (
'''https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json'''
),
},
}
_UpperCamelCase = {
'''unc-nlp/lxmert-base-uncased''': 512,
}
_UpperCamelCase = {
'''unc-nlp/lxmert-base-uncased''': {'''do_lower_case''': True},
}
class _A ( __SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE : List[Any] = VOCAB_FILES_NAMES
_SCREAMING_SNAKE_CASE : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
_SCREAMING_SNAKE_CASE : Union[str, Any] = PRETRAINED_INIT_CONFIGURATION
_SCREAMING_SNAKE_CASE : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_SCREAMING_SNAKE_CASE : List[str] = LxmertTokenizer
def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=True , __UpperCAmelCase="[UNK]" , __UpperCAmelCase="[SEP]" , __UpperCAmelCase="[PAD]" , __UpperCAmelCase="[CLS]" , __UpperCAmelCase="[MASK]" , __UpperCAmelCase=True , __UpperCAmelCase=None , **__UpperCAmelCase , ) -> Dict:
'''simple docstring'''
super().__init__(
__UpperCAmelCase , tokenizer_file=__UpperCAmelCase , do_lower_case=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , tokenize_chinese_chars=__UpperCAmelCase , strip_accents=__UpperCAmelCase , **__UpperCAmelCase , )
__UpperCAmelCase : Any = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""" , __UpperCAmelCase ) != do_lower_case
or normalizer_state.get("""strip_accents""" , __UpperCAmelCase ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" , __UpperCAmelCase ) != tokenize_chinese_chars
):
__UpperCAmelCase : Any = getattr(__UpperCAmelCase , normalizer_state.pop("""type""" ) )
__UpperCAmelCase : Optional[Any] = do_lower_case
__UpperCAmelCase : Optional[Any] = strip_accents
__UpperCAmelCase : str = tokenize_chinese_chars
__UpperCAmelCase : str = normalizer_class(**__UpperCAmelCase )
__UpperCAmelCase : Union[str, Any] = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 254 | 1 |
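The special-token layout produced by the two methods above, shown with hypothetical token ids (101/102 for [CLS]/[SEP] follow BERT-style vocabularies):

cls_id, sep_id = 101, 102
token_ids_0 = [2023, 2003]  # hypothetical ids for sentence A
token_ids_1 = [2428, 4569]  # hypothetical ids for sentence B

input_ids = [cls_id] + token_ids_0 + [sep_id] + token_ids_1 + [sep_id]
token_type_ids = [0] * (len(token_ids_0) + 2) + [1] * (len(token_ids_1) + 1)

print(input_ids)       # [101, 2023, 2003, 102, 2428, 4569, 102]
print(token_type_ids)  # [0, 0, 0, 0, 1, 1, 1]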
'''simple docstring'''
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'module.blocks.{i}.norm1.weight', F'vit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((F'module.blocks.{i}.norm1.bias', F'vit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append(
(F'module.blocks.{i}.attn.proj.weight', F'vit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append((F'module.blocks.{i}.attn.proj.bias', F'vit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((F'module.blocks.{i}.norm2.weight', F'vit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((F'module.blocks.{i}.norm2.bias', F'vit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((F'module.blocks.{i}.mlp.fc1.weight', F'vit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((F'module.blocks.{i}.mlp.fc1.bias', F'vit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((F'module.blocks.{i}.mlp.fc2.weight', F'vit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((F'module.blocks.{i}.mlp.fc2.bias', F'vit.encoder.layer.{i}.output.dense.bias') )
# projection layer + position embeddings
rename_keys.extend(
[
("module.cls_token", "vit.embeddings.cls_token"),
("module.patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("module.patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("module.pos_embed", "vit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("module.norm.weight", "layernorm.weight"),
("module.norm.bias", "layernorm.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"module.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"module.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def remove_projection_head(state_dict):
    # the projection head is only used during self-supervised pre-training
    ignore_keys = [
"module.fc.fc1.weight",
"module.fc.fc1.bias",
"module.fc.bn1.weight",
"module.fc.bn1.bias",
"module.fc.bn1.running_mean",
"module.fc.bn1.running_var",
"module.fc.bn1.num_batches_tracked",
"module.fc.fc2.weight",
"module.fc.fc2.bias",
"module.fc.bn2.weight",
"module.fc.bn2.bias",
"module.fc.bn2.running_mean",
"module.fc.bn2.running_var",
"module.fc.bn2.num_batches_tracked",
"module.fc.fc3.weight",
"module.fc.fc3.bias",
]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def convert_vit_msn_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    config = ViTMSNConfig()
    config.num_labels = 1000

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    if "s16" in checkpoint_url:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_attention_heads = 6
    elif "l16" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    elif "b4" in checkpoint_url:
        config.patch_size = 4
    elif "l7" in checkpoint_url:
        config.patch_size = 7
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1

    model = ViTMSNModel(config)

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["target_encoder"]

    image_processor = ViTImageProcessor(size=config.image_size)

    remove_projection_head(state_dict)
    rename_keys = create_rename_keys(config, base_model=True)

    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model=True)

    model.load_state_dict(state_dict)
    model.eval()

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    image = Image.open(requests.get(url, stream=True).raw)
    image_processor = ViTImageProcessor(
        size=config.image_size, image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD
    )
    inputs = image_processor(images=image, return_tensors="pt")

    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    last_hidden_state = outputs.last_hidden_state

    # The following Colab Notebook was used to generate these outputs:
    # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
    if "s16" in checkpoint_url:
        expected_slice = torch.tensor([[-1.0915, -1.4876, -1.1809]])
    elif "b16" in checkpoint_url:
        expected_slice = torch.tensor([[14.2889, -18.9045, 11.7281]])
    elif "l16" in checkpoint_url:
        expected_slice = torch.tensor([[41.5028, -22.8681, 45.6475]])
    elif "b4" in checkpoint_url:
        expected_slice = torch.tensor([[-4.3868, 5.2932, -0.4137]])
    else:
        expected_slice = torch.tensor([[-0.1792, -0.6465, 2.4263]])

    # verify logits
    assert torch.allclose(last_hidden_state[:, 0, :3], expected_slice, atol=1e-4)

    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
__lowercase: int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar",
type=str,
help="URL of the checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
__lowercase: Any = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path) | 31 |
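The q/k/v split in the conversion above slices a fused (3*hidden, hidden) projection into three (hidden, hidden) blocks in query, key, value order. A compact demonstration of that slicing:

import torch

hidden_size = 8
qkv_weight = torch.randn(3 * hidden_size, hidden_size)  # fused (q|k|v) projection
qkv_bias = torch.randn(3 * hidden_size)

q_w = qkv_weight[:hidden_size, :]
k_w = qkv_weight[hidden_size : 2 * hidden_size, :]
v_w = qkv_weight[-hidden_size:, :]
q_b, k_b, v_b = qkv_bias[:hidden_size], qkv_bias[hidden_size : 2 * hidden_size], qkv_bias[-hidden_size:]

assert torch.equal(torch.cat([q_w, k_w, v_w], dim=0), qkv_weight)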
'''simple docstring'''
import inspect
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class LDMPipeline(DiffusionPipeline):
    def __init__(self, vqvae: VQModel, unet: UNet2DModel, scheduler: DDIMScheduler):
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(self, batch_size: int = 1, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, eta: float = 0.0, num_inference_steps: int = 50, output_type: Optional[str] = "pil", return_dict: bool = True, **kwargs) -> Union[Tuple, ImagePipelineOutput]:
        latents = randn_tensor(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
            generator=generator,
        )
        latents = latents.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps)

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())

        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(self.scheduler.timesteps):
            latent_model_input = self.scheduler.scale_model_input(latents, t)
            # predict the noise residual
            noise_prediction = self.unet(latent_model_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_prediction, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VQ-VAE
        image = self.vqvae.decode(latents).sample
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 31 | 1 |
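The `accepts_eta` check in the pipeline above is a general trick: probe a callable's signature before forwarding optional keyword arguments. A small self-contained sketch:

import inspect

def step_with_eta(model_output, timestep, sample, eta=0.0):
    return sample - eta * model_output

def step_plain(model_output, timestep, sample):
    return sample - model_output

for step in (step_with_eta, step_plain):
    extra_kwargs = {}
    if "eta" in inspect.signature(step).parameters:
        extra_kwargs["eta"] = 0.5
    print(step.__name__, step(1.0, 0, 2.0, **extra_kwargs))
# step_with_eta 1.5
# step_plain 1.0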
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"""configuration_pix2struct""": [
"""PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""Pix2StructConfig""",
"""Pix2StructTextConfig""",
"""Pix2StructVisionConfig""",
],
"""processing_pix2struct""": ["""Pix2StructProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_pix2struct"] = ["Pix2StructImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_pix2struct"] = [
"""PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Pix2StructPreTrainedModel""",
"""Pix2StructForConditionalGeneration""",
"""Pix2StructVisionModel""",
"""Pix2StructTextModel""",
]
if TYPE_CHECKING:
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .image_processing_pix2struct import Pix2StructImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 215 |
'''simple docstring'''
class lowercase :
"""simple docstring"""
def __init__( self ) -> List[str]:
_UpperCAmelCase : int = 0
_UpperCAmelCase : Union[str, Any] = 0
_UpperCAmelCase : Optional[int] = {}
def _snake_case ( self ,a_ ) -> Optional[Any]:
if vertex not in self.adjacency:
_UpperCAmelCase : int = {}
self.num_vertices += 1
def _snake_case ( self ,a_ ,a_ ,a_ ) -> int:
self.add_vertex(a_ )
self.add_vertex(a_ )
if head == tail:
return
_UpperCAmelCase : List[Any] = weight
_UpperCAmelCase : Dict = weight
def _snake_case ( self ) -> Dict:
_UpperCAmelCase : Optional[int] = self.get_edges()
for edge in edges:
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase : Dict = edge
edges.remove((tail, head, weight) )
for i in range(len(a_ ) ):
_UpperCAmelCase : str = list(edges[i] )
edges.sort(key=lambda a_ : e[2] )
for i in range(len(a_ ) - 1 ):
if edges[i][2] >= edges[i + 1][2]:
_UpperCAmelCase : Optional[Any] = edges[i][2] + 1
for edge in edges:
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase : Union[str, Any] = edge
_UpperCAmelCase : str = weight
_UpperCAmelCase : List[str] = weight
def __str__( self ) -> Any:
_UpperCAmelCase : List[Any] = """"""
for tail in self.adjacency:
for head in self.adjacency[tail]:
_UpperCAmelCase : List[str] = self.adjacency[head][tail]
string += f'''{head} -> {tail} == {weight}\n'''
return string.rstrip("""\n""" )
def _snake_case ( self ) -> str:
_UpperCAmelCase : int = []
for tail in self.adjacency:
for head in self.adjacency[tail]:
output.append((tail, head, self.adjacency[head][tail]) )
return output
def _snake_case ( self ) -> Optional[int]:
return self.adjacency.keys()
@staticmethod
def _snake_case ( a_=None ,a_=None ) -> Tuple:
_UpperCAmelCase : List[Any] = Graph()
if vertices is None:
_UpperCAmelCase : List[str] = []
if edges is None:
_UpperCAmelCase : Optional[Any] = []
for vertex in vertices:
g.add_vertex(a_ )
for edge in edges:
g.add_edge(*a_ )
return g
class lowercase :
"""simple docstring"""
def __init__( self ) -> int:
_UpperCAmelCase : List[str] = {}
_UpperCAmelCase : int = {}
def __len__( self ) -> Tuple:
return len(self.parent )
def _snake_case ( self ,a_ ) -> str:
if item in self.parent:
return self.find(a_ )
_UpperCAmelCase : Optional[Any] = item
_UpperCAmelCase : List[Any] = 0
return item
def _snake_case ( self ,a_ ) -> List[str]:
if item not in self.parent:
return self.make_set(a_ )
if item != self.parent[item]:
_UpperCAmelCase : List[Any] = self.find(self.parent[item] )
return self.parent[item]
def _snake_case ( self ,a_ ,a_ ) -> Union[str, Any]:
_UpperCAmelCase : Any = self.find(a_ )
_UpperCAmelCase : List[str] = self.find(a_ )
if roota == roota:
return roota
if self.rank[roota] > self.rank[roota]:
_UpperCAmelCase : Any = roota
return roota
if self.rank[roota] < self.rank[roota]:
_UpperCAmelCase : Any = roota
return roota
if self.rank[roota] == self.rank[roota]:
self.rank[roota] += 1
_UpperCAmelCase : List[str] = roota
return roota
return None
@staticmethod
def _snake_case ( a_ ) -> List[Any]:
_UpperCAmelCase : int = graph.num_vertices
_UpperCAmelCase : int = Graph.UnionFind()
_UpperCAmelCase : Optional[int] = []
while num_components > 1:
_UpperCAmelCase : int = {}
for vertex in graph.get_vertices():
_UpperCAmelCase : Union[str, Any] = -1
_UpperCAmelCase : Tuple = graph.get_edges()
for edge in edges:
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase : str = edge
edges.remove((tail, head, weight) )
for edge in edges:
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase : Optional[Any] = edge
_UpperCAmelCase : Any = union_find.find(a_ )
_UpperCAmelCase : Any = union_find.find(a_ )
if seta != seta:
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
_UpperCAmelCase : Tuple = [head, tail, weight]
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
_UpperCAmelCase : List[str] = [head, tail, weight]
for vertex in cheap_edge:
if cheap_edge[vertex] != -1:
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase : str = cheap_edge[vertex]
if union_find.find(a_ ) != union_find.find(a_ ):
union_find.union(a_ ,a_ )
mst_edges.append(cheap_edge[vertex] )
_UpperCAmelCase : Tuple = num_components - 1
_UpperCAmelCase : Optional[int] = Graph.build(edges=a_ )
return mst
| 215 | 1 |
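The union-find structure underneath the Boruvka implementation above can be stated in a few lines. A minimal sketch with path halving (the full class above additionally tracks ranks and per-vertex cheapest edges):

parent = list(range(6))

def find(x: int) -> int:
    while parent[x] != x:
        parent[x] = parent[parent[x]]  # path halving keeps trees shallow
        x = parent[x]
    return x

def union(a: int, b: int) -> None:
    parent[find(a)] = find(b)

for a, b in [(0, 1), (1, 2), (4, 5)]:
    union(a, b)
print(find(0) == find(2), find(0) == find(4))  # True False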
"""simple docstring"""
import numpy
# List of input, output pairs
train_data = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009


def _error(example_no, data_set="train"):
    """Difference between the predicted and the actual output for an example."""
    return calculate_hypothesis_value(example_no, data_set) - output(example_no, data_set)


def _hypothesis_value(data_input_tuple):
    hyp_val = 0
    for i in range(len(parameter_vector) - 1):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val


def output(example_no, data_set):
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None


def calculate_hypothesis_value(example_no, data_set):
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0])
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0])
    return None


def summation_of_cost_derivative(index, end=m):
    summation_value = 0
    for i in range(end):
        if index == -1:
            summation_value += _error(i)
        else:
            summation_value += _error(i) * train_data[i][0][index]
    return summation_value


def get_cost_derivative(index):
    cost_derivative_value = summation_of_cost_derivative(index, m) / m
    return cost_derivative_value


def run_gradient_descent():
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.000_002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0, len(parameter_vector)):
            cost_derivative = get_cost_derivative(i - 1)
            temp_parameter_vector[i] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            parameter_vector, temp_parameter_vector, atol=absolute_error_limit, rtol=relative_error_limit,
        ):
            break
        parameter_vector = temp_parameter_vector
    print(("Number of iterations:", j))


def test_gradient_descent():
    for i in range(len(test_data)):
        print(("Actual output value:", output(i, "test")))
        print(("Hypothesis output:", calculate_hypothesis_value(i, "test")))
if __name__ == "__main__":
run_gradient_descent()
print("\nTesting gradient descent for a linear hypothesis function.\n")
test_gradient_descent()
| 202 |
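The loop above updates one parameter at a time from per-example errors. The same least-squares problem can be written with vectorized numpy, which also gives a closed-form cross-check; the learning rate and iteration count below are illustrative:

import numpy as np

X = np.array([[5, 2, 3], [6, 5, 9], [11, 12, 13], [1, 1, 1], [11, 12, 13]], dtype=float)
y = np.array([15.0, 25.0, 41.0, 8.0, 41.0])
X = np.hstack([np.ones((len(X), 1)), X])  # bias column plays the role of parameter_vector[0]

theta = np.zeros(4)
lr = 0.005
for _ in range(50_000):
    grad = X.T @ (X @ theta - y) / len(y)
    theta -= lr * grad

print(theta.round(3))                                 # gradient-descent estimate
print(np.linalg.lstsq(X, y, rcond=None)[0].round(3))  # closed-form least-squares check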
"""simple docstring"""
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    "transformers",
    os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
    submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
transformers = spec.loader.load_module()

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")

CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
"CLIPConfigMixin",
"DecisionTransformerConfigMixin",
"EncoderDecoderConfigMixin",
"RagConfigMixin",
"SpeechEncoderDecoderConfigMixin",
"VisionEncoderDecoderConfigMixin",
"VisionTextDualEncoderConfigMixin",
}
def check_config_docstrings_have_checkpoints():
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        checkpoint_found = False

        # source code of `config_class`
        config_source = inspect.getsource(config_class)
        checkpoints = _re_checkpoint.findall(config_source)

        for checkpoint in checkpoints:
            # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
            # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
            ckpt_name, ckpt_link = checkpoint

            # verify the checkpoint name corresponds to the checkpoint link
            ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
            if ckpt_link == ckpt_link_from_name:
                checkpoint_found = True
                break

        name = config_class.__name__
        if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
| 202 | 1 |
class DisjointSet:
    def __init__(self, set_counts: list) -> None:
        """
        Initialize with a list of the number of items in each set
        and with rank = 1 for each set.
        """
        self.set_counts = set_counts
        self.max_set = max(set_counts)
        num_sets = len(set_counts)
        self.ranks = [1] * num_sets
        self.parents = list(range(num_sets))

    def merge(self, src: int, dst: int) -> bool:
        """Merge two sets together using union by rank; return True if a merge happened."""
        src_parent = self.get_parent(src)
        dst_parent = self.get_parent(dst)

        if src_parent == dst_parent:
            return False

        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]

        self.max_set = max(self.max_set, joined_set_size)
        return True

    def get_parent(self, disj_set: int) -> int:
        """Find the representative of a set, compressing the path along the way."""
        if self.parents[disj_set] == disj_set:
            return disj_set
        self.parents[disj_set] = self.get_parent(self.parents[disj_set])
        return self.parents[disj_set]
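# Minimal usage sketch (hypothetical values): start with three singleton sets,
# then union the first two.
# ds = DisjointSet([1, 1, 1])
# ds.merge(0, 1)   # True: sets joined, ds.max_set becomes 2
# ds.merge(0, 1)   # False: already in the same set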
| 270 |
import pprint

import requests

API_ENDPOINT_URL = "https://zenquotes.io/api"


def quote_of_the_day() -> list:
    return requests.get(API_ENDPOINT_URL + "/today").json()


def random_quotes() -> list:
    return requests.get(API_ENDPOINT_URL + "/random").json()


if __name__ == "__main__":
    response = random_quotes()
    pprint.pprint(response)
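# Assumption about the zenquotes payload: both endpoints return a JSON list of
# objects with keys such as "q" (quote) and "a" (author); verify against the live API.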
| 270 | 1 |
from __future__ import annotations

import math
import random
from typing import Any


class MyQueue:
    def __init__(self) -> None:
        self.data: list[Any] = []
        self.head: int = 0
        self.tail: int = 0

    def is_empty(self) -> bool:
        return self.head == self.tail

    def push(self, data: Any) -> None:
        self.data.append(data)
        self.tail = self.tail + 1

    def pop(self) -> Any:
        ret = self.data[self.head]
        self.head = self.head + 1
        return ret

    def count(self) -> int:
        return self.tail - self.head

    def print_queue(self) -> None:
        print(self.data)
        print("**************")
        print(self.data[self.head : self.tail])


class MyNode:
    def __init__(self, data: Any) -> None:
        self.data = data
        self.left: MyNode | None = None
        self.right: MyNode | None = None
        self.height: int = 1

    def get_data(self) -> Any:
        return self.data

    def get_left(self) -> MyNode | None:
        return self.left

    def get_right(self) -> MyNode | None:
        return self.right

    def get_height(self) -> int:
        return self.height

    def set_data(self, data: Any) -> None:
        self.data = data

    def set_left(self, node: MyNode | None) -> None:
        self.left = node

    def set_right(self, node: MyNode | None) -> None:
        self.right = node

    def set_height(self, height: int) -> None:
        self.height = height


def get_height(node: MyNode | None) -> int:
    if node is None:
        return 0
    return node.get_height()


def my_max(a: int, b: int) -> int:
    if a > b:
        return a
    return b


def right_rotation(node: MyNode) -> MyNode:
    print("left rotation node:", node.get_data())
    ret = node.get_left()
    assert ret is not None
    node.set_left(ret.get_right())
    ret.set_right(node)
    h1 = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h1)
    h2 = my_max(get_height(ret.get_right()), get_height(ret.get_left())) + 1
    ret.set_height(h2)
    return ret


def left_rotation(node: MyNode) -> MyNode:
    print("right rotation node:", node.get_data())
    ret = node.get_right()
    assert ret is not None
    node.set_right(ret.get_left())
    ret.set_left(node)
    h1 = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h1)
    h2 = my_max(get_height(ret.get_right()), get_height(ret.get_left())) + 1
    ret.set_height(h2)
    return ret


def lr_rotation(node: MyNode) -> MyNode:
    left_child = node.get_left()
    assert left_child is not None
    node.set_left(left_rotation(left_child))
    return right_rotation(node)


def rl_rotation(node: MyNode) -> MyNode:
    right_child = node.get_right()
    assert right_child is not None
    node.set_right(right_rotation(right_child))
    return left_rotation(node)


def insert_node(node: MyNode | None, data: Any) -> MyNode | None:
    if node is None:
        return MyNode(data)
    if data < node.get_data():
        node.set_left(insert_node(node.get_left(), data))
        if (
            get_height(node.get_left()) - get_height(node.get_right()) == 2
        ):  # an unbalance detected
            left_child = node.get_left()
            assert left_child is not None
            if (
                data < left_child.get_data()
            ):  # new node is the left child of the left child
                node = right_rotation(node)
            else:
                node = lr_rotation(node)
    else:
        node.set_right(insert_node(node.get_right(), data))
        if get_height(node.get_right()) - get_height(node.get_left()) == 2:
            right_child = node.get_right()
            assert right_child is not None
            if data < right_child.get_data():
                node = rl_rotation(node)
            else:
                node = left_rotation(node)
    height = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(height)
    return node


def get_right_most(root: MyNode) -> Any:
    while True:
        right_child = root.get_right()
        if right_child is None:
            break
        root = right_child
    return root.get_data()


def get_left_most(root: MyNode) -> Any:
    while True:
        left_child = root.get_left()
        if left_child is None:
            break
        root = left_child
    return root.get_data()


def del_node(root: MyNode, data: Any) -> MyNode | None:
    left_child = root.get_left()
    right_child = root.get_right()
    if root.get_data() == data:
        if left_child is not None and right_child is not None:
            temp_data = get_left_most(right_child)
            root.set_data(temp_data)
            root.set_right(del_node(right_child, temp_data))
        elif left_child is not None:
            root = left_child
        elif right_child is not None:
            root = right_child
        else:
            return None
    elif root.get_data() > data:
        if left_child is None:
            print("No such data")
            return root
        else:
            root.set_left(del_node(left_child, data))
    else:  # root.get_data() < data
        if right_child is None:
            return root
        else:
            root.set_right(del_node(right_child, data))

    if get_height(right_child) - get_height(left_child) == 2:
        assert right_child is not None
        if get_height(right_child.get_right()) > get_height(right_child.get_left()):
            root = left_rotation(root)
        else:
            root = rl_rotation(root)
    elif get_height(right_child) - get_height(left_child) == -2:
        assert left_child is not None
        if get_height(left_child.get_left()) > get_height(left_child.get_right()):
            root = right_rotation(root)
        else:
            root = lr_rotation(root)
    height = my_max(get_height(root.get_right()), get_height(root.get_left())) + 1
    root.set_height(height)
    return root


class AVLtree:
    def __init__(self) -> None:
        self.root: MyNode | None = None

    def get_height(self) -> int:
        return get_height(self.root)

    def insert(self, data: Any) -> None:
        print("insert:" + str(data))
        self.root = insert_node(self.root, data)

    def del_node(self, data: Any) -> None:
        print("delete:" + str(data))
        if self.root is None:
            print("Tree is empty!")
            return
        self.root = del_node(self.root, data)

    def __str__(self) -> str:  # a level traversal gives a more intuitive look at the tree
        output = ""
        q = MyQueue()
        q.push(self.root)
        layer = self.get_height()
        if layer == 0:
            return output
        cnt = 0
        while not q.is_empty():
            node = q.pop()
            space = " " * int(math.pow(2, layer - 1))
            output += space
            if node is None:
                output += "*"
                q.push(None)
                q.push(None)
            else:
                output += str(node.get_data())
                q.push(node.get_left())
                q.push(node.get_right())
            output += space
            cnt = cnt + 1
            for i in range(100):
                if cnt == math.pow(2, i) - 1:
                    layer = layer - 1
                    if layer == 0:
                        output += "\n*************************************"
                        return output
                    output += "\n"
                    break
        output += "\n*************************************"
        return output


def _test() -> None:
    import doctest

    doctest.testmod()


if __name__ == "__main__":
    _test()
    t = AVLtree()
    lst = list(range(10))
    random.shuffle(lst)
    for i in lst:
        t.insert(i)
    print(str(t))
    random.shuffle(lst)
    for i in lst:
        t.del_node(i)
    print(str(t))
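# Each insert/delete walks a single root-to-leaf path and performs O(1) rotations
# per visited node, so both operations run in O(log n); the prints inside the
# rotation helpers are diagnostic only and can safely be removed.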
| 178 |
import argparse
from collections import defaultdict

import yaml

PATH_TO_TOC = "docs/source/en/_toctree.yml"


def clean_model_doc_toc(model_doc: list) -> list:
    """Cleans one modality's model toc: deduplicates entries and sorts them by title."""
    counts = defaultdict(int)
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in model_doc if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add non-duplicate keys
    new_doc.extend([doc for doc in model_doc if counts[doc["local"]] == 1])

    # Sort
    return sorted(new_doc, key=lambda s: s["title"].lower())


def check_model_doc(overwrite: bool = False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1
    model_doc = api_doc[model_idx]["sections"]

    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc) if "sections" in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc["sections"]
        new_modality_doc = clean_model_doc_toc(old_modality_doc)

        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                model_doc[idx]["sections"] = new_modality_doc

    if diff:
        if overwrite:
            api_doc[model_idx]["sections"] = model_doc
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_model_doc(args.fix_and_overwrite)
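# Illustration with hypothetical toc entries: duplicate "local" keys collapse to one
# entry and the result is sorted case-insensitively by title.
# clean_model_doc_toc([
#     {"local": "model_doc/bert", "title": "BERT"},
#     {"local": "model_doc/bert", "title": "BERT"},
#     {"local": "model_doc/albert", "title": "ALBERT"},
# ])
# -> [{"local": "model_doc/albert", "title": "ALBERT"}, {"local": "model_doc/bert", "title": "BERT"}]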
| 178 | 1 |
import functools
import gc
import inspect
import torch
from .imports import is_npu_available, is_xpu_available
def release_memory(*objects):
    """
    Releases memory from `objects` by setting them to `None`, then empties the device cache.
    Returned objects should be reassigned to the original variables.
    """
    if not isinstance(objects, list):
        objects = list(objects)
    for i in range(len(objects)):
        objects[i] = None
    gc.collect()
    if is_xpu_available():
        torch.xpu.empty_cache()
    elif is_npu_available():
        torch.npu.empty_cache()
    else:
        torch.cuda.empty_cache()
    return objects


def should_reduce_batch_size(exception: Exception) -> bool:
    """Checks whether `exception` relates to CUDA out-of-memory, CUDNN not supported, or CPU out-of-memory."""
    _statements = [
        "CUDA out of memory.",  # CUDA OOM
        "cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.",  # CUDNN SNAFU
        "DefaultCPUAllocator: can't allocate memory",  # CPU OOM
    ]
    if isinstance(exception, RuntimeError) and len(exception.args) == 1:
        return any(err in exception.args[0] for err in _statements)
    return False


def find_executable_batch_size(function: callable = None, starting_batch_size: int = 128):
    """
    Decorator that retries `function` with half the batch size every time an
    out-of-memory condition is hit, starting from `starting_batch_size`.
    """
    if function is None:
        return functools.partial(find_executable_batch_size, starting_batch_size=starting_batch_size)

    batch_size = starting_batch_size

    def decorator(*args, **kwargs):
        nonlocal batch_size
        gc.collect()
        if is_xpu_available():
            torch.xpu.empty_cache()
        elif is_npu_available():
            torch.npu.empty_cache()
        else:
            torch.cuda.empty_cache()
        params = list(inspect.signature(function).parameters.keys())
        # Guard against user error
        if len(params) < (len(args) + 1):
            arg_str = ", ".join([f"{arg}={value}" for arg, value in zip(params[1:], args[1:])])
            raise TypeError(
                f"Batch size was passed into `{function.__name__}` as the first argument when called."
                f"Remove this as the decorator already does so: `{function.__name__}({arg_str})`"
            )
        while True:
            if batch_size == 0:
                raise RuntimeError("No executable batch size found, reached zero.")
            try:
                return function(batch_size, *args, **kwargs)
            except Exception as e:
                if should_reduce_batch_size(e):
                    gc.collect()
                    if is_xpu_available():
                        torch.xpu.empty_cache()
                    elif is_npu_available():
                        torch.npu.empty_cache()
                    else:
                        torch.cuda.empty_cache()
                    batch_size //= 2
                else:
                    raise

    return decorator
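# Typical usage: the decorated function must accept the batch size as its first
# argument; the decorator injects it and halves it on every OOM failure.
#
# @find_executable_batch_size(starting_batch_size=64)
# def train(batch_size):
#     ...  # build dataloaders and run training with `batch_size`
#
# train()  # note: called WITHOUT the batch size argument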
| 30 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"configuration_deit": ["DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeiTConfig", "DeiTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_deit"] = ["DeiTFeatureExtractor"]
    _import_structure["image_processing_deit"] = ["DeiTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_deit"] = [
'DEIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'DeiTForImageClassification',
'DeiTForImageClassificationWithTeacher',
'DeiTForMaskedImageModeling',
'DeiTModel',
'DeiTPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_deit"] = [
'TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFDeiTForImageClassification',
'TFDeiTForImageClassificationWithTeacher',
'TFDeiTForMaskedImageModeling',
'TFDeiTModel',
'TFDeiTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
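# _LazyModule replaces this module in sys.modules so the heavy torch/tf submodules
# listed in _import_structure are only imported on first attribute access, while the
# TYPE_CHECKING branch keeps static type checkers and IDEs aware of the real symbols.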
| 30 | 1 |
import qiskit
def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    """
    Flip two qubits with X gates, measure them, and return the histogram
    of 1000 simulator shots.
    """
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Apply X (NOT) Gate to Qubits 0 & 1
    circuit.x(0)
    circuit.x(1)
    # Map the quantum measurement to the classical bits
    circuit.measure([0, 1], [0, 1])
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)


if __name__ == "__main__":
    counts = single_qubit_measure(2, 2)
    print(f"Total count for various states are: {counts}")
| 281 |
def solution(n: int = 100) -> int:
    """
    Returns the difference between the square of the sum and the sum of the
    squares of the first n natural numbers.
    """
    sum_of_squares = 0
    sum_of_ints = 0
    for i in range(1, n + 1):
        sum_of_squares += i**2
        sum_of_ints += i
    return sum_of_ints**2 - sum_of_squares


if __name__ == "__main__":
    print(f"{solution() = }")
| 281 | 1 |
def solution(power: int = 1000) -> int:
    """Returns the sum of the digits of 2**power."""
    n = 2**power
    r = 0
    while n:
        r, n = r + n % 10, n // 10
    return r
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
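# Example: solution(15) == 26, since 2**15 = 32768 and 3 + 2 + 7 + 6 + 8 = 26.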
| 59 |
'''simple docstring'''
def solution(n: int = 1_000) -> int:
    """Returns the sum of all multiples of 3 or 5 below n."""
    return sum(e for e in range(3, n) if e % 3 == 0 or e % 5 == 0)
if __name__ == "__main__":
print(F"{solution() = }")
| 35 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class PoolFormerImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize_and_center_crop=True,
        size=None,
        crop_pct=0.9,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"shortest_edge": 30}
        crop_size = crop_size if crop_size is not None else {"height": 30, "width": 30}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize_and_center_crop = do_resize_and_center_crop
        self.size = size
        self.crop_pct = crop_pct
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict(self):
return {
"size": self.size,
"do_resize_and_center_crop": self.do_resize_and_center_crop,
"crop_pct": self.crop_pct,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class PoolFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = PoolFormerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = PoolFormerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize_and_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "crop_pct"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 30})
        self.assertEqual(image_processor.crop_size, {"height": 30, "width": 30})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
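# The three test_call_* variants above check the same shape contract for PIL, NumPy
# and PyTorch inputs: one image maps to (1, C, crop_h, crop_w) and a list of images
# maps to (batch_size, C, crop_h, crop_w).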
| 369 |
"""simple docstring"""
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def tax_relpos_bias_lookup(params, i, prefix):
    """Returns the Relative Position Bias parameters of a layer. Does not transpose."""
    return params[f"{prefix}/{prefix}/relpos_bias/rel_embedding"][:, i, :]
def tax_attention_lookup(params, i, prefix, layer_name="attention"):
    """Returns the KOQV parameters of (self-)attention. Does not transpose."""
    k_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/key/kernel"][:, i, :, :])
    k = k_tmp.reshape(k_tmp.shape[0], k_tmp.shape[1] * k_tmp.shape[2])
    o_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/out/kernel"][:, i, :, :])
    o = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1], o_tmp.shape[2])
    q_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/query/kernel"][:, i, :, :])
    q = q_tmp.reshape(q_tmp.shape[0], q_tmp.shape[1] * q_tmp.shape[2])
    v_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/value/kernel"][:, i, :, :])
    v = v_tmp.reshape(v_tmp.shape[0], v_tmp.shape[1] * v_tmp.shape[2])
    return k, o, q, v
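# Note: T5X stores each attention kernel with a stacked layer axis (selected by
# `[:, i, :, :]`) and separate head axes; the reshapes above fold the head axes into
# a single hidden dimension so each result is a 2-D matrix (transposed later).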
def tax_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Returns the MLP parameters of a layer. Does not transpose."""
    if split_mlp_wi:
        wi_0 = params[f"{prefix}/{prefix}/mlp/wi_0/kernel"][:, i, :]
        wi_1 = params[f"{prefix}/{prefix}/mlp/wi_1/kernel"][:, i, :]
        wi = (wi_0, wi_1)
    else:
        wi = params[f"{prefix}/{prefix}/mlp/wi/kernel"][:, i, :]
    wo = params[f"{prefix}/{prefix}/mlp/wo/kernel"][:, i, :]
    return wi, wo
def tax_layer_norm_lookup(params, i, prefix, layer_name):
    """Returns the layer norm param of a layer."""
    return params[f"{prefix}/{prefix}/{layer_name}/scale"][:, i]
def convert_tax_to_pytorch(variables: dict, *, num_layers: int, is_encoder_only: bool, scalable_attention: bool = False):
    """Converts the parameters from a T5X-Flax checkpoint into the PyTorch layout."""
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}

    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/encoder/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)

    new = collections.OrderedDict()

    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]

    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = tax_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = tax_attention_lookup(old, i, "encoder", "attention")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

        # Block i, layer 1 (MLP).
        layer_norm = tax_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = tax_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T
        if scalable_attention:
            # convert the rel_embedding of each layer
            new[f"encoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = tax_relpos_bias_lookup(
                old, i, "encoder"
            ).T

    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]

    if not scalable_attention:
        new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = tax_relpos_bias_lookup(
            old, 0, "encoder"
        ).T
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = tax_relpos_bias_lookup(
            old, 0, "decoder"
        ).T

    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = tax_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = tax_attention_lookup(old, i, "decoder", "self_attention")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

            # Block i, layer 1 (Cross Attention).
            layer_norm = tax_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = tax_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T

            # Block i, layer 2 (MLP).
            layer_norm = tax_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = tax_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T

            if scalable_attention:
                # convert the rel_embedding of each layer
                new[f"decoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = tax_relpos_bias_lookup(
                    old, i, "decoder"
                ).T

        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]

        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T

    return new
def make_state_dict(converted_params, is_encoder_only: bool):
    """Prepares a state dict for the PyTorch model."""
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])

    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]

    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]

        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]

    return state_dict
def load_tax_weights_in_ta(model, config, tax_checkpoint_path, is_encoder_only, scalable_attention):
    """Replaces the params in model with the T5X converted params."""
    variables = checkpoints.load_tax_checkpoint(tax_checkpoint_path)
    converted = convert_tax_to_pytorch(
        variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only, scalable_attention=scalable_attention
    )
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)
def convert_tax_checkpoint_to_pytorch(
    tax_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only: bool = False, scalable_attention: bool = False
):
    """Loads the config and model, converts the T5X checkpoint, and saves a PyTorch checkpoint."""
    config = MTaConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = UMTaEncoderModel(config)
    else:
        model = UMTaForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_tax_weights_in_ta(model, config, tax_checkpoint_path, is_encoder_only, scalable_attention)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='''Converts a native T5X checkpoint into a PyTorch checkpoint.''')
# Required parameters
parser.add_argument(
'''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path to the T5X checkpoint.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--is_encoder_only''', action='''store_true''', help='''Check if the model is encoder-decoder model''', default=False
)
parser.add_argument(
'''--scalable_attention''',
action='''store_true''',
help='''Whether the model uses scaled attention (umt5 model)''',
default=False,
)
    args = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
        args.t5x_checkpoint_path,
args.config_file,
args.pytorch_dump_path,
args.is_encoder_only,
args.scalable_attention,
)
| 38 | 0 |
"""simple docstring"""
import argparse
import os
import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from tax import checkpoints
from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, TaFilmDecoder
MODEL = "base_with_context"
def load_notes_encoder(weights, model):
    """Copies the T5X note-encoder weights into the PyTorch SpectrogramNotesEncoder."""
    model.token_embedder.weight = nn.Parameter(torch.FloatTensor(weights['''token_embedder''']['''embedding'''] ) )
    model.position_encoding.weight = nn.Parameter(
        torch.FloatTensor(weights['''Embed_0''']['''embedding'''] ), requires_grad=False )
    for lyr_num, lyr in enumerate(model.encoders ):
        ly_weight = weights[f'layers_{lyr_num}']
        lyr.layer[0].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight['''pre_attention_layer_norm''']['''scale'''] ) )
        attention_weights = ly_weight['''attention''']
        lyr.layer[0].SelfAttention.q.weight = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) )
        lyr.layer[0].SelfAttention.k.weight = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) )
        lyr.layer[0].SelfAttention.v.weight = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) )
        lyr.layer[0].SelfAttention.o.weight = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) )
        lyr.layer[1].layer_norm.weight = nn.Parameter(torch.FloatTensor(ly_weight['''pre_mlp_layer_norm''']['''scale'''] ) )
        lyr.layer[1].DenseReluDense.wi_0.weight = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_0''']['''kernel'''].T ) )
        lyr.layer[1].DenseReluDense.wi_1.weight = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_1''']['''kernel'''].T ) )
        lyr.layer[1].DenseReluDense.wo.weight = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wo''']['''kernel'''].T ) )
    model.layer_norm.weight = nn.Parameter(torch.FloatTensor(weights['''encoder_norm''']['''scale'''] ) )
    return model
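# The .T transposes reflect that Flax Dense kernels are stored as
# (in_features, out_features) whereas torch.nn.Linear weights are
# (out_features, in_features).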
def load_continuous_encoder(weights, model):
    """Copies the T5X continuous-context encoder weights into the PyTorch SpectrogramContEncoder."""
    model.input_proj.weight = nn.Parameter(torch.FloatTensor(weights['''input_proj''']['''kernel'''].T ) )
    model.position_encoding.weight = nn.Parameter(
        torch.FloatTensor(weights['''Embed_0''']['''embedding'''] ), requires_grad=False )
    for lyr_num, lyr in enumerate(model.encoders ):
        ly_weight = weights[f'layers_{lyr_num}']
        attention_weights = ly_weight['''attention''']
        lyr.layer[0].SelfAttention.q.weight = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) )
        lyr.layer[0].SelfAttention.k.weight = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) )
        lyr.layer[0].SelfAttention.v.weight = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) )
        lyr.layer[0].SelfAttention.o.weight = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) )
        lyr.layer[0].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight['''pre_attention_layer_norm''']['''scale'''] ) )
        lyr.layer[1].DenseReluDense.wi_0.weight = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_0''']['''kernel'''].T ) )
        lyr.layer[1].DenseReluDense.wi_1.weight = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_1''']['''kernel'''].T ) )
        lyr.layer[1].DenseReluDense.wo.weight = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wo''']['''kernel'''].T ) )
        lyr.layer[1].layer_norm.weight = nn.Parameter(torch.FloatTensor(ly_weight['''pre_mlp_layer_norm''']['''scale'''] ) )
    model.layer_norm.weight = nn.Parameter(torch.FloatTensor(weights['''encoder_norm''']['''scale'''] ) )
    return model
def load_decoder(weights, model):
    """Copies the T5X FiLM-decoder weights into the PyTorch TaFilmDecoder.

    The target attribute paths below are reconstructed to match the diffusers
    decoder layout and should be double-checked against the installed version."""
    model.conditioning_emb[0].weight = nn.Parameter(torch.FloatTensor(weights['''time_emb_dense0''']['''kernel'''].T ) )
    model.conditioning_emb[2].weight = nn.Parameter(torch.FloatTensor(weights['''time_emb_dense1''']['''kernel'''].T ) )
    model.position_encoding.weight = nn.Parameter(
        torch.FloatTensor(weights['''Embed_0''']['''embedding'''] ), requires_grad=False )
    model.continuous_inputs_projection.weight = nn.Parameter(
        torch.FloatTensor(weights['''continuous_inputs_projection''']['''kernel'''].T ) )
    for lyr_num, lyr in enumerate(model.decoders ):
        ly_weight = weights[f'layers_{lyr_num}']
        lyr.layer[0].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight['''pre_self_attention_layer_norm''']['''scale'''] ) )
        lyr.layer[0].FiLMLayer.scale_bias.weight = nn.Parameter(
            torch.FloatTensor(ly_weight['''FiLMLayer_0''']['''DenseGeneral_0''']['''kernel'''].T ) )
        attention_weights = ly_weight['''self_attention''']
        lyr.layer[0].attention.to_q.weight = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) )
        lyr.layer[0].attention.to_k.weight = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) )
        lyr.layer[0].attention.to_v.weight = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) )
        lyr.layer[0].attention.to_out[0].weight = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) )
        attention_weights = ly_weight['''MultiHeadDotProductAttention_0''']
        lyr.layer[1].attention.to_q.weight = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) )
        lyr.layer[1].attention.to_k.weight = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) )
        lyr.layer[1].attention.to_v.weight = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) )
        lyr.layer[1].attention.to_out[0].weight = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) )
        lyr.layer[1].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight['''pre_cross_attention_layer_norm''']['''scale'''] ) )
        lyr.layer[2].layer_norm.weight = nn.Parameter(torch.FloatTensor(ly_weight['''pre_mlp_layer_norm''']['''scale'''] ) )
        lyr.layer[2].film.scale_bias.weight = nn.Parameter(
            torch.FloatTensor(ly_weight['''FiLMLayer_1''']['''DenseGeneral_0''']['''kernel'''].T ) )
        lyr.layer[2].DenseReluDense.wi_0.weight = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_0''']['''kernel'''].T ) )
        lyr.layer[2].DenseReluDense.wi_1.weight = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_1''']['''kernel'''].T ) )
        lyr.layer[2].DenseReluDense.wo.weight = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wo''']['''kernel'''].T ) )
    model.decoder_norm.weight = nn.Parameter(torch.FloatTensor(weights['''decoder_norm''']['''scale'''] ) )
    model.spec_out.weight = nn.Parameter(torch.FloatTensor(weights['''spec_out_dense''']['''kernel'''].T ) )
    return model
def main(args):
    """Converts a music-spectrogram-diffusion T5X checkpoint into a diffusers pipeline."""
    ta_checkpoint = checkpoints.load_tax_checkpoint(args.checkpoint_path )
    ta_checkpoint = jnp.tree_util.tree_map(onp.array, ta_checkpoint )
    gin_overrides = [
        'from __gin__ import dynamic_registration',
        'from music_spectrogram_diffusion.models.diffusion import diffusion_utils',
        'diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0',
        'diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()',
    ]
    gin_file = os.path.join(args.checkpoint_path, '''..''', '''config.gin''' )
    gin_config = inference.parse_training_gin_file(gin_file, gin_overrides )
    synth_model = inference.InferenceModel(args.checkpoint_path, gin_config )
    scheduler = DDPMScheduler(beta_schedule='''squaredcos_cap_v2''', variance_type='''fixed_large''' )
    notes_encoder = SpectrogramNotesEncoder(
        max_length=synth_model.sequence_length['''inputs'''], vocab_size=synth_model.model.module.config.vocab_size, d_model=synth_model.model.module.config.emb_dim, dropout_rate=synth_model.model.module.config.dropout_rate, num_layers=synth_model.model.module.config.num_encoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, feed_forward_proj='''gated-gelu''', )
    continuous_encoder = SpectrogramContEncoder(
        input_dims=synth_model.audio_codec.n_dims, targets_context_length=synth_model.sequence_length['''targets_context'''], d_model=synth_model.model.module.config.emb_dim, dropout_rate=synth_model.model.module.config.dropout_rate, num_layers=synth_model.model.module.config.num_encoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, feed_forward_proj='''gated-gelu''', )
    decoder = TaFilmDecoder(
        input_dims=synth_model.audio_codec.n_dims, targets_length=synth_model.sequence_length['''targets_context'''], max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time, d_model=synth_model.model.module.config.emb_dim, num_layers=synth_model.model.module.config.num_decoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, dropout_rate=synth_model.model.module.config.dropout_rate, )
    notes_encoder = load_notes_encoder(ta_checkpoint['''target''']['''token_encoder'''], notes_encoder )
    continuous_encoder = load_continuous_encoder(ta_checkpoint['''target''']['''continuous_encoder'''], continuous_encoder )
    decoder = load_decoder(ta_checkpoint['''target''']['''decoder'''], decoder )
    melgan = OnnxRuntimeModel.from_pretrained('''kashif/soundstream_mel_decoder''' )
    pipe = SpectrogramDiffusionPipeline(
        notes_encoder=notes_encoder, continuous_encoder=continuous_encoder, decoder=decoder, scheduler=scheduler, melgan=melgan, )
    if args.save:
        pipe.save_pretrained(args.output_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--output_path''', default=None, type=str, required=True, help='''Path to the converted model.''')
parser.add_argument(
'''--save''', default=True, type=bool, required=False, help='''Whether to save the converted model or not.'''
)
parser.add_argument(
'''--checkpoint_path''',
default=f'{MODEL}/checkpoint_500000',
type=str,
required=False,
help='''Path to the original jax model checkpoint.''',
)
    args = parser.parse_args()
main(args) | 320 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
TOP_LEVEL_KEYS = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split('.' ):
        hf_pointer = getattr(hf_pointer, attribute )

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type ).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
        f""" {value.shape} for {full_name}"""
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def recursively_load_weights_wavaveca(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor

    # if encoder has different dim to decoder -> use proj_weight
    proj_weight = None

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == 'group', )
            is_used = True
        elif name.split('.' )[0] == "proj":
            proj_weight = fairseq_model.proj
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key )[0].split('.' )[-2]
                        mapped_key = mapped_key.replace('*', layer_index )
                    if "weight_g" in name:
                        weight_type = 'weight_g'
                    elif "weight_v" in name:
                        weight_type = 'weight_v'
                    elif "bias" in name:
                        weight_type = 'bias'
                    elif "weight" in name:
                        weight_type = 'weight'
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type )
                continue
        if not is_used:
            unused_weights.append(name )

    logger.warning(f"""Unused weights: {unused_weights}""" )

    return proj_weight
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split('conv_layers.' )[-1]
    items = name.split('.' )
    layer_id = int(items[0] )
    type_id = int(items[1] )

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
    else:
        unused_weights.append(full_name )
def make_linear_from_emb(emb):
    # Re-use the embedding matrix as the weight of an untied output projection.
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def create_vocab_dict(dict_path):
    with open(dict_path, 'r', encoding='utf-8' ) as f:
        lines = f.readlines()
        words = [line.split(' ' )[0] for line in lines]

    num_words = len(words )

    vocab_dict = {
        '<s>': 0,
        '<pad>': 1,
        '</s>': 2,
        '<unk>': 3,
    }

    vocab_dict.update(dict(zip(words, range(4, num_words + 4 ) ) ) )
    return vocab_dict
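# For a fairseq dict file whose lines look like "hello 123", the resulting mapping is
# {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "hello": 4, ...}.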
@torch.no_grad()
def convert_wavaveca_checkpoint(
    checkpoint_path,
    pytorch_dump_folder_path,
    dict_path,
    encoder_config_path,
    decoder_config_path,
    vocab_size,
    num_decoder_layers,
):
    """
    Copy/paste/tweak the fairseq model's weights to the transformers design.
    """
    encoder_config = WavaVecaConfig.from_pretrained(encoder_config_path )
    decoder_config = SpeechaTextaConfig.from_pretrained(
        decoder_config_path, vocab_size=vocab_size, decoder_layers=num_decoder_layers, do_stable_layer_norm=True )

    feature_extractor = WavaVecaFeatureExtractor(
        feature_size=1, sampling_rate=16_000, padding_value=0, do_normalize=True, return_attention_mask=True, )

    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
    model = model[0].eval()

    # set weights for wav2vec2 encoder
    hf_encoder = WavaVecaModel(encoder_config )
    projection_layer = recursively_load_weights_wavaveca(model.encoder, hf_encoder )

    hf_decoder = SpeechaTextaForCausalLM(decoder_config )
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False )

    # set output linear layer
    unexpected_keys.remove('embed_out' )
    hf_decoder.lm_head.weight = nn.Parameter(model.decoder.embed_out.detach() )

    # layer norm is init to identity matrix so leaving it is fine
    logger.warning(f"""The following keys are missing when loading the decoder weights: {missing_keys}""" )
    logger.warning(f"""The following keys are unexpected when loading the decoder weights: {unexpected_keys}""" )

    hf_wavavec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder )
    hf_wavavec.config.tie_word_embeddings = False

    # add projection layer
    hf_wavavec.enc_to_dec_proj.weight = nn.Parameter(projection_layer.weight )
    hf_wavavec.enc_to_dec_proj.bias = nn.Parameter(projection_layer.bias )

    vocab_dict = create_vocab_dict(dict_path )

    with open(os.path.join(pytorch_dump_folder_path, 'vocab.json' ), 'w' ) as fp:
        json.dump(vocab_dict, fp )

    tokenizer = SpeechaTextaTokenizer(os.path.join(pytorch_dump_folder_path, 'vocab.json' ) )
    tokenizer.save_pretrained(pytorch_dump_folder_path )

    config = hf_wavavec.config.to_dict()
    config['pad_token_id'] = tokenizer.pad_token_id
    config['bos_token_id'] = tokenizer.bos_token_id
    config['eos_token_id'] = tokenizer.eos_token_id
    config['tokenizer_class'] = 'speech_to_text_2'
    config['feature_extractor_type'] = 'wav2vec2'

    hf_wavavec.config = SpeechEncoderDecoderConfig.from_dict(config )

    hf_wavavec.save_pretrained(pytorch_dump_folder_path )
    feature_extractor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument(
'--encoder_config_path',
default='facebook/wav2vec2-large-lv60',
type=str,
help='Path to hf encoder wav2vec2 checkpoint config',
)
parser.add_argument(
'--decoder_config_path',
default='facebook/s2t-small-mustc-en-fr-st',
type=str,
help='Path to hf decoder s2t checkpoint config',
)
parser.add_argument('--vocab_size', default=1_02_24, type=int, help='Vocab size of decoder')
parser.add_argument('--num_decoder_layers', default=7, type=int, help='Number of decoder layers')
    args = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
| 27 | 0 |
'''simple docstring'''
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
a : Optional[Any] = """platform"""
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class FlaxPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size ).clip(3, self.vocab_size )
        eos_tensor = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ), 1 )
        input_ids = np.concatenate([input_ids, eos_tensor], axis=1 )

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )

        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates, )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids )
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config )

        encoder_outputs = model.encode(inputs_dict["input_ids"] )

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs )
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4" )

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1), )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1], encoder_outputs, decoder_attention_mask=decoder_attention_mask, past_key_values=past_key_values, decoder_position_ids=decoder_position_ids, )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4" )
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:], encoder_outputs, decoder_attention_mask=decoder_attention_mask, past_key_values=outputs_cache.past_key_values, decoder_position_ids=decoder_position_ids, )

        outputs = model.decode(decoder_input_ids, encoder_outputs )

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5] ) )
        self.parent.assertTrue(diff < 1e-3, msg=f'''Max diff is {diff}''' )
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)
        encoder_outputs = model.encode(inputs_dict["input_ids"])
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )
        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )
        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )
        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)
        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
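
# Helper shared by the tests above: when the caller does not supply masks, derive the
# encoder mask from the pad token and force the first decoder position to be attended.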
def prepare_pegasus_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
):
    if attention_mask is None:
        attention_mask = np.not_equal(input_ids, config.pad_token_id).astype(np.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.concatenate(
            [
                np.ones(decoder_input_ids[:, :1].shape, dtype=np.int8),
                np.not_equal(decoder_input_ids[:, 1:], config.pad_token_id).astype(np.int8),
            ],
            axis=-1,
        )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
@require_flax
class FlaxPegasusModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxPegasusForConditionalGeneration,
            FlaxPegasusModel,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
    is_encoder_decoder = True
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = FlaxPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)
    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])
                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/pegasus-large", from_pt=True)
            input_ids = np.ones((1, 1))
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)
    @slow
    def test_pegasus_xsum_summary(self):
        model = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum")
        tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-xsum")
        src_text = [
""" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""",
""" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """,
]
        tgt_text = [
"""California's largest electricity provider has turned off power to hundreds of thousands of customers.""",
"""Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.""",
]
        inputs = tokenizer(src_text, return_tensors="np", truncation=True, max_length=512, padding=True)
        translated_tokens = model.generate(**inputs, num_beams=2).sequences
        decoded = tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)
        assert tgt_text == decoded
| 338 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class TFMBartModelTester:
    config_cls = MBartConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99,
                 hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37,
                 hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20,
                 eos_token_id=2, pad_token_id=1, bos_token_id=0):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates,
        )
        inputs_dict = prepare_mbart_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
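
    # The check below only exercises the first step of cached decoding: with
    # `use_cache=True` the decoder returns its hidden states together with the
    # past key/value tensors that later decoding steps would reuse.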
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFMBartModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()
        past_key_values = past_key_values[1]
def prepare_mbart_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class TFMBartModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
    all_generative_model_classes = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFMBartForConditionalGeneration,
            "feature-extraction": TFMBartModel,
            "summarization": TFMBartForConditionalGeneration,
            "text2text-generation": TFMBartForConditionalGeneration,
            "translation": TFMBartForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
            # Exception encountered when calling layer '...'
            return True
        return False

    def setUp(self):
        self.model_tester = TFMBartModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MBartConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class TFMBartModelIntegrationTest(unittest.TestCase):
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
    ]
    expected_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
    ]
    model_name = "facebook/mbart-large-en-ro"
    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        self.assertListEqual(self.expected_text, generated_words)

    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2
        )
        generated_words = self.tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
        return generated_words

    @slow
    def test_batch_generation_en_ro(self):
        self._assert_generated_batch_equal_expected()
| 338 | 1 |
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class Speech2TextProcessor(ProcessorMixin):
    """Wraps a Speech2Text feature extractor and a Speech2Text tokenizer into a single processor."""

    feature_extractor_class = "Speech2TextFeatureExtractor"
    tokenizer_class = "Speech2TextTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
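
    # `current_processor` is what `__call__` forwards to while inside the (deprecated)
    # `as_target_processor` context manager; it is swapped to the tokenizer there and
    # restored to the feature extractor on exit.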
    def __call__(self, *args, **kwargs):
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs
    def batch_decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)
    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
| 122 |
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP = {"LayoutLMv2Config", "LayoutLMv3Config"}
@is_pipeline_test
class ZeroShotClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }
    def get_test_pipeline(self, model, tokenizer, processor):
        classifier = ZeroShotClassificationPipeline(
            model=model, tokenizer=tokenizer, candidate_labels=["polics", "health"]
        )
        return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
    def run_pipeline_test(self, classifier, _):
        outputs = classifier("Who are you voting for in 2020?", candidate_labels="politics")
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        # No kwarg
        outputs = classifier("Who are you voting for in 2020?", ["politics"])
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        outputs = classifier("Who are you voting for in 2020?", candidate_labels=["politics"])
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        outputs = classifier("Who are you voting for in 2020?", candidate_labels="politics, public health")
        self.assertEqual(
            outputs, {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
        )
        self.assertAlmostEqual(sum(nested_simplify(outputs["scores"])), 1.0)

        outputs = classifier("Who are you voting for in 2020?", candidate_labels=["politics", "public health"])
        self.assertEqual(
            outputs, {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
        )
        self.assertAlmostEqual(sum(nested_simplify(outputs["scores"])), 1.0)

        outputs = classifier(
            "Who are you voting for in 2020?", candidate_labels="politics", hypothesis_template="This text is about {}"
        )
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        # https://github.com/huggingface/transformers/issues/13846
        outputs = classifier(["I am happy"], ["positive", "negative"])
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
                for i in range(1)
            ],
        )
        outputs = classifier(["I am happy", "I am sad"], ["positive", "negative"])
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
                for i in range(2)
            ],
        )

        with self.assertRaises(ValueError):
            classifier("", candidate_labels="politics")
        with self.assertRaises(TypeError):
            classifier(None, candidate_labels="politics")
        with self.assertRaises(ValueError):
            classifier("Who are you voting for in 2020?", candidate_labels="")
        with self.assertRaises(TypeError):
            classifier("Who are you voting for in 2020?", candidate_labels=None)
        with self.assertRaises(ValueError):
            classifier(
                "Who are you voting for in 2020?",
                candidate_labels="politics",
                hypothesis_template="Not formatting template",
            )
        with self.assertRaises(AttributeError):
            classifier(
                "Who are you voting for in 2020?",
                candidate_labels="politics",
                hypothesis_template=None,
            )

        self.run_entailment_id(classifier)
    def run_entailment_id(self, zero_shot_classifier):
        config = zero_shot_classifier.model.config
        original_label2id = config.label2id
        original_entailment = zero_shot_classifier.entailment_id

        config.label2id = {"LABEL_0": 0, "LABEL_1": 1, "LABEL_2": 2}
        self.assertEqual(zero_shot_classifier.entailment_id, -1)

        config.label2id = {"entailment": 0, "neutral": 1, "contradiction": 2}
        self.assertEqual(zero_shot_classifier.entailment_id, 0)

        config.label2id = {"ENTAIL": 0, "NON-ENTAIL": 1}
        self.assertEqual(zero_shot_classifier.entailment_id, 0)

        config.label2id = {"ENTAIL": 2, "NEUTRAL": 1, "CONTR": 0}
        self.assertEqual(zero_shot_classifier.entailment_id, 2)

        config.label2id = original_label2id
        self.assertEqual(original_entailment, zero_shot_classifier.entailment_id)
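
    # Note: `entailment_id` scans `label2id` for a label whose lower-cased name starts
    # with "entail" and falls back to -1 when none exists, which is what the
    # LABEL_0/1/2 case above asserts.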
    @require_torch
    def test_truncation(self):
        zero_shot_classifier = pipeline(
            "zero-shot-classification", model="sshleifer/tiny-distilbert-base-cased-distilled-squad", framework="pt"
        )
        # There was a regression in 4.10 for this
        # Adding a test so we don't make the mistake again.
        # https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
        zero_shot_classifier(
            "Who are you voting for in 2020?" * 100, candidate_labels=["politics", "public health", "science"]
        )
    @require_torch
    def test_small_model_pt(self):
        zero_shot_classifier = pipeline(
            "zero-shot-classification", model="sshleifer/tiny-distilbert-base-cased-distilled-squad", framework="pt"
        )
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )
        self.assertEqual(
            nested_simplify(outputs),
            {
"""sequence""": """Who are you voting for in 2020?""",
"""labels""": ["""science""", """public health""", """politics"""],
"""scores""": [0.333, 0.333, 0.333],
} , )
    @require_tf
    def test_small_model_tf(self):
        zero_shot_classifier = pipeline(
            "zero-shot-classification", model="sshleifer/tiny-distilbert-base-cased-distilled-squad", framework="tf"
        )
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )
        self.assertEqual(
            nested_simplify(outputs),
            {
"""sequence""": """Who are you voting for in 2020?""",
"""labels""": ["""science""", """public health""", """politics"""],
"""scores""": [0.333, 0.333, 0.333],
} , )
    @slow
    @require_torch
    def test_large_model_pt(self):
        zero_shot_classifier = pipeline("zero-shot-classification", model="roberta-large-mnli", framework="pt")
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )
        self.assertEqual(
            nested_simplify(outputs),
            {
"""sequence""": """Who are you voting for in 2020?""",
"""labels""": ["""politics""", """public health""", """science"""],
"""scores""": [0.976, 0.015, 0.009],
} , )
        outputs = zero_shot_classifier(
"""The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"""
""" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"""
""" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"""
""" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"""
""" machine translation tasks show these models to be superior in quality while being more parallelizable"""
""" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"""
""" English-to-German translation task, improving over the existing best results, including ensembles by"""
""" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"""
""" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"""
""" fraction of the training costs of the best models from the literature. We show that the Transformer"""
""" generalizes well to other tasks by applying it successfully to English constituency parsing both with"""
""" large and limited training data.""" , candidate_labels=["""machine learning""", """statistics""", """translation""", """vision"""] , multi_label=__UpperCamelCase , )
        self.assertEqual(
            nested_simplify(outputs),
            {
"""sequence""": (
"""The dominant sequence transduction models are based on complex recurrent or convolutional neural"""
""" networks in an encoder-decoder configuration. The best performing models also connect the"""
""" encoder and decoder through an attention mechanism. We propose a new simple network"""
""" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"""
""" and convolutions entirely. Experiments on two machine translation tasks show these models to be"""
""" superior in quality while being more parallelizable and requiring significantly less time to"""
""" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"""
""" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"""
""" English-to-French translation task, our model establishes a new single-model state-of-the-art"""
""" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"""
""" costs of the best models from the literature. We show that the Transformer generalizes well to"""
""" other tasks by applying it successfully to English constituency parsing both with large and"""
""" limited training data."""
),
"""labels""": ["""translation""", """machine learning""", """vision""", """statistics"""],
"""scores""": [0.817, 0.713, 0.018, 0.018],
} , )
    @slow
    @require_tf
    def test_large_model_tf(self):
        zero_shot_classifier = pipeline("zero-shot-classification", model="roberta-large-mnli", framework="tf")
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )
        self.assertEqual(
            nested_simplify(outputs),
            {
"""sequence""": """Who are you voting for in 2020?""",
"""labels""": ["""politics""", """public health""", """science"""],
"""scores""": [0.976, 0.015, 0.009],
} , )
        outputs = zero_shot_classifier(
"""The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"""
""" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"""
""" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"""
""" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"""
""" machine translation tasks show these models to be superior in quality while being more parallelizable"""
""" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"""
""" English-to-German translation task, improving over the existing best results, including ensembles by"""
""" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"""
""" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"""
""" fraction of the training costs of the best models from the literature. We show that the Transformer"""
""" generalizes well to other tasks by applying it successfully to English constituency parsing both with"""
""" large and limited training data.""" , candidate_labels=["""machine learning""", """statistics""", """translation""", """vision"""] , multi_label=__UpperCamelCase , )
        self.assertEqual(
            nested_simplify(outputs),
            {
"""sequence""": (
"""The dominant sequence transduction models are based on complex recurrent or convolutional neural"""
""" networks in an encoder-decoder configuration. The best performing models also connect the"""
""" encoder and decoder through an attention mechanism. We propose a new simple network"""
""" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"""
""" and convolutions entirely. Experiments on two machine translation tasks show these models to be"""
""" superior in quality while being more parallelizable and requiring significantly less time to"""
""" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"""
""" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"""
""" English-to-French translation task, our model establishes a new single-model state-of-the-art"""
""" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"""
""" costs of the best models from the literature. We show that the Transformer generalizes well to"""
""" other tasks by applying it successfully to English constituency parsing both with large and"""
""" limited training data."""
),
"""labels""": ["""translation""", """machine learning""", """vision""", """statistics"""],
"""scores""": [0.817, 0.713, 0.018, 0.018],
} , )
| 122 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_git": ["GIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "GitConfig", "GitVisionConfig"],
    "processing_git": ["GitProcessor"],
}
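
# Lazy import structure: map each submodule to its public names, and only register the
# torch-backed modeling module below when torch is importable.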
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_git"] = [
        "GIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GitForCausalLM",
        "GitModel",
        "GitPreTrainedModel",
        "GitVisionModel",
    ]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 279 |
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPT2LMHeadModel
logger = logging.getLogger(__name__)
def save_model(model, dirpath):
    # save results
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, "config.json")) and os.path.isfile(
            os.path.join(dirpath, "config.json")
        ):
            os.remove(os.path.join(dirpath, "config.json"))
        if os.path.exists(os.path.join(dirpath, "pytorch_model.bin")) and os.path.isfile(
            os.path.join(dirpath, "pytorch_model.bin")
        ):
            os.remove(os.path.join(dirpath, "pytorch_model.bin"))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)
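
# Entropy of an attention distribution along the last axis. With `unlogit=True` the
# scores are squared before taking p*log(p); `plogp` is zeroed where p == 0 so that
# 0*log(0) does not produce NaNs. A minimal sketch of the expected behaviour:
#
#   p = torch.full((1, 4), 0.25)
#   entropy(p)  # ~= log(4) ~= 1.386 for a uniform distribution over 4 items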
def entropy(p, unlogit=False):
    """Compute the entropy of a probability distribution."""
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1)
def print_2d_tensor(tensor):
    """Print a 2D tensor."""
    logger.info("lv, h >\t" + "\t".join(f"{x + 1}" for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:.5f}" for x in tensor[row].cpu().data))
        else:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:d}" for x in tensor[row].cpu().data))
def compute_heads_importance(
    args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False
):
    """Compute head attention entropy and importance scores (http://arxiv.org/abs/1905.10650)."""
    # Prepare our tensors
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers, n_heads).to(args.device)
    attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)

    if head_mask is None:
        head_mask = torch.ones(n_layers, n_heads).to(args.device)
    head_mask.requires_grad_(requires_grad=True)
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None

    tot_tokens = 0.0
    total_loss = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])):
        inputs = tuple(t.to(args.device) for t in inputs)
        (input_ids,) = inputs

        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids, labels=input_ids, head_mask=head_mask)
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, _, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions):
                masked_entropy = entropy(attn.detach(), True)
                attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()

        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids).float().detach().sum().data

    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
        head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20

    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())

    # Print matrices
    if compute_entropy:
        logger.info("Attention entropies")
        print_2d_tensor(attn_entropy)
    if compute_importance:
        logger.info("Head importance scores")
        print_2d_tensor(head_importance)
    logger.info("Head ranked by importance scores")
    head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
    head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
        head_importance.numel(), device=args.device
    )
    head_ranks = head_ranks.view_as(head_importance)
    print_2d_tensor(head_ranks)
    return attn_entropy, head_importance, total_loss
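
# Iteratively zero out the `masking_amount` least-important heads, re-estimating the
# importance scores after each step, until the (1/loss) score drops below
# `masking_threshold` times the original score.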
def mask_heads(args, model, eval_dataloader):
    """Mask heads (set them to zero) based on the head importance scores, as in Michel et al."""
    _, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
    original_score = 1 / loss  # instead of downsteam score use the LM loss
    logger.info("Pruning: original score: %f, threshold: %f", original_score, original_score * args.masking_threshold)

    new_head_mask = torch.ones_like(head_importance)
    num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))

    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float("Inf")
        current_heads_to_mask = head_importance.view(-1).sort()[1]

        if len(current_heads_to_mask) <= num_to_mask:
            print("BREAK BY num_to_mask")
            break

        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info("Heads to mask: %s", str(current_heads_to_mask.tolist()))
        new_head_mask = new_head_mask.view(-1)
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask)
        new_head_mask = new_head_mask.clone().detach()
        print_2d_tensor(new_head_mask)

        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask
        )
        current_score = 1 / loss
        logger.info(
            "Masking: current score: %f, remaining heads %d (%.1f percents)",
            current_score,
            new_head_mask.sum(),
            new_head_mask.sum() / new_head_mask.numel() * 100,
        )

    logger.info("Final head mask")
    print_2d_tensor(head_mask)
    np.save(os.path.join(args.output_dir, "head_mask.npy"), head_mask.detach().cpu().numpy())
    return head_mask
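
# Unlike masking, pruning physically removes the head weights with
# `model.prune_heads`, so the parameter count shrinks and the timing comparison
# below measures a real speed-up rather than a zeroed-out computation.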
def prune_heads(args, model, eval_dataloader, head_mask):
    """Prune heads (remove their weights) based on the head importance scores, as in Michel et al."""
    # Try pruning and test time speedup; pruning is like masking but we actually remove the masked weights
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask
    )
    score_masking = 1 / loss
    original_time = datetime.now() - before_time

    original_num_params = sum(p.numel() for p in model.parameters())
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask))
    }

    for k, v in heads_to_prune.items():
        if isinstance(v, int):
            heads_to_prune[k] = [
                v,
            ]

    assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune)
    pruned_num_params = sum(p.numel() for p in model.parameters())

    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=None, actually_pruned=True,
    )
    score_pruning = 1 / loss
    new_time = datetime.now() - before_time

    logger.info(
        "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)",
        original_num_params, pruned_num_params, pruned_num_params / original_num_params * 100,
    )
    logger.info("Pruning: score with masking: %f score with pruning: %f", score_masking, score_pruning)
    logger.info("Pruning: speed ratio (original timing / new timing): %f percents", original_time / new_time * 100)
    save_model(model, args.output_dir)
def main():
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--data_dir", default=None, type=str, required=True,
                        help="The input data dir. Should contain the .tsv files (or other data files) for the task.")
    parser.add_argument("--model_name_or_path", default=None, type=str, required=True,
                        help="Path to pretrained model or model identifier from huggingface.co/models")
    parser.add_argument("--output_dir", default=None, type=str, required=True,
                        help="The output directory where the model predictions and checkpoints will be written.")

    # Other parameters
    parser.add_argument("--config_name", default="", type=str,
                        help="Pretrained config name or path if not the same as model_name_or_path")
    parser.add_argument("--tokenizer_name", default="", type=str,
                        help="Pretrained tokenizer name or path if not the same as model_name_or_path")
    parser.add_argument("--cache_dir", default=None, type=str,
                        help="Where do you want to store the pre-trained models downloaded from s3")
    parser.add_argument("--data_subset", type=int, default=-1,
                        help="If > 0: limit the data to a subset of data_subset instances.")
    parser.add_argument("--overwrite_output_dir", action="store_true",
                        help="Whether to overwrite data in output directory")
    parser.add_argument("--overwrite_cache", action="store_true",
                        help="Overwrite the cached training and evaluation sets")
    parser.add_argument("--dont_normalize_importance_by_layer", action="store_true",
                        help="Don't normalize importance score by layers")
    parser.add_argument("--dont_normalize_global_importance", action="store_true",
                        help="Don't normalize all importance scores between 0 and 1")
    parser.add_argument("--try_masking", action="store_true",
                        help="Whether to try to mask head until a threshold of accuracy.")
    parser.add_argument("--masking_threshold", default=0.9, type=float,
                        help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value).")
    parser.add_argument("--masking_amount", default=0.1, type=float,
                        help="Amount to heads to masking at each masking step.")
    parser.add_argument("--metric_name", default="acc", type=str, help="Metric to use for head masking.")
    parser.add_argument("--max_seq_length", default=128, type=int,
                        help="The maximum total input sequence length after WordPiece tokenization. \n"
                             "Sequences longer than this will be truncated, sequences shorter padded.")
    parser.add_argument("--batch_size", default=1, type=int, help="Batch size.")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus")
    parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available")
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('''Waiting for debugger attach''' )
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
ptvsd.wait_for_attach()
# Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        args.device = torch.device("cuda", args.local_rank)
        args.n_gpu = 1
        torch.distributed.init_process_group(backend="nccl")  # Initializes the distributed backend
# Setup logging
logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
logger.info('''device: {} n_gpu: {}, distributed: {}'''.format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) )
    model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path)
# Distributed and parallel training
model.to(args.device )
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
        )
    elif args.n_gpu > 1:
        model = nn.DataParallel(model)

    # Print/save training arguments
    os.makedirs(args.output_dir, exist_ok=True)
    torch.save(args, os.path.join(args.output_dir, "run_args.bin"))
    logger.info("Training/evaluation parameters %s", args)
# Prepare dataset
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir, dtype=np.int64),
        ]
    )
    train_tensor_dataset = (torch.from_numpy(numpy_data),)
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    eval_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size)

    # Compute head entropy and importance score
    compute_heads_importance(args, model, eval_dataloader)
# Try head masking (set heads to zero until the score goes under a threshole)
# and head pruning (remove masked heads and see the effect on the network)
if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args, model, eval_dataloader)
        prune_heads(args, model, eval_dataloader, head_mask)
if __name__ == "__main__":
main()
| 279 | 1 |
'''simple docstring'''
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
DIFFUSERS_PATH = "src/diffusers"
REPO_PATH = "."
# This is to make sure the diffusers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
"""diffusers""",
os.path.join(DIFFUSERS_PATH, """__init__.py"""),
submodule_search_locations=[DIFFUSERS_PATH],
)
diffusers = spec.loader.load_module()
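
# The helpers below re-extract the source of every object referenced in a
# `# Copied from diffusers.<module>.<object>` comment and compare it with the code
# that follows the comment.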
def _should_continue(line, indent):
    return line.startswith(indent) or len(line) <= 1 or re.search(r"^\s*\)(\s*->.*:|:)\s*$", line) is not None
def find_code_in_diffusers(object_name):
    """Find and return the source code of `object_name`."""
    parts = object_name.split(".")
    i = 0

    # First let's find the module where our object lives.
    module = parts[i]
    while i < len(parts) and not os.path.isfile(os.path.join(DIFFUSERS_PATH, f"{module}.py")):
        i += 1
        if i < len(parts):
            module = os.path.join(module, parts[i])
    if i >= len(parts):
        raise ValueError(f"`object_name` should begin with the name of a module of diffusers but got {object_name}.")

    with open(os.path.join(DIFFUSERS_PATH, f"{module}.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Now let's find the class / func in the code!
    indent = ""
    line_index = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(lines) and re.search(rf"^{indent}(class|def)\s+{name}(\(|\:)", lines[line_index]) is None
        ):
            line_index += 1
        indent += "    "
        line_index += 1

    if line_index >= len(lines):
        raise ValueError(f" {object_name} does not match any function or class in {module}.")

    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    start_index = line_index
    while line_index < len(lines) and _should_continue(lines[line_index], indent):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1]) <= 1:
        line_index -= 1

    code_lines = lines[start_index:line_index]
    return "".join(code_lines)
_re_copy_warning = re.compile(r"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)")
_re_replace_pattern = re.compile(r"^\s*(\S+)->(\S+)(\s+.*|$)")
_re_fill_pattern = re.compile(r"<FILL\s+[^>]*>")
def get_indent(code):
    """Return the indentation of the first non-empty line of `code`."""
    lines = code.split("\n")
    idx = 0
    while idx < len(lines) and len(lines[idx]) == 0:
        idx += 1
    if idx < len(lines):
        return re.search(r"^(\s*)\S", lines[idx]).groups()[0]
    return ""
def blackify(code):
    """Apply black formatting, wrapping indented snippets in a dummy class first."""
    has_indent = len(get_indent(code)) > 0
    if has_indent:
        code = f"class Bla:\n{code}"
    mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119, preview=True)
    result = black.format_str(code, mode=mode)
    result, _ = style_docstrings_in_code(result)
    return result[len("class Bla:\n") :] if has_indent else result
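
# Core check: for each `# Copied from` marker, pull the referenced source with
# `find_code_in_diffusers`, apply the optional `with A->B` replacement patterns,
# re-format with black, and diff the result against the code observed in the file.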
def is_copy_consistent(filename, overwrite=False):
    """
    Check if the code commented as a copy in `filename` matches the original.
    Return the differences or overwrite the content depending on `overwrite`.
    """
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines):
        search = _re_copy_warning.search(lines[line_index])
        if search is None:
            line_index += 1
            continue

        # There is some copied code here, let's retrieve the original.
        indent, object_name, replace_pattern = search.groups()
        theoretical_code = find_code_in_diffusers(object_name)
        theoretical_indent = get_indent(theoretical_code)

        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index

        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        should_continue = True
        while line_index < len(lines) and should_continue:
            line_index += 1
            if line_index >= len(lines):
                break
            line = lines[line_index]
            should_continue = _should_continue(line, indent) and re.search(f"^{indent}# End copy", line) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1]) <= 1:
            line_index -= 1

        observed_code_lines = lines[start_index:line_index]
        observed_code = "".join(observed_code_lines)

        # Remove any nested `Copied from` comments to avoid circular copies
        theoretical_code = [line for line in theoretical_code.split("\n") if _re_copy_warning.search(line) is None]
        theoretical_code = "\n".join(theoretical_code)

        # Before comparing, use the `replace_pattern` on the original code.
        if len(replace_pattern) > 0:
            patterns = replace_pattern.replace("with", "").split(",")
            patterns = [_re_replace_pattern.search(p) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obj1, obj2, option = pattern.groups()
                theoretical_code = re.sub(obj1, obj2, theoretical_code)
                if option.strip() == "all-casing":
                    theoretical_code = re.sub(obj1.lower(), obj2.lower(), theoretical_code)
                    theoretical_code = re.sub(obj1.upper(), obj2.upper(), theoretical_code)

            # Blackify after replacement. To be able to do that, we need the header (class or function definition)
            # from the previous line
            theoretical_code = blackify(lines[start_index - 1] + theoretical_code)
            theoretical_code = theoretical_code[len(lines[start_index - 1]) :]

        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index])
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1

    if overwrite and len(diffs) > 0:
        # Warn the user a file has been modified.
        print(f"Detected changes, rewriting {filename}.")
        with open(filename, "w", encoding="utf-8", newline="\n") as f:
            f.writelines(lines)
    return diffs
def check_copies(overwrite: bool = False):
    all_files = glob.glob(os.path.join(REPO_PATH, "**/*.py"), recursive=True)
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename, overwrite)
        diffs += [f"- {filename}: copy does not match {d[0]} at line {d[1]}" for d in new_diffs]
    if not overwrite and len(diffs) > 0:
        diff = "\n".join(diffs)
        raise Exception(
            "Found the following copy inconsistencies:\n"
            + diff
            + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them."
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_copies(args.fix_and_overwrite)
| 31 |
'''simple docstring'''
import bz2
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
logger = get_logger(__name__)
class ExtractManager:
    def __init__(self, cache_dir: Optional[str] = None):
        self.extract_dir = (
            os.path.join(cache_dir, config.EXTRACTED_DATASETS_DIR) if cache_dir else config.EXTRACTED_DATASETS_PATH
        )
        self.extractor = Extractor

    def _get_output_path(self, path: str) -> str:
        from .file_utils import hash_url_to_filename

        # Path where we extract compressed archives
        # We extract in the cache dir, and get the extracted path name by hashing the original path"
        abs_path = os.path.abspath(path)
        return os.path.join(self.extract_dir, hash_url_to_filename(abs_path))

    def _do_extract(self, output_path: str, force_extract: bool) -> bool:
        return force_extract or (
            not os.path.isfile(output_path) and not (os.path.isdir(output_path) and os.listdir(output_path))
        )

    def extract(self, input_path: str, force_extract: bool = False) -> str:
        extractor_format = self.extractor.infer_extractor_format(input_path)
        if not extractor_format:
            return input_path
        output_path = self._get_output_path(input_path)
        if self._do_extract(output_path, force_extract):
            self.extractor.extract(input_path, output_path, extractor_format)
        return output_path
class BaseExtractor(ABC):
    @classmethod
    @abstractmethod
    def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool:
        ...

    @staticmethod
    @abstractmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        ...
class lowerCamelCase_ (snake_case__ , snake_case__ ):
'''simple docstring'''
__UpperCamelCase: List[bytes] = []
@staticmethod
def _A ( A : Union[Path, str] , A : int ):
with open(A , "rb" ) as f:
return f.read(A )
@classmethod
def _A ( cls : Any , A : Union[Path, str] , A : bytes = b"" ):
if not magic_number:
_UpperCAmelCase : Any = max(len(A ) for cls_magic_number in cls.magic_numbers )
try:
_UpperCAmelCase : int = cls.read_magic_number(A , A )
except OSError:
return False
return any(magic_number.startswith(A ) for cls_magic_number in cls.magic_numbers )
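# Hedged sketch (added for illustration; `MAGIC` and `detect_format` are hypothetical names,
# not part of this file): the class above detects archive formats by comparing a file's
# leading bytes against known magic-number prefixes, e.g.:
MAGIC = {b"\x1f\x8b": "gzip", b"PK\x03\x04": "zip", b"\x42\x5a\x68": "bz2"}
def detect_format(path):
    with open(path, "rb") as f:
        head = f.read(max(len(m) for m in MAGIC))
    return next((fmt for magic, fmt in MAGIC.items() if head.startswith(magic)), None)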
class lowerCamelCase_ (snake_case__ ):
'''simple docstring'''
@classmethod
def _A ( cls : str , A : Union[Path, str] , **A : List[Any] ):
return tarfile.is_tarfile(A )
@staticmethod
def _A ( A : Union[str, Any] , A : str ):
def resolved(A : str ) -> str:
return os.path.realpath(os.path.abspath(A ) )
def badpath(A : str , A : str ) -> bool:
# joinpath will ignore base if path is absolute
return not resolved(os.path.join(A , A ) ).startswith(A )
def badlink(A : str , A : str ) -> bool:
# Links are interpreted relative to the directory containing the link
_UpperCAmelCase : List[str] = resolved(os.path.join(A , os.path.dirname(info.name ) ) )
return badpath(info.linkname , base=A )
_UpperCAmelCase : Optional[int] = resolved(A )
for finfo in members:
if badpath(finfo.name , A ):
logger.error(F"""Extraction of {finfo.name} is blocked (illegal path)""" )
elif finfo.issym() and badlink(A , A ):
logger.error(F"""Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}""" )
elif finfo.islnk() and badlink(A , A ):
logger.error(F"""Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}""" )
else:
yield finfo
@staticmethod
def _A ( A : Union[Path, str] , A : Union[Path, str] ):
os.makedirs(A , exist_ok=A )
_UpperCAmelCase : int = tarfile.open(A )
tar_file.extractall(A , members=TarExtractor.safemembers(A , A ) )
tar_file.close()
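# Hedged illustration (added; the paths are made up): the safemembers() filter above exists
# because a tar member named e.g. "../../etc/passwd" resolves outside the extraction base.
base = os.path.realpath("/tmp/extract_here")
escaped = os.path.realpath(os.path.join(base, "../../etc/passwd"))
print(escaped.startswith(base))  # False -> such a member is blocked, not extracted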
class lowerCamelCase_ (snake_case__ ):
'''simple docstring'''
__UpperCamelCase: Union[str, Any] = [b"\x1F\x8B"]
@staticmethod
def _A ( A : Union[Path, str] , A : Union[Path, str] ):
with gzip.open(A , "rb" ) as gzip_file:
with open(A , "wb" ) as extracted_file:
shutil.copyfileobj(A , A )
class lowerCamelCase_ (snake_case__ ):
'''simple docstring'''
__UpperCamelCase: Dict = [
b"PK\x03\x04",
b"PK\x05\x06", # empty archive
b"PK\x07\x08", # spanned archive
]
@classmethod
def _A ( cls : Dict , A : Union[Path, str] , A : bytes = b"" ):
if super().is_extractable(A , magic_number=A ):
return True
try:
# Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
# From: https://github.com/python/cpython/pull/5053
from zipfile import (
_CD_SIGNATURE,
_ECD_DISK_NUMBER,
_ECD_DISK_START,
_ECD_ENTRIES_TOTAL,
_ECD_OFFSET,
_ECD_SIZE,
_EndRecData,
sizeCentralDir,
stringCentralDir,
structCentralDir,
)
with open(A , "rb" ) as fp:
_UpperCAmelCase : Tuple = _EndRecData(A )
if endrec:
if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
return True # Empty zipfiles are still zipfiles
elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
fp.seek(endrec[_ECD_OFFSET] ) # Central directory is on the same disk
if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
_UpperCAmelCase : Dict = fp.read(A ) # CD is where we expect it to be
if len(A ) == sizeCentralDir:
_UpperCAmelCase : Any = struct.unpack(A , A ) # CD is the right size
if centdir[_CD_SIGNATURE] == stringCentralDir:
return True # First central directory entry has correct magic number
return False
except Exception: # catch all errors in case future python versions change the zipfile internals
return False
@staticmethod
def _A ( A : Union[Path, str] , A : Union[Path, str] ):
os.makedirs(A , exist_ok=A )
with zipfile.ZipFile(A , "r" ) as zip_file:
zip_file.extractall(A )
zip_file.close()
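# Small self-contained check (added) of the "empty zipfiles are still zipfiles" branch above:
# an empty archive consists of just the end-of-central-directory record, magic PK\x05\x06.
import io
buf = io.BytesIO()
zipfile.ZipFile(buf, "w").close()
print(buf.getvalue()[:4] == b"PK\x05\x06")  # True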
class lowerCamelCase_ (snake_case__ ):
'''simple docstring'''
__UpperCamelCase: Dict = [b"\xFD\x37\x7A\x58\x5A\x00"]
@staticmethod
def _A ( A : Union[Path, str] , A : Union[Path, str] ):
with lzma.open(A ) as compressed_file:
with open(A , "wb" ) as extracted_file:
shutil.copyfileobj(A , A )
class lowerCamelCase_ (snake_case__ ):
'''simple docstring'''
__UpperCamelCase: List[str] = [b"Rar!\x1a\x07\x00", b"Rar!\x1a\x07\x01\x00"] # RAR_ID # RAR5_ID
@staticmethod
def _A ( A : Union[Path, str] , A : Union[Path, str] ):
if not config.RARFILE_AVAILABLE:
raise ImportError("Please pip install rarfile" )
import rarfile
os.makedirs(A , exist_ok=A )
_UpperCAmelCase : List[str] = rarfile.RarFile(A )
rf.extractall(A )
rf.close()
class lowerCamelCase_ (snake_case__ ):
'''simple docstring'''
__UpperCamelCase: Optional[Any] = [b"\x28\xb5\x2F\xFD"]
@staticmethod
def _A ( A : Union[Path, str] , A : Union[Path, str] ):
if not config.ZSTANDARD_AVAILABLE:
raise ImportError("Please pip install zstandard" )
import zstandard as zstd
_UpperCAmelCase : Optional[Any] = zstd.ZstdDecompressor()
with open(A , "rb" ) as ifh, open(A , "wb" ) as ofh:
dctx.copy_stream(A , A )
class lowerCamelCase_ (snake_case__ ):
'''simple docstring'''
__UpperCamelCase: Optional[Any] = [b"\x42\x5A\x68"]
@staticmethod
def _A ( A : Union[Path, str] , A : Union[Path, str] ):
with bza.open(A , "rb" ) as compressed_file:
with open(A , "wb" ) as extracted_file:
shutil.copyfileobj(A , A )
class lowerCamelCase_ (snake_case__ ):
'''simple docstring'''
__UpperCamelCase: List[Any] = [b"\x37\x7A\xBC\xAF\x27\x1C"]
@staticmethod
def _A ( A : Union[Path, str] , A : Union[Path, str] ):
if not config.PY7ZR_AVAILABLE:
raise ImportError("Please pip install py7zr" )
import pyazr
os.makedirs(A , exist_ok=A )
with pyazr.SevenZipFile(A , "r" ) as archive:
archive.extractall(A )
class lowerCamelCase_ (snake_case__ ):
'''simple docstring'''
__UpperCamelCase: Optional[int] = [b"\x04\x22\x4D\x18"]
@staticmethod
def _A ( A : Union[Path, str] , A : Union[Path, str] ):
if not config.LZ4_AVAILABLE:
raise ImportError("Please pip install lz4" )
import lza.frame
with lza.frame.open(A , "rb" ) as compressed_file:
with open(A , "wb" ) as extracted_file:
shutil.copyfileobj(A , A )
class lowerCamelCase_ :
'''simple docstring'''
__UpperCamelCase: Dict[str, Type[BaseExtractor]] = {
"tar": TarExtractor,
"gzip": GzipExtractor,
"zip": ZipExtractor,
"xz": XzExtractor,
"rar": RarExtractor,
"zstd": ZstdExtractor,
"bz2": BzipaExtractor,
"7z": SevenZipExtractor, # <Added version="2.4.0"/>
"lz4": LzaExtractor, # <Added version="2.4.0"/>
}
@classmethod
def _A ( cls : List[Any] ):
return max(
len(A )
for extractor in cls.extractors.values()
if issubclass(A , A )
for extractor_magic_number in extractor.magic_numbers )
@staticmethod
def _A ( A : Union[Path, str] , A : int ):
try:
return MagicNumberBaseExtractor.read_magic_number(A , magic_number_length=A )
except OSError:
return b""
@classmethod
def _A ( cls : Optional[Any] , A : Union[Path, str] , A : bool = False ):
warnings.warn(
"Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
"Use 'infer_extractor_format' instead." , category=A , )
_UpperCAmelCase : Union[str, Any] = cls.infer_extractor_format(A )
if extractor_format:
return True if not return_extractor else (True, cls.extractors[extractor_format])
return False if not return_extractor else (False, None)
@classmethod
def _A ( cls : Dict , A : Union[Path, str] ): # <Added version="2.4.0"/>
_UpperCAmelCase : Optional[int] = cls._get_magic_number_max_length()
_UpperCAmelCase : str = cls._read_magic_number(A , A )
for extractor_format, extractor in cls.extractors.items():
if extractor.is_extractable(A , magic_number=A ):
return extractor_format
@classmethod
def _A ( cls : List[str] , A : Union[Path, str] , A : Union[Path, str] , A : Optional[str] = None , A : Optional[BaseExtractor] = "deprecated" , ):
os.makedirs(os.path.dirname(A ) , exist_ok=A )
# Prevent parallel extractions
_UpperCAmelCase : Tuple = str(Path(A ).with_suffix(".lock" ) )
with FileLock(A ):
shutil.rmtree(A , ignore_errors=A )
if extractor_format or extractor != "deprecated":
if extractor != "deprecated" or not isinstance(A , A ): # passed as positional arg
warnings.warn(
"Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
"Use 'extractor_format' instead." , category=A , )
_UpperCAmelCase : Tuple = extractor if extractor != "deprecated" else extractor_format
else:
_UpperCAmelCase : Tuple = cls.extractors[extractor_format]
return extractor.extract(A , A )
else:
warnings.warn(
"Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an "
"exception in 3.0.0." , category=A , )
for extractor in cls.extractors.values():
if extractor.is_extractable(A ):
return extractor.extract(A , A )
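# Quick illustration (added) of the per-archive lock-file path derivation used in the
# extract() method above: the archive's suffix is swapped for ".lock" before taking the FileLock.
print(Path("downloads/data.zip").with_suffix(".lock"))  # downloads/data.lock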
| 31 | 1 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
from .timesteps import (
fastaa_timesteps,
    smartaa_timesteps,
    smartaaa_timesteps,
    superaa_timesteps,
superaaa_timesteps,
)
@dataclass
class lowerCAmelCase__( __lowercase ):
'''simple docstring'''
__snake_case = 42
__snake_case = 42
__snake_case = 42
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_if import IFPipeline
from .pipeline_if_imgaimg import IFImgaImgPipeline
from .pipeline_if_imgaimg_superresolution import IFImgaImgSuperResolutionPipeline
from .pipeline_if_inpainting import IFInpaintingPipeline
from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline
from .pipeline_if_superresolution import IFSuperResolutionPipeline
from .safety_checker import IFSafetyChecker
from .watermark import IFWatermarker | 325 |
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class lowerCAmelCase__( __lowercase , __lowercase ):
'''simple docstring'''
@register_to_config
def __init__( self , __lowerCamelCase = 1_2_8 , __lowerCamelCase = 2_5_6 , __lowerCamelCase = 2000.0 , __lowerCamelCase = 7_6_8 , __lowerCamelCase = 1_2 , __lowerCamelCase = 1_2 , __lowerCamelCase = 6_4 , __lowerCamelCase = 2_0_4_8 , __lowerCamelCase = 0.1 , ) -> int:
super().__init__()
_SCREAMING_SNAKE_CASE : Optional[int] = nn.Sequential(
nn.Linear(__lowerCamelCase , d_model * 4 , bias=__lowerCamelCase ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=__lowerCamelCase ) , nn.SiLU() , )
_SCREAMING_SNAKE_CASE : str = nn.Embedding(__lowerCamelCase , __lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[Any] = False
_SCREAMING_SNAKE_CASE : Optional[Any] = nn.Linear(__lowerCamelCase , __lowerCamelCase , bias=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[Any] = nn.Dropout(p=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[int] = nn.ModuleList()
for lyr_num in range(__lowerCamelCase ):
# FiLM conditional T5 decoder
_SCREAMING_SNAKE_CASE : Optional[int] = DecoderLayer(d_model=__lowerCamelCase , d_kv=__lowerCamelCase , num_heads=__lowerCamelCase , d_ff=__lowerCamelCase , dropout_rate=__lowerCamelCase )
self.decoders.append(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Tuple = TaLayerNorm(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : str = nn.Dropout(p=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[Any] = nn.Linear(__lowerCamelCase , __lowerCamelCase , bias=__lowerCamelCase )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase ) -> List[Any]:
_SCREAMING_SNAKE_CASE : int = torch.mul(query_input.unsqueeze(-1 ) , key_input.unsqueeze(-2 ) )
return mask.unsqueeze(-3 )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Dict:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : str = decoder_input_tokens.shape
assert decoder_noise_time.shape == (batch,)
# decoder_noise_time is in [0, 1), so rescale to expected timing range.
_SCREAMING_SNAKE_CASE : Tuple = get_timestep_embedding(
decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype )
_SCREAMING_SNAKE_CASE : str = self.conditioning_emb(__lowerCamelCase ).unsqueeze(1 )
assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
_SCREAMING_SNAKE_CASE : Tuple = decoder_input_tokens.shape[1]
# If we want to use relative positions for audio context, we can just offset
# this sequence by the length of encodings_and_masks.
_SCREAMING_SNAKE_CASE : Optional[int] = torch.broadcast_to(
torch.arange(__lowerCamelCase , device=decoder_input_tokens.device ) , (batch, seq_length) , )
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.position_encoding(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : str = self.continuous_inputs_projection(__lowerCamelCase )
inputs += position_encodings
_SCREAMING_SNAKE_CASE : Any = self.dropout(__lowerCamelCase )
# decoder: No padding present.
_SCREAMING_SNAKE_CASE : Any = torch.ones(
decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype )
# Translate encoding masks to encoder-decoder masks.
_SCREAMING_SNAKE_CASE : List[str] = [(x, self.encoder_decoder_mask(__lowerCamelCase , __lowerCamelCase )) for x, y in encodings_and_masks]
# cross attend style: concat encodings
_SCREAMING_SNAKE_CASE : Tuple = torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1 )
_SCREAMING_SNAKE_CASE : Union[str, Any] = torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1 )
for lyr in self.decoders:
_SCREAMING_SNAKE_CASE : Optional[Any] = lyr(
__lowerCamelCase , conditioning_emb=__lowerCamelCase , encoder_hidden_states=__lowerCamelCase , encoder_attention_mask=__lowerCamelCase , )[0]
_SCREAMING_SNAKE_CASE : int = self.decoder_norm(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Dict = self.post_dropout(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : int = self.spec_out(__lowerCamelCase )
return spec_out
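# Hedged sketch (added; the tensors are made up) of the encoder_decoder_mask construction above:
# outer-multiplying a query mask and a key mask yields a (batch, 1, q_len, k_len) attention mask.
_q = torch.tensor([[1.0, 1.0, 0.0]])  # query padding mask, shape (1, 3)
_k = torch.tensor([[1.0, 0.0]])       # key padding mask, shape (1, 2)
print(torch.mul(_q.unsqueeze(-1), _k.unsqueeze(-2)).unsqueeze(-3).shape)  # torch.Size([1, 1, 3, 2])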
class lowerCAmelCase__( nn.Module ):
'''simple docstring'''
def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=1E-6 ) -> Dict:
super().__init__()
_SCREAMING_SNAKE_CASE : Optional[int] = nn.ModuleList()
# cond self attention: layer 0
self.layer.append(
TaLayerSelfAttentionCond(d_model=__lowerCamelCase , d_kv=__lowerCamelCase , num_heads=__lowerCamelCase , dropout_rate=__lowerCamelCase ) )
# cross attention: layer 1
self.layer.append(
TaLayerCrossAttention(
d_model=__lowerCamelCase , d_kv=__lowerCamelCase , num_heads=__lowerCamelCase , dropout_rate=__lowerCamelCase , layer_norm_epsilon=__lowerCamelCase , ) )
# Film Cond MLP + dropout: last layer
self.layer.append(
TaLayerFFCond(d_model=__lowerCamelCase , d_ff=__lowerCamelCase , dropout_rate=__lowerCamelCase , layer_norm_epsilon=__lowerCamelCase ) )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=None , ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE : int = self.layer[0](
__lowerCamelCase , conditioning_emb=__lowerCamelCase , attention_mask=__lowerCamelCase , )
if encoder_hidden_states is not None:
_SCREAMING_SNAKE_CASE : str = torch.where(encoder_attention_mask > 0 , 0 , -1E10 ).to(
encoder_hidden_states.dtype )
_SCREAMING_SNAKE_CASE : Tuple = self.layer[1](
__lowerCamelCase , key_value_states=__lowerCamelCase , attention_mask=__lowerCamelCase , )
# Apply Film Conditional Feed Forward layer
_SCREAMING_SNAKE_CASE : Optional[Any] = self.layer[-1](__lowerCamelCase , __lowerCamelCase )
return (hidden_states,)
class lowerCAmelCase__( nn.Module ):
'''simple docstring'''
def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Optional[Any]:
super().__init__()
_SCREAMING_SNAKE_CASE : List[str] = TaLayerNorm(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Any = TaFiLMLayer(in_features=d_model * 4 , out_features=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : str = Attention(query_dim=__lowerCamelCase , heads=__lowerCamelCase , dim_head=__lowerCamelCase , out_bias=__lowerCamelCase , scale_qk=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[Any] = nn.Dropout(__lowerCamelCase )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase=None , __lowerCamelCase=None , ) -> Union[str, Any]:
# pre_self_attention_layer_norm
_SCREAMING_SNAKE_CASE : int = self.layer_norm(__lowerCamelCase )
if conditioning_emb is not None:
_SCREAMING_SNAKE_CASE : Any = self.FiLMLayer(__lowerCamelCase , __lowerCamelCase )
# Self-attention block
_SCREAMING_SNAKE_CASE : Optional[int] = self.attention(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Dict = hidden_states + self.dropout(__lowerCamelCase )
return hidden_states
class lowerCAmelCase__( nn.Module ):
'''simple docstring'''
def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> List[str]:
super().__init__()
_SCREAMING_SNAKE_CASE : Optional[Any] = Attention(query_dim=__lowerCamelCase , heads=__lowerCamelCase , dim_head=__lowerCamelCase , out_bias=__lowerCamelCase , scale_qk=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[str] = TaLayerNorm(__lowerCamelCase , eps=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[Any] = nn.Dropout(__lowerCamelCase )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase=None , __lowerCamelCase=None , ) -> List[Any]:
_SCREAMING_SNAKE_CASE : Tuple = self.layer_norm(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : str = self.attention(
__lowerCamelCase , encoder_hidden_states=__lowerCamelCase , attention_mask=attention_mask.squeeze(1 ) , )
_SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_states + self.dropout(__lowerCamelCase )
return layer_output
class lowerCAmelCase__( nn.Module ):
'''simple docstring'''
def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> List[Any]:
super().__init__()
_SCREAMING_SNAKE_CASE : Tuple = TaDenseGatedActDense(d_model=__lowerCamelCase , d_ff=__lowerCamelCase , dropout_rate=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : str = TaFiLMLayer(in_features=d_model * 4 , out_features=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : str = TaLayerNorm(__lowerCamelCase , eps=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[Any] = nn.Dropout(__lowerCamelCase )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase=None ) -> List[str]:
_SCREAMING_SNAKE_CASE : Optional[int] = self.layer_norm(__lowerCamelCase )
if conditioning_emb is not None:
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.film(__lowerCamelCase , __lowerCamelCase )
_SCREAMING_SNAKE_CASE : Any = self.DenseReluDense(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[Any] = hidden_states + self.dropout(__lowerCamelCase )
return hidden_states
class lowerCAmelCase__( nn.Module ):
'''simple docstring'''
def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Optional[Any]:
super().__init__()
_SCREAMING_SNAKE_CASE : Union[str, Any] = nn.Linear(__lowerCamelCase , __lowerCamelCase , bias=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[str] = nn.Linear(__lowerCamelCase , __lowerCamelCase , bias=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Dict = nn.Linear(__lowerCamelCase , __lowerCamelCase , bias=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Union[str, Any] = nn.Dropout(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Union[str, Any] = NewGELUActivation()
def UpperCamelCase_ ( self , __lowerCamelCase ) -> Any:
_SCREAMING_SNAKE_CASE : Dict = self.act(self.wi_a(__lowerCamelCase ) )
_SCREAMING_SNAKE_CASE : Dict = self.wi_a(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : int = hidden_gelu * hidden_linear
_SCREAMING_SNAKE_CASE : Optional[int] = self.dropout(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[int] = self.wo(__lowerCamelCase )
return hidden_states
class lowerCAmelCase__( nn.Module ):
'''simple docstring'''
def __init__( self , __lowerCamelCase , __lowerCamelCase=1E-6 ) -> int:
super().__init__()
_SCREAMING_SNAKE_CASE : Dict = nn.Parameter(torch.ones(__lowerCamelCase ) )
_SCREAMING_SNAKE_CASE : str = eps
def UpperCamelCase_ ( self , __lowerCamelCase ) -> Optional[Any]:
# T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
# Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated
# w/o mean and there is no bias. Additionally we want to make sure that the accumulation for
# half-precision inputs is done in fp32
_SCREAMING_SNAKE_CASE : Tuple = hidden_states.to(torch.floataa ).pow(2 ).mean(-1 , keepdim=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[Any] = hidden_states * torch.rsqrt(variance + self.variance_epsilon )
# convert into half-precision if necessary
if self.weight.dtype in [torch.floataa, torch.bfloataa]:
_SCREAMING_SNAKE_CASE : str = hidden_states.to(self.weight.dtype )
return self.weight * hidden_states
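# Minimal numeric check (added) of the RMS-style normalization above: divide by the root
# mean square of the features, with no mean subtraction and no bias term.
_x = torch.tensor([3.0, 4.0])
print(_x * torch.rsqrt(_x.pow(2).mean(-1, keepdim=True) + 1e-6))  # ~tensor([0.8485, 1.1314])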
class lowerCAmelCase__( nn.Module ):
'''simple docstring'''
def UpperCamelCase_ ( self , __lowerCamelCase ) -> torch.Tensor:
return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.04_4715 * torch.pow(__lowerCamelCase , 3.0 )) ))
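# Hedged check (added; assumes a torch version where F.gelu accepts approximate="tanh"):
# the tanh formula above should closely match torch's built-in approximate GELU.
_t = torch.linspace(-3.0, 3.0, 7)
_g = 0.5 * _t * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (_t + 0.044715 * torch.pow(_t, 3.0))))
print(torch.allclose(_g, torch.nn.functional.gelu(_t, approximate="tanh"), atol=1e-6))  # True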
class lowerCAmelCase__( nn.Module ):
'''simple docstring'''
def __init__( self , __lowerCamelCase , __lowerCamelCase ) -> Optional[Any]:
super().__init__()
_SCREAMING_SNAKE_CASE : Any = nn.Linear(__lowerCamelCase , out_features * 2 , bias=__lowerCamelCase )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase ) -> Dict:
_SCREAMING_SNAKE_CASE : List[Any] = self.scale_bias(__lowerCamelCase )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Dict = torch.chunk(__lowerCamelCase , 2 , -1 )
_SCREAMING_SNAKE_CASE : Optional[int] = x * (1 + scale) + shift
return x | 325 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase : Union[str, Any] = logging.get_logger(__name__)
lowercase : str = {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/config.json',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/config.json',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/config.json',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/config.json',
'bert-base-multilingual-uncased': 'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json',
'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json',
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/config.json',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/config.json',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json'
),
'bert-base-cased-finetuned-mrpc': 'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json',
'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json',
'bert-base-german-dbmdz-uncased': 'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json',
'cl-tohoku/bert-base-japanese': 'https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json',
'cl-tohoku/bert-base-japanese-whole-word-masking': (
'https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json'
),
'cl-tohoku/bert-base-japanese-char': (
'https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json'
),
'cl-tohoku/bert-base-japanese-char-whole-word-masking': (
'https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json'
),
'wietsedv/bert-base-dutch-cased': 'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json',
# See all BERT models at https://huggingface.co/models?filter=bert
}
class A ( __snake_case ):
__magic_name__ = '''bert'''
def __init__( self , SCREAMING_SNAKE_CASE=30522 , SCREAMING_SNAKE_CASE=768 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=3072 , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=512 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=1e-12 , SCREAMING_SNAKE_CASE=0 , SCREAMING_SNAKE_CASE="absolute" , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=None , **SCREAMING_SNAKE_CASE , ) -> Optional[int]:
"""simple docstring"""
super().__init__(pad_token_id=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
A : Optional[int] = vocab_size
A : Optional[Any] = hidden_size
A : List[Any] = num_hidden_layers
A : List[str] = num_attention_heads
A : Dict = hidden_act
A : Optional[Any] = intermediate_size
A : List[Any] = hidden_dropout_prob
A : List[Any] = attention_probs_dropout_prob
A : Optional[Any] = max_position_embeddings
A : List[str] = type_vocab_size
A : Dict = initializer_range
A : str = layer_norm_eps
A : int = position_embedding_type
A : Dict = use_cache
A : str = classifier_dropout
class A ( __snake_case ):
@property
def __lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "multiple-choice":
A : Optional[Any] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
A : Optional[int] = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''token_type_ids''', dynamic_axis),
] )
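# Hedged sanity check (added): the configuration class above mirrors transformers' BertConfig;
# the same defaults can be inspected on the real class.
from transformers import BertConfig
_cfg = BertConfig()
print(_cfg.hidden_size, _cfg.num_hidden_layers, _cfg.num_attention_heads)  # 768 12 12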
| 3 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import GLPNImageProcessor
class __lowerCAmelCase ( unittest.TestCase ):
def __init__(self , __magic_name__ , __magic_name__=7 , __magic_name__=3 , __magic_name__=18 , __magic_name__=30 , __magic_name__=400 , __magic_name__=True , __magic_name__=32 , __magic_name__=True , ) -> Dict:
'''simple docstring'''
snake_case_ : Tuple = parent
snake_case_ : Union[str, Any] = batch_size
snake_case_ : Union[str, Any] = num_channels
snake_case_ : Optional[Any] = image_size
snake_case_ : int = min_resolution
snake_case_ : Any = max_resolution
snake_case_ : Tuple = do_resize
snake_case_ : str = size_divisor
snake_case_ : Optional[Any] = do_rescale
def lowerCamelCase (self ) -> Dict:
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size_divisor": self.size_divisor,
"do_rescale": self.do_rescale,
}
@require_torch
@require_vision
class __lowerCAmelCase ( _a, unittest.TestCase ):
lowerCamelCase_ : Optional[Any] = GLPNImageProcessor if is_vision_available() else None
def lowerCamelCase (self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : str = GLPNImageProcessingTester(self )
@property
def lowerCamelCase (self ) -> Union[str, Any]:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCamelCase (self ) -> List[str]:
'''simple docstring'''
snake_case_ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__magic_name__ , '''do_resize''' ) )
self.assertTrue(hasattr(__magic_name__ , '''size_divisor''' ) )
self.assertTrue(hasattr(__magic_name__ , '''resample''' ) )
self.assertTrue(hasattr(__magic_name__ , '''do_rescale''' ) )
def lowerCamelCase (self ) -> List[Any]:
'''simple docstring'''
pass
def lowerCamelCase (self ) -> int:
'''simple docstring'''
snake_case_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case_ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__magic_name__ )
for image in image_inputs:
self.assertIsInstance(__magic_name__ , Image.Image )
# Test not batched input (GLPNImageProcessor doesn't support batching)
snake_case_ : Optional[int] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
def lowerCamelCase (self ) -> Dict:
'''simple docstring'''
snake_case_ : str = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case_ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__magic_name__ , numpify=__magic_name__ )
for image in image_inputs:
self.assertIsInstance(__magic_name__ , np.ndarray )
# Test not batched input (GLPNImageProcessor doesn't support batching)
snake_case_ : Any = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
def lowerCamelCase (self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case_ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__magic_name__ , torchify=__magic_name__ )
for image in image_inputs:
self.assertIsInstance(__magic_name__ , torch.Tensor )
# Test not batched input (GLPNImageProcessor doesn't support batching)
snake_case_ : Dict = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
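# Hedged arithmetic sketch (added; `_round_down` is a hypothetical helper, not part of the
# processor): the size_divisor asserts above hold because GLPN-style resizing rounds each
# spatial dimension down to a multiple of size_divisor.
def _round_down(n: int, divisor: int = 32) -> int:
    return (n // divisor) * divisor
print(_round_down(481), _round_down(640))  # 480 640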
| 279 | 0 |
_lowercase : Optional[int] =[
(1000, "M"),
(900, "CM"),
(500, "D"),
(400, "CD"),
(100, "C"),
(90, "XC"),
(50, "L"),
(40, "XL"),
(10, "X"),
(9, "IX"),
(5, "V"),
(4, "IV"),
(1, "I"),
]
def lowerCAmelCase_ ( _lowercase : str) -> int:
"""simple docstring"""
a__ : Union[str, Any] = {"""I""": 1, """V""": 5, """X""": 10, """L""": 50, """C""": 100, """D""": 500, """M""": 1000}
a__ : List[Any] = 0
a__ : Dict = 0
while place < len(_lowercase):
if (place + 1 < len(_lowercase)) and (vals[_lowercase[place]] < vals[_lowercase[place + 1]]):
total += vals[_lowercase[place + 1]] - vals[_lowercase[place]]
place += 2
else:
total += vals[_lowercase[place]]
place += 1
return total
def lowerCAmelCase_ ( _lowercase : int) -> str:
"""simple docstring"""
a__ : Optional[Any] = []
for arabic, roman in ROMAN:
factor , _lowercase = divmod(_lowercase , arabic)
a__.append(roman * factor)
if _lowercase == 0:
break
return "".join(a__)
if __name__ == "__main__":
import doctest
doctest.testmod()
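# Round-trip demo (added). Note the two converters above share one obfuscated name, so
# only the later int -> roman definition is in scope here:
print(lowerCAmelCase_(3549))  # MMMDXLIX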
| 266 |
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
_lowercase : Any ="\\n@inproceedings{snover-etal-2006-study,\n title = \"A Study of Translation Edit Rate with Targeted Human Annotation\",\n author = \"Snover, Matthew and\n Dorr, Bonnie and\n Schwartz, Rich and\n Micciulla, Linnea and\n Makhoul, John\",\n booktitle = \"Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers\",\n month = aug # \" 8-12\",\n year = \"2006\",\n address = \"Cambridge, Massachusetts, USA\",\n publisher = \"Association for Machine Translation in the Americas\",\n url = \"https://aclanthology.org/2006.amta-papers.25\",\n pages = \"223--231\",\n}\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"
_lowercase : str ="\\nTER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a\nhypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu\n(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found\nhere: https://github.com/jhclark/tercom.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.\n"
_lowercase : Optional[Any] ="\nProduces TER scores alongside the number of edits and reference length.\n\nArgs:\n predictions (list of str): The system stream (a sequence of segments).\n references (list of list of str): A list of one or more reference streams (each a sequence of segments).\n normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,\n as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.\n Only applies if `normalized = True`. Defaults to `False`.\n case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.\n\nReturns:\n 'score' (float): TER score (num_edits / sum_ref_lengths * 100)\n 'num_edits' (int): The cumulative number of edits\n 'ref_length' (float): The cumulative average reference length\n\nExamples:\n Example 1:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\",\n ... \"What did the TER metric user say to the developer?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],\n ... [\"Your jokes are...\", \"...TERrible\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 150.0, 'num_edits': 15, 'ref_length': 10.0}\n\n Example 2:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 62.5, 'num_edits': 5, 'ref_length': 8.0}\n\n Example 3:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... normalized=True,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 57.14285714285714, 'num_edits': 6, 'ref_length': 10.5}\n\n Example 4:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {'score': 0.0, 'num_edits': 0, 'ref_length': 8.0}\n\n Example 5:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\",\n ... \"What did the TER metric user say to the developer?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],\n ... [\"Your jokes are...\", \"...TERrible\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {'score': 100.0, 'num_edits': 10, 'ref_length': 10.0}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case__ (datasets.Metric ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE__( self ) -> Tuple:
"""simple docstring"""
if version.parse(scb.__version__ ) < version.parse("""1.4.12""" ):
raise ImportWarning(
"""To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"""
"""You can install it with `pip install \"sacrebleu>=1.4.12\"`.""" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="""http://www.cs.umd.edu/~snover/tercom/""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Sequence(datasets.Value("""string""" , id="""sequence""" ) , id="""references""" ),
} ) , codebase_urls=["""https://github.com/mjpost/sacreBLEU#ter"""] , reference_urls=[
"""https://github.com/jhclark/tercom""",
] , )
def SCREAMING_SNAKE_CASE__( self , __lowercase , __lowercase , __lowercase = False , __lowercase = False , __lowercase = False , __lowercase = False , ) -> Any:
"""simple docstring"""
a__ : Any = len(references[0] )
if any(len(__lowercase ) != references_per_prediction for refs in references ):
raise ValueError("""Sacrebleu requires the same number of references for each prediction""" )
a__ : str = [[refs[i] for refs in references] for i in range(__lowercase )]
a__ : int = TER(
normalized=__lowercase , no_punct=__lowercase , asian_support=__lowercase , case_sensitive=__lowercase , )
a__ : Optional[int] = sb_ter.corpus_score(__lowercase , __lowercase )
return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 266 | 1 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
SCREAMING_SNAKE_CASE_ : str = logging.get_logger(__name__)
if is_vision_available():
import PIL
class a ( _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase = ["pixel_values"]
def __init__( self: Optional[Any] , UpperCamelCase: bool = True , UpperCamelCase: Dict[str, int] = None , UpperCamelCase: PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase: bool = True , UpperCamelCase: Dict[str, int] = None , UpperCamelCase: bool = True , UpperCamelCase: Union[int, float] = 1 / 2_55 , UpperCamelCase: bool = True , UpperCamelCase: Optional[Union[float, List[float]]] = None , UpperCamelCase: Optional[Union[float, List[float]]] = None , UpperCamelCase: bool = True , **UpperCamelCase: List[Any] , ):
"""simple docstring"""
super().__init__(**UpperCamelCase )
A__ = size if size is not None else {"""shortest_edge""": 2_24}
A__ = get_size_dict(UpperCamelCase , default_to_square=UpperCamelCase )
A__ = crop_size if crop_size is not None else {"""height""": 2_24, """width""": 2_24}
A__ = get_size_dict(UpperCamelCase , default_to_square=UpperCamelCase , param_name="""crop_size""" )
A__ = do_resize
A__ = size
A__ = resample
A__ = do_center_crop
A__ = crop_size
A__ = do_rescale
A__ = rescale_factor
A__ = do_normalize
A__ = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
A__ = image_std if image_std is not None else OPENAI_CLIP_STD
A__ = do_convert_rgb
def UpperCamelCase ( self: Dict , UpperCamelCase: np.ndarray , UpperCamelCase: Dict[str, int] , UpperCamelCase: PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase: Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase: Any , ):
"""simple docstring"""
A__ = get_size_dict(UpperCamelCase , default_to_square=UpperCamelCase )
if "shortest_edge" not in size:
raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
A__ = get_resize_output_image_size(UpperCamelCase , size=size["""shortest_edge"""] , default_to_square=UpperCamelCase )
return resize(UpperCamelCase , size=UpperCamelCase , resample=UpperCamelCase , data_format=UpperCamelCase , **UpperCamelCase )
def UpperCamelCase ( self: Tuple , UpperCamelCase: np.ndarray , UpperCamelCase: Dict[str, int] , UpperCamelCase: Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase: str , ):
"""simple docstring"""
A__ = get_size_dict(UpperCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(f"""The `size` parameter must contain the keys (height, width). Got {size.keys()}""" )
return center_crop(UpperCamelCase , size=(size["""height"""], size["""width"""]) , data_format=UpperCamelCase , **UpperCamelCase )
def UpperCamelCase ( self: Optional[Any] , UpperCamelCase: np.ndarray , UpperCamelCase: Union[int, float] , UpperCamelCase: Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase: int , ):
"""simple docstring"""
return rescale(UpperCamelCase , scale=UpperCamelCase , data_format=UpperCamelCase , **UpperCamelCase )
def UpperCamelCase ( self: List[Any] , UpperCamelCase: np.ndarray , UpperCamelCase: Union[float, List[float]] , UpperCamelCase: Union[float, List[float]] , UpperCamelCase: Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase: List[str] , ):
"""simple docstring"""
return normalize(UpperCamelCase , mean=UpperCamelCase , std=UpperCamelCase , data_format=UpperCamelCase , **UpperCamelCase )
def UpperCamelCase ( self: str , UpperCamelCase: ImageInput , UpperCamelCase: bool = None , UpperCamelCase: Dict[str, int] = None , UpperCamelCase: PILImageResampling = None , UpperCamelCase: bool = None , UpperCamelCase: int = None , UpperCamelCase: bool = None , UpperCamelCase: float = None , UpperCamelCase: bool = None , UpperCamelCase: Optional[Union[float, List[float]]] = None , UpperCamelCase: Optional[Union[float, List[float]]] = None , UpperCamelCase: bool = None , UpperCamelCase: Optional[Union[str, TensorType]] = None , UpperCamelCase: Optional[ChannelDimension] = ChannelDimension.FIRST , **UpperCamelCase: Tuple , ):
"""simple docstring"""
A__ = do_resize if do_resize is not None else self.do_resize
A__ = size if size is not None else self.size
A__ = get_size_dict(UpperCamelCase , param_name="""size""" , default_to_square=UpperCamelCase )
A__ = resample if resample is not None else self.resample
A__ = do_center_crop if do_center_crop is not None else self.do_center_crop
A__ = crop_size if crop_size is not None else self.crop_size
A__ = get_size_dict(UpperCamelCase , param_name="""crop_size""" , default_to_square=UpperCamelCase )
A__ = do_rescale if do_rescale is not None else self.do_rescale
A__ = rescale_factor if rescale_factor is not None else self.rescale_factor
A__ = do_normalize if do_normalize is not None else self.do_normalize
A__ = image_mean if image_mean is not None else self.image_mean
A__ = image_std if image_std is not None else self.image_std
A__ = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
A__ = make_list_of_images(UpperCamelCase )
if not valid_images(UpperCamelCase ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
A__ = [convert_to_rgb(UpperCamelCase ) for image in images]
# All transformations expect numpy arrays.
A__ = [to_numpy_array(UpperCamelCase ) for image in images]
if do_resize:
A__ = [self.resize(image=UpperCamelCase , size=UpperCamelCase , resample=UpperCamelCase ) for image in images]
if do_center_crop:
A__ = [self.center_crop(image=UpperCamelCase , size=UpperCamelCase ) for image in images]
if do_rescale:
A__ = [self.rescale(image=UpperCamelCase , scale=UpperCamelCase ) for image in images]
if do_normalize:
A__ = [self.normalize(image=UpperCamelCase , mean=UpperCamelCase , std=UpperCamelCase ) for image in images]
A__ = [to_channel_dimension_format(UpperCamelCase , UpperCamelCase ) for image in images]
A__ = {"""pixel_values""": images}
return BatchFeature(data=UpperCamelCase , tensor_type=UpperCamelCase )
| 335 |
"""simple docstring"""
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
SCREAMING_SNAKE_CASE_ : Dict = '\\n@inproceedings{popovic-2015-chrf,\n title = "chr{F}: character n-gram {F}-score for automatic {MT} evaluation",\n author = "Popovi{\'c}, Maja",\n booktitle = "Proceedings of the Tenth Workshop on Statistical Machine Translation",\n month = sep,\n year = "2015",\n address = "Lisbon, Portugal",\n publisher = "Association for Computational Linguistics",\n url = "https://aclanthology.org/W15-3049",\n doi = "10.18653/v1/W15-3049",\n pages = "392--395",\n}\n@inproceedings{popovic-2017-chrf,\n title = "chr{F}++: words helping character n-grams",\n author = "Popovi{\'c}, Maja",\n booktitle = "Proceedings of the Second Conference on Machine Translation",\n month = sep,\n year = "2017",\n address = "Copenhagen, Denmark",\n publisher = "Association for Computational Linguistics",\n url = "https://aclanthology.org/W17-4770",\n doi = "10.18653/v1/W17-4770",\n pages = "612--618",\n}\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'
SCREAMING_SNAKE_CASE_ : str = '\\nChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,\nand ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation\nthat is already present in sacrebleu.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.\n'
SCREAMING_SNAKE_CASE_ : List[str] = '\nProduces ChrF(++) scores for hypotheses given reference translations.\n\nArgs:\n predictions (list of str): The predicted sentences.\n references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.\n char_order (int): Character n-gram order. Defaults to `6`.\n word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.\n beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.\n lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.\n whitespace (bool): If `True`, include whitespaces when extracting character n-grams.\n eps_smoothing (bool): If `True`, applies epsilon smoothing similar\n to reference chrF++.py, NLTK and Moses implementations. If `False`,\n it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.\n\nReturns:\n \'score\' (float): The chrF (chrF++) score,\n \'char_order\' (int): The character n-gram order,\n \'word_order\' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,\n \'beta\' (int): Determine the importance of recall w.r.t precision\n\nExamples:\n Example 1--a simple example of calculating chrF:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction, references=reference)\n >>> print(results)\n {\'score\': 84.64214891738334, \'char_order\': 6, \'word_order\': 0, \'beta\': 2}\n\n Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2)\n >>> print(results)\n {\'score\': 82.87263732906315, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n\n Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2,\n ... lowercase=True)\n >>> print(results)\n {\'score\': 92.12853119829202, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class a ( datasets.Metric ):
"""simple docstring"""
def UpperCamelCase ( self: List[str] ):
"""simple docstring"""
if version.parse(scb.__version__ ) < version.parse("""1.4.12""" ):
raise ImportWarning(
"""To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"""
"""You can install it with `pip install \"sacrebleu>=1.4.12\"`.""" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="""https://github.com/mjpost/sacreBLEU#chrf--chrf""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Sequence(datasets.Value("""string""" , id="""sequence""" ) , id="""references""" ),
} ) , codebase_urls=["""https://github.com/mjpost/sacreBLEU#chrf--chrf"""] , reference_urls=[
"""https://github.com/m-popovic/chrF""",
] , )
def UpperCamelCase ( self: List[Any] , UpperCamelCase: Union[str, Any] , UpperCamelCase: Optional[Any] , UpperCamelCase: int = CHRF.CHAR_ORDER , UpperCamelCase: int = CHRF.WORD_ORDER , UpperCamelCase: int = CHRF.BETA , UpperCamelCase: bool = False , UpperCamelCase: bool = False , UpperCamelCase: bool = False , ):
"""simple docstring"""
A__ = len(references[0] )
if any(len(UpperCamelCase ) != references_per_prediction for refs in references ):
raise ValueError("""Sacrebleu requires the same number of references for each prediction""" )
A__ = [[refs[i] for refs in references] for i in range(UpperCamelCase )]
A__ = CHRF(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
A__ = sb_chrf.corpus_score(UpperCamelCase , UpperCamelCase )
return {
"score": output.score,
"char_order": output.char_order,
"word_order": output.word_order,
"beta": output.beta,
}
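# Hedged sketch (added; the strings are illustrative) of the sacrebleu call wrapped above;
# word_order=2 selects chrF++ rather than plain chrF:
_chrf = CHRF(word_order=2)
print(_chrf.corpus_score(["the cat sat"], [["the cat sat down"]]).score)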
| 335 | 1 |
"""simple docstring"""
import baseaa
def lowercase__ ( lowercase_ ) -> bytes:
"""simple docstring"""
return baseaa.baaencode(string.encode("utf-8" ) )
def lowercase__ ( lowercase_ ) -> str:
"""simple docstring"""
return baseaa.baadecode(lowercase_ ).decode("utf-8" )
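# Hedged equivalent (added): the obfuscated `baseaa` module above corresponds to the
# standard library's base64 Base85 helpers.
import base64
_enc = base64.b85encode("Hello World!".encode("utf-8"))
print(base64.b85decode(_enc).decode("utf-8"))  # Hello World!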
if __name__ == "__main__":
lowerCamelCase__ = "Hello World!"
lowerCamelCase__ = baseaa_encode(test)
print(encoded)
lowerCamelCase__ = baseaa_decode(encoded)
print(decoded) | 362 |
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
SEWConfig,
SEWForCTC,
SEWModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"post_extract_proj": "feature_projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.upsample.0": "encoder.upsample.projection",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "layer_norm",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ) -> Optional[Any]:
"""simple docstring"""
for attribute in key.split("." ):
_UpperCamelCase : str = getattr(lowercase_ ,lowercase_ )
if weight_type is not None:
_UpperCamelCase : str = getattr(lowercase_ ,lowercase_ ).shape
else:
_UpperCamelCase : int = hf_pointer.shape
assert hf_shape == value.shape, (
F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
_UpperCamelCase : Optional[Any] = value
elif weight_type == "weight_g":
_UpperCamelCase : int = value
elif weight_type == "weight_v":
_UpperCamelCase : Optional[Any] = value
elif weight_type == "bias":
_UpperCamelCase : int = value
else:
_UpperCamelCase : Any = value
logger.info(F'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )
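# Hedged illustration (added; the object is made up) of the dotted-attribute walk performed
# at the top of the recursive setter above:
from types import SimpleNamespace
_obj = SimpleNamespace(encoder=SimpleNamespace(layer_norm="target"))
_node = _obj
for _attr in "encoder.layer_norm".split("."):
    _node = getattr(_node, _attr)
print(_node)  # target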
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    """simple docstring"""
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "sew." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "bias" in name:
                        weight_type = "bias"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(F'''Unused weights: {unused_weights}''')
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """simple docstring"""
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                F'''{full_name} has size {value.shape}, but'''
                F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''')
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                F'''{full_name} has size {value.shape}, but'''
                F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''')
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(F'''Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.''')
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                F'''{full_name} has size {value.shape}, but'''
                F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''')
    else:
        unused_weights.append(full_name)
def convert_config(model, is_finetuned):
    """simple docstring"""
    config = SEWConfig()
    if is_finetuned:
        fs_config = model.w2v_encoder.w2v_model.cfg
    else:
        fs_config = model.cfg
    config.conv_bias = fs_config.conv_bias
    conv_layers = eval(fs_config.conv_feature_layers)
    config.conv_dim = [x[0] for x in conv_layers]
    config.conv_kernel = [x[1] for x in conv_layers]
    config.conv_stride = [x[2] for x in conv_layers]
    config.feat_extract_activation = "gelu"
    config.feat_extract_norm = "layer" if fs_config.extractor_mode == "layer_norm" else "group"
    config.final_dropout = 0.0
    config.hidden_act = fs_config.activation_fn.name
    config.hidden_size = fs_config.encoder_embed_dim
    config.initializer_range = 0.02
    config.intermediate_size = fs_config.encoder_ffn_embed_dim
    config.layer_norm_eps = 1e-5
    config.layerdrop = fs_config.encoder_layerdrop
    config.num_attention_heads = fs_config.encoder_attention_heads
    config.num_conv_pos_embedding_groups = fs_config.conv_pos_groups
    config.num_conv_pos_embeddings = fs_config.conv_pos
    config.num_feat_extract_layers = len(conv_layers)
    config.num_hidden_layers = fs_config.encoder_layers
    config.squeeze_factor = fs_config.squeeze_factor
    # take care of any params that are overridden by the Wav2VecCtc model
    if is_finetuned:
        fs_config = model.cfg
        config.final_dropout = fs_config.final_dropout
        config.layerdrop = fs_config.layerdrop
    config.activation_dropout = fs_config.activation_dropout
    config.apply_spec_augment = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
    config.attention_dropout = fs_config.attention_dropout
    config.feat_proj_dropout = fs_config.dropout_input
    config.hidden_dropout = fs_config.dropout
    config.mask_feature_length = fs_config.mask_channel_length
    config.mask_feature_prob = fs_config.mask_channel_prob
    config.mask_time_length = fs_config.mask_length
    config.mask_time_prob = fs_config.mask_prob
    config.feature_extractor_type = "Wav2Vec2FeatureExtractor"
    config.tokenizer_class = "Wav2Vec2CTCTokenizer"
    return config
@torch.no_grad()
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_=None ,lowercase_=None ,lowercase_=True ) -> str:
"""simple docstring"""
if is_finetuned:
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase : Optional[int] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] ,arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
else:
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase : Optional[int] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
if config_path is not None:
_UpperCamelCase : str = SEWConfig.from_pretrained(lowercase_ )
else:
_UpperCamelCase : Optional[int] = convert_config(model[0] ,lowercase_ )
_UpperCamelCase : List[str] = model[0].eval()
_UpperCamelCase : Union[str, Any] = True if config.feat_extract_norm == "layer" else False
_UpperCamelCase : Union[str, Any] = WavaVecaFeatureExtractor(
feature_size=1 ,sampling_rate=16_000 ,padding_value=0 ,do_normalize=lowercase_ ,return_attention_mask=lowercase_ ,)
if is_finetuned:
if dict_path:
_UpperCamelCase : Union[str, Any] = Dictionary.load(lowercase_ )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
_UpperCamelCase : List[str] = target_dict.pad_index
_UpperCamelCase : Optional[int] = target_dict.bos_index
_UpperCamelCase : Any = target_dict.pad_index
_UpperCamelCase : List[Any] = target_dict.bos_index
_UpperCamelCase : List[str] = target_dict.eos_index
_UpperCamelCase : Optional[Any] = len(target_dict.symbols )
_UpperCamelCase : List[Any] = os.path.join(lowercase_ ,"vocab.json" )
if not os.path.isdir(lowercase_ ):
logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(lowercase_ ) )
return
os.makedirs(lowercase_ ,exist_ok=lowercase_ )
with open(lowercase_ ,"w" ,encoding="utf-8" ) as vocab_handle:
json.dump(target_dict.indices ,lowercase_ )
_UpperCamelCase : Optional[Any] = WavaVecaCTCTokenizer(
lowercase_ ,unk_token=target_dict.unk_word ,pad_token=target_dict.pad_word ,bos_token=target_dict.bos_word ,eos_token=target_dict.eos_word ,word_delimiter_token="|" ,do_lower_case=lowercase_ ,)
_UpperCamelCase : List[str] = WavaVecaProcessor(feature_extractor=lowercase_ ,tokenizer=lowercase_ )
processor.save_pretrained(lowercase_ )
_UpperCamelCase : List[Any] = SEWForCTC(lowercase_ )
else:
_UpperCamelCase : int = SEWModel(lowercase_ )
feature_extractor.save_pretrained(lowercase_ )
recursively_load_weights(lowercase_ ,lowercase_ ,lowercase_ )
hf_model.save_pretrained(lowercase_ )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--is_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
args = parser.parse_args()
convert_sew_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
)
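# Example invocation (illustrative only: the paths below are placeholders, and the file
# name assumes this script is saved as convert_sew_checkpoint.py):
#
#   python convert_sew_checkpoint.py \
#       --checkpoint_path ./sew_tiny.pt \
#       --dict_path ./dict.ltr.txt \
#       --pytorch_dump_folder_path ./sew-tiny-hf \
#       --is_finetuned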
| 310 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_vit_mae": ["VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMAEConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_vit_mae"] = [
"VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST",
"ViTMAEForPreTraining",
"ViTMAELayer",
"ViTMAEModel",
"ViTMAEPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_tf_vit_mae"] = [
"TFViTMAEForPreTraining",
"TFViTMAEModel",
"TFViTMAEPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
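# Added note on the pattern above: _LazyModule defers the heavy torch/tf imports until
# an attribute is first accessed, so (module path illustrative)
#
#   from transformers.models.vit_mae import ViTMAEModel
#
# only pays the torch import cost at that point, while importing the package stays cheap.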
| 281 |
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
snake_case : str = "▁"
snake_case : List[Any] = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class _snake_case ( TokenizerTesterMixin , unittest.TestCase ):
UpperCamelCase__ = BigBirdTokenizer
UpperCamelCase__ = BigBirdTokenizerFast
UpperCamelCase__ = True
UpperCamelCase__ = True
    def setUp( self ):
super().setUp()
        tokenizer = self.tokenizer_class(SAMPLE_VOCAB , keep_accents=True )
tokenizer.save_pretrained(self.tmpdirname )
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : Union[str, Any] = "<s>"
__magic_name__ : Dict = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_a ) , _a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_a ) , _a )
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : Optional[int] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<unk>" )
self.assertEqual(vocab_keys[1] , "<s>" )
self.assertEqual(vocab_keys[-1] , "[MASK]" )
self.assertEqual(len(_a ) , 1_004 )
def SCREAMING_SNAKE_CASE ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 1_000 )
def SCREAMING_SNAKE_CASE ( self ):
if not self.test_rust_tokenizer:
return
__magic_name__ : Dict = self.get_tokenizer()
__magic_name__ : str = self.get_rust_tokenizer()
__magic_name__ : Any = "I was born in 92000, and this is falsé."
__magic_name__ : Dict = tokenizer.tokenize(_a )
__magic_name__ : Any = rust_tokenizer.tokenize(_a )
self.assertListEqual(_a , _a )
__magic_name__ : List[Any] = tokenizer.encode(_a , add_special_tokens=_a )
__magic_name__ : List[str] = rust_tokenizer.encode(_a , add_special_tokens=_a )
self.assertListEqual(_a , _a )
__magic_name__ : str = self.get_rust_tokenizer()
__magic_name__ : Dict = tokenizer.encode(_a )
__magic_name__ : Optional[int] = rust_tokenizer.encode(_a )
self.assertListEqual(_a , _a )
def SCREAMING_SNAKE_CASE ( self ):
        tokenizer = BigBirdTokenizer(SAMPLE_VOCAB , keep_accents=True )
__magic_name__ : str = tokenizer.tokenize("This is a test" )
self.assertListEqual(_a , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_a ) , [285, 46, 10, 170, 382] , )
__magic_name__ : Dict = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
_a , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
__magic_name__ : Union[str, Any] = tokenizer.convert_tokens_to_ids(_a )
self.assertListEqual(
_a , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
__magic_name__ : int = tokenizer.convert_ids_to_tokens(_a )
self.assertListEqual(
_a , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
@cached_property
    def big_tokenizer( self ):
return BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base" )
@slow
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : Any = "Hello World!"
__magic_name__ : Dict = [65, 18_536, 2_260, 101, 66]
self.assertListEqual(_a , self.big_tokenizer.encode(_a ) )
@slow
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : Dict = (
"This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
)
# fmt: off
__magic_name__ : List[str] = [65, 871, 419, 358, 946, 991, 2_521, 452, 358, 1_357, 387, 7_751, 3_536, 112, 985, 456, 126, 865, 938, 5_400, 5_734, 458, 1_368, 467, 786, 2_462, 5_246, 1_159, 633, 865, 4_519, 457, 582, 852, 2_557, 427, 916, 508, 405, 34_324, 497, 391, 408, 11_342, 1_244, 385, 100, 938, 985, 456, 574, 362, 12_597, 3_200, 3_129, 1_172, 66] # noqa: E231
# fmt: on
self.assertListEqual(_a , self.big_tokenizer.encode(_a ) )
@require_torch
@slow
def SCREAMING_SNAKE_CASE ( self ):
import torch
from transformers import BigBirdConfig, BigBirdModel
# Build sequence
__magic_name__ : Optional[Any] = list(self.big_tokenizer.get_vocab().keys() )[:10]
__magic_name__ : List[Any] = " ".join(_a )
__magic_name__ : Any = self.big_tokenizer.encode_plus(_a , return_tensors="pt" , return_token_type_ids=_a )
__magic_name__ : Union[str, Any] = self.big_tokenizer.batch_encode_plus(
[sequence + " " + sequence] , return_tensors="pt" , return_token_type_ids=_a )
__magic_name__ : List[str] = BigBirdConfig(attention_type="original_full" )
__magic_name__ : Optional[int] = BigBirdModel(_a )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**_a )
model(**_a )
@slow
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : int = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base" )
__magic_name__ : int = tokenizer.decode(tokenizer("Paris is the [MASK]." ).input_ids )
self.assertTrue(decoded_text == "[CLS] Paris is the[MASK].[SEP]" )
@slow
def SCREAMING_SNAKE_CASE ( self ):
# fmt: off
__magic_name__ : Optional[Any] = {"input_ids": [[65, 39_286, 458, 36_335, 2_001, 456, 13_073, 13_266, 455, 113, 7_746, 1_741, 11_157, 391, 13_073, 13_266, 455, 113, 3_967, 35_412, 113, 4_936, 109, 3_870, 2_377, 113, 30_084, 45_720, 458, 134, 17_496, 112, 503, 11_672, 113, 118, 112, 5_665, 13_347, 38_687, 112, 1_496, 31_389, 112, 3_268, 47_264, 134, 962, 112, 16_377, 8_035, 23_130, 430, 12_169, 15_518, 28_592, 458, 146, 41_697, 109, 391, 12_169, 15_518, 16_689, 458, 146, 41_358, 109, 452, 726, 4_034, 111, 763, 35_412, 5_082, 388, 1_903, 111, 9_051, 391, 2_870, 48_918, 1_900, 1_123, 550, 998, 112, 9_586, 15_985, 455, 391, 410, 22_955, 37_636, 114, 66], [65, 448, 17_496, 419, 3_663, 385, 763, 113, 27_533, 2_870, 3_283, 13_043, 1_639, 24_713, 523, 656, 24_013, 18_550, 2_521, 517, 27_014, 21_244, 420, 1_212, 1_465, 391, 927, 4_833, 388, 578, 11_786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2_169, 7_687, 21_932, 18_146, 726, 363, 17_032, 3_391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_a , model_name="google/bigbird-roberta-base" , revision="215c99f1600e06f83acce68422f2035b2b5c3510" , )
| 281 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_altclip": [
"ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"AltCLIPConfig",
"AltCLIPTextConfig",
"AltCLIPVisionConfig",
],
"processing_altclip": ["AltCLIPProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_altclip"] = [
"ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"AltCLIPPreTrainedModel",
"AltCLIPModel",
"AltCLIPTextModel",
"AltCLIPVisionModel",
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 344 |
import pytest
UpperCamelCase_ = "__dummy_dataset1__"
UpperCamelCase_ = "\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\"\nURLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n def _info(self):\n features = datasets.Features(\n {\n \"tokens\": datasets.Sequence(datasets.Value(\"string\")),\n \"ner_tags\": datasets.Sequence(\n datasets.features.ClassLabel(\n names=[\n \"O\",\n \"B-PER\",\n \"I-PER\",\n \"B-ORG\",\n \"I-ORG\",\n \"B-LOC\",\n \"I-LOC\",\n ]\n )\n ),\n \"langs\": datasets.Sequence(datasets.Value(\"string\")),\n \"spans\": datasets.Sequence(datasets.Value(\"string\")),\n }\n )\n return datasets.DatasetInfo(features=features)\n\n def _split_generators(self, dl_manager):\n dl_path = dl_manager.download(URLS)\n return [\n datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}),\n datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}),\n ]\n\n def _generate_examples(self, filepath):\n with open(filepath, \"r\", encoding=\"utf-8\") as f:\n for i, line in enumerate(f):\n yield i, json.loads(line)\n"
@pytest.fixture
def dataset_loading_script_name():
    '''simple docstring'''
    return DATASET_LOADING_SCRIPT_NAME
@pytest.fixture
def dataset_loading_script_code():
    '''simple docstring'''
    return DATASET_LOADING_SCRIPT_CODE
@pytest.fixture
def dataset_loading_script_dir(dataset_loading_script_name, dataset_loading_script_code, tmp_path):
    '''simple docstring'''
    script_name = dataset_loading_script_name
    script_dir = tmp_path / '''datasets''' / script_name
    script_dir.mkdir(parents=True)
    script_path = script_dir / f"{script_name}.py"
    with open(script_path, '''w''') as f:
        f.write(dataset_loading_script_code)
    return str(script_dir)
| 344 | 1 |
'''simple docstring'''
from __future__ import annotations
def SCREAMING_SNAKE_CASE_ (stress , tangential_force , area , ) -> tuple[str, float]:
if (stress, tangential_force, area).count(0 ) != 1:
raise ValueError("""You cannot supply more or less than 2 values""" )
elif stress < 0:
raise ValueError("""Stress cannot be negative""" )
elif tangential_force < 0:
raise ValueError("""Tangential Force cannot be negative""" )
elif area < 0:
raise ValueError("""Area cannot be negative""" )
elif stress == 0:
return (
"stress",
tangential_force / area,
)
elif tangential_force == 0:
return (
"tangential_force",
stress * area,
)
else:
return (
"area",
tangential_force / stress,
)
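# Worked examples (added for illustration; they follow from the relation
# stress = tangential_force / area implemented above):
assert SCREAMING_SNAKE_CASE_(25, 100, 0) == ("area", 4.0)
assert SCREAMING_SNAKE_CASE_(0, 1_600, 200) == ("stress", 8.0)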
if __name__ == "__main__":
import doctest
doctest.testmod()
| 41 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class EfficientFormerImageProcessorTester( unittest.TestCase ):
    def __init__( self , parent , batch_size=13 , num_channels=3 , image_size=224 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , ):
UpperCamelCase :List[Any] = size if size is not None else {"""height""": 18, """width""": 18}
UpperCamelCase :str = parent
UpperCamelCase :Optional[int] = batch_size
UpperCamelCase :Dict = num_channels
UpperCamelCase :str = image_size
UpperCamelCase :Dict = min_resolution
UpperCamelCase :str = max_resolution
UpperCamelCase :Union[str, Any] = do_resize
UpperCamelCase :Optional[Any] = size
UpperCamelCase :Any = do_normalize
UpperCamelCase :Optional[Any] = image_mean
UpperCamelCase :Tuple = image_std
    def prepare_image_processor_dict( self ):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class _SCREAMING_SNAKE_CASE ( ImageProcessingSavingTestMixin , unittest.TestCase ):
snake_case__ : List[Any] = ViTImageProcessor if is_vision_available() else None
    def setUp( self ):
        self.image_proc_tester = EfficientFormerImageProcessorTester(self )
@property
    def image_processor_dict( self ):
return self.image_proc_tester.prepare_image_processor_dict()
def _A ( self : int ):
UpperCamelCase :List[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__lowerCamelCase , """image_mean""" ) )
self.assertTrue(hasattr(__lowerCamelCase , """image_std""" ) )
self.assertTrue(hasattr(__lowerCamelCase , """do_normalize""" ) )
self.assertTrue(hasattr(__lowerCamelCase , """do_resize""" ) )
self.assertTrue(hasattr(__lowerCamelCase , """size""" ) )
def _A ( self : Optional[int] ):
pass
def _A ( self : str ):
# Initialize image_processor
UpperCamelCase :Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase :Union[str, Any] = prepare_image_inputs(self.image_proc_tester , equal_resolution=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , Image.Image )
# Test not batched input
UpperCamelCase :List[str] = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
# Test batched
UpperCamelCase :List[Any] = image_processor(__lowerCamelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
def _A ( self : Union[str, Any] ):
# Initialize image_processor
UpperCamelCase :Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCamelCase :List[Any] = prepare_image_inputs(self.image_proc_tester , equal_resolution=__lowerCamelCase , numpify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , np.ndarray )
# Test not batched input
UpperCamelCase :Dict = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
# Test batched
UpperCamelCase :Tuple = image_processor(__lowerCamelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
def _A ( self : List[Any] ):
# Initialize image_processor
UpperCamelCase :List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCamelCase :Any = prepare_image_inputs(self.image_proc_tester , equal_resolution=__lowerCamelCase , torchify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , torch.Tensor )
# Test not batched input
UpperCamelCase :List[Any] = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
# Test batched
UpperCamelCase :str = image_processor(__lowerCamelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
| 38 | 0 |
from math import ceil
def solution(n: int = 10_01) -> int:
    '''simple docstring'''
    total = 1
    for i in range(1 , int(ceil(n / 2.0 ) ) ):
        odd = 2 * i + 1
        even = 2 * i
        total = total + 4 * odd**2 - 6 * even
return total
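# Sanity check (added): the diagonals of a 5x5 number spiral are
# 1, 3, 5, 7, 9, 13, 17, 21 and 25, which sum to 101.
assert solution(5) == 101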
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
try:
n = int(sys.argv[1])
print(solution(n))
except ValueError:
print('''Invalid entry - please enter a number''')
| 78 | import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __snake_case ( PipelineTesterMixin , unittest.TestCase ):
__lowerCamelCase : List[Any] = KandinskyVaaControlnetPipeline
__lowerCamelCase : int = ["""image_embeds""", """negative_image_embeds""", """hint"""]
__lowerCamelCase : Optional[int] = ["""image_embeds""", """negative_image_embeds""", """hint"""]
__lowerCamelCase : Optional[Any] = [
"""generator""",
"""height""",
"""width""",
"""latents""",
"""guidance_scale""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
__lowerCamelCase : Dict = False
@property
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
return 32
@property
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
return 32
@property
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
return self.time_input_dim
@property
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
return self.time_input_dim * 4
@property
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
return 100
@property
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
torch.manual_seed(0 )
UpperCAmelCase : Any ={
'''in_channels''': 8,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''image_hint''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
UpperCAmelCase : List[Any] =UNetaDConditionModel(**snake_case__ )
return model
@property
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
torch.manual_seed(0 )
UpperCAmelCase : Any =VQModel(**self.dummy_movq_kwargs )
return model
    def get_dummy_components( self ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase : List[str] =self.dummy_unet
UpperCAmelCase : Tuple =self.dummy_movq
UpperCAmelCase : Union[str, Any] =DDIMScheduler(
num_train_timesteps=1000 , beta_schedule='''linear''' , beta_start=0.0_0085 , beta_end=0.012 , clip_sample=snake_case__ , set_alpha_to_one=snake_case__ , steps_offset=1 , prediction_type='''epsilon''' , thresholding=snake_case__ , )
UpperCAmelCase : Tuple ={
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
    def get_dummy_inputs( self , device , seed=0 ):
        '''simple docstring'''
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed ) ).to(device )
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
            device )
        # create hint
        hint = floats_tensor((1, 3, 64, 64) , rng=random.Random(seed ) ).to(device )
        if str(device ).startswith('''mps''' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''hint''': hint,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''guidance_scale''': 4.0,
'''num_inference_steps''': 2,
'''output_type''': '''np''',
}
return inputs
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase : List[Any] ='''cpu'''
UpperCAmelCase : List[Any] =self.get_dummy_components()
UpperCAmelCase : Tuple =self.pipeline_class(**snake_case__ )
UpperCAmelCase : Tuple =pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
UpperCAmelCase : Optional[int] =pipe(**self.get_dummy_inputs(snake_case__ ) )
UpperCAmelCase : str =output.images
UpperCAmelCase : List[str] =pipe(
**self.get_dummy_inputs(snake_case__ ) , return_dict=snake_case__ , )[0]
UpperCAmelCase : Union[str, Any] =image[0, -3:, -3:, -1]
UpperCAmelCase : List[str] =image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase : Union[str, Any] =np.array(
[0.695_9826, 0.86_8279, 0.755_8092, 0.6876_9467, 0.8580_5804, 0.6597_7496, 0.4488_5302, 0.595_9111, 0.425_1595] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class __snake_case ( unittest.TestCase ):
    def tearDown( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
UpperCAmelCase : Union[str, Any] =load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy''' )
UpperCAmelCase : Tuple =load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/hint_image_cat.png''' )
UpperCAmelCase : int =torch.from_numpy(np.array(snake_case__ ) ).float() / 255.0
UpperCAmelCase : List[str] =hint.permute(2 , 0 , 1 ).unsqueeze(0 )
UpperCAmelCase : Dict =KandinskyVaaPriorPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.floataa )
pipe_prior.to(snake_case__ )
UpperCAmelCase : int =KandinskyVaaControlnetPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-controlnet-depth''' , torch_dtype=torch.floataa )
UpperCAmelCase : str =pipeline.to(snake_case__ )
pipeline.set_progress_bar_config(disable=snake_case__ )
UpperCAmelCase : int ='''A robot, 4k photo'''
UpperCAmelCase : int =torch.Generator(device='''cuda''' ).manual_seed(0 )
        UpperCAmelCase , UpperCAmelCase = pipe_prior(
snake_case__ , generator=snake_case__ , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
UpperCAmelCase : List[str] =torch.Generator(device='''cuda''' ).manual_seed(0 )
UpperCAmelCase : Dict =pipeline(
image_embeds=snake_case__ , negative_image_embeds=snake_case__ , hint=snake_case__ , generator=snake_case__ , num_inference_steps=100 , output_type='''np''' , )
UpperCAmelCase : List[Any] =output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(snake_case__ , snake_case__ )
| 78 | 1 |
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
os.environ['''XLA_PYTHON_CLIENT_ALLOCATOR'''] = '''platform'''
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class lowercase_ :
"""simple docstring"""
UpperCAmelCase_ : str = PegasusConfig
UpperCAmelCase_ : List[Any] = {}
UpperCAmelCase_ : Optional[Any] = """gelu"""
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_labels=False , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=20 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ):
lowerCAmelCase = parent
lowerCAmelCase = batch_size
lowerCAmelCase = seq_length
lowerCAmelCase = is_training
lowerCAmelCase = use_labels
lowerCAmelCase = vocab_size
lowerCAmelCase = hidden_size
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
lowerCAmelCase = intermediate_size
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = max_position_embeddings
lowerCAmelCase = eos_token_id
lowerCAmelCase = pad_token_id
lowerCAmelCase = bos_token_id
def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]:
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size )
lowerCAmelCase = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 )
lowerCAmelCase = np.concatenate([input_ids, eos_tensor] , axis=1 )
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
lowerCAmelCase = prepare_pegasus_inputs_dict(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
return config, inputs_dict
    def check_use_cache_forward( self , model_class_name , config , inputs_dict ):
lowerCAmelCase = 20
lowerCAmelCase = model_class_name(__SCREAMING_SNAKE_CASE )
lowerCAmelCase = model.encode(inputs_dict['''input_ids'''] )
lowerCAmelCase , lowerCAmelCase = (
inputs_dict['''decoder_input_ids'''],
inputs_dict['''decoder_attention_mask'''],
)
lowerCAmelCase = model.init_cache(decoder_input_ids.shape[0] , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
lowerCAmelCase = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='''i4''' )
lowerCAmelCase = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
lowerCAmelCase = model.decode(
decoder_input_ids[:, :-1] , __SCREAMING_SNAKE_CASE , decoder_attention_mask=__SCREAMING_SNAKE_CASE , past_key_values=__SCREAMING_SNAKE_CASE , decoder_position_ids=__SCREAMING_SNAKE_CASE , )
lowerCAmelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
lowerCAmelCase = model.decode(
decoder_input_ids[:, -1:] , __SCREAMING_SNAKE_CASE , decoder_attention_mask=__SCREAMING_SNAKE_CASE , past_key_values=outputs_cache.past_key_values , decoder_position_ids=__SCREAMING_SNAKE_CASE , )
lowerCAmelCase = model.decode(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
lowerCAmelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=F"Max diff is {diff}" )
    def check_use_cache_forward_with_attn_mask( self , model_class_name , config , inputs_dict ):
lowerCAmelCase = 20
lowerCAmelCase = model_class_name(__SCREAMING_SNAKE_CASE )
lowerCAmelCase = model.encode(inputs_dict['''input_ids'''] )
lowerCAmelCase , lowerCAmelCase = (
inputs_dict['''decoder_input_ids'''],
inputs_dict['''decoder_attention_mask'''],
)
lowerCAmelCase = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
lowerCAmelCase = model.init_cache(decoder_input_ids.shape[0] , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
lowerCAmelCase = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
lowerCAmelCase = model.decode(
decoder_input_ids[:, :-1] , __SCREAMING_SNAKE_CASE , decoder_attention_mask=__SCREAMING_SNAKE_CASE , past_key_values=__SCREAMING_SNAKE_CASE , decoder_position_ids=__SCREAMING_SNAKE_CASE , )
lowerCAmelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
lowerCAmelCase = model.decode(
decoder_input_ids[:, -1:] , __SCREAMING_SNAKE_CASE , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=__SCREAMING_SNAKE_CASE , decoder_position_ids=__SCREAMING_SNAKE_CASE , )
lowerCAmelCase = model.decode(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , decoder_attention_mask=__SCREAMING_SNAKE_CASE )
lowerCAmelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=F"Max diff is {diff}" )
def prepare_pegasus_inputs_dict( config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , ):
    if attention_mask is None:
        attention_mask = np.not_equal(input_ids , config.pad_token_id ).astype(np.int8 )
    if decoder_attention_mask is None:
        decoder_attention_mask = np.concatenate(
            [
                np.ones(decoder_input_ids[:, :1].shape , dtype=np.int8 ),
                np.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ).astype(np.int8 ),
            ] , axis=-1 , )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
}
@require_flax
class lowercase_ ( FlaxModelTesterMixin , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = (
(
FlaxPegasusForConditionalGeneration,
FlaxPegasusModel,
)
if is_flax_available()
else ()
)
UpperCAmelCase_ : int = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
UpperCAmelCase_ : int = True
UpperCAmelCase_ : str = False
UpperCAmelCase_ : Dict = False
UpperCAmelCase_ : Tuple = False
def SCREAMING_SNAKE_CASE_ ( self ) ->str:
lowerCAmelCase = FlaxPegasusModelTester(self )
lowerCAmelCase = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self ) ->List[Any]:
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[Any]:
lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self ) ->List[Any]:
lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]:
lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
lowerCAmelCase = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
lowerCAmelCase = model_class(__SCREAMING_SNAKE_CASE )
@jax.jit
                def encode_jitted(input_ids , attention_mask=None , **kwargs ):
                    return model.encode(input_ids=input_ids , attention_mask=attention_mask )
with self.subTest('''JIT Enabled''' ):
lowerCAmelCase = encode_jitted(**__SCREAMING_SNAKE_CASE ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
lowerCAmelCase = encode_jitted(**__SCREAMING_SNAKE_CASE ).to_tuple()
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , len(__SCREAMING_SNAKE_CASE ) )
for jitted_output, output in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
self.assertEqual(jitted_output.shape , output.shape )
def SCREAMING_SNAKE_CASE_ ( self ) ->Dict:
lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
lowerCAmelCase = model_class(__SCREAMING_SNAKE_CASE )
lowerCAmelCase = model.encode(inputs_dict['''input_ids'''] , inputs_dict['''attention_mask'''] )
lowerCAmelCase = {
'''decoder_input_ids''': inputs_dict['''decoder_input_ids'''],
'''decoder_attention_mask''': inputs_dict['''decoder_attention_mask'''],
'''encoder_outputs''': encoder_outputs,
}
@jax.jit
                def decode_jitted(decoder_input_ids , decoder_attention_mask , encoder_outputs ):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids , decoder_attention_mask=decoder_attention_mask , encoder_outputs=encoder_outputs , )
with self.subTest('''JIT Enabled''' ):
lowerCAmelCase = decode_jitted(**__SCREAMING_SNAKE_CASE ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
lowerCAmelCase = decode_jitted(**__SCREAMING_SNAKE_CASE ).to_tuple()
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , len(__SCREAMING_SNAKE_CASE ) )
for jitted_output, output in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def SCREAMING_SNAKE_CASE_ ( self ) ->int:
for model_class_name in self.all_model_classes:
lowerCAmelCase = model_class_name.from_pretrained('''google/pegasus-large''' , from_pt=__SCREAMING_SNAKE_CASE )
lowerCAmelCase = np.ones((1, 1) )
lowerCAmelCase = model(__SCREAMING_SNAKE_CASE )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
@slow
def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]:
lowerCAmelCase = FlaxPegasusForConditionalGeneration.from_pretrained('''google/pegasus-xsum''' )
lowerCAmelCase = PegasusTokenizer.from_pretrained('''google/pegasus-xsum''' )
lowerCAmelCase = [
''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''',
]
lowerCAmelCase = [
'''California\'s largest electricity provider has turned off power to hundreds of thousands of customers.''',
'''Pop group N-Dubz have revealed they were surprised to get four nominations for this year\'s Mobo Awards.''',
]
lowerCAmelCase = tokenizer(__SCREAMING_SNAKE_CASE , return_tensors='''np''' , truncation=__SCREAMING_SNAKE_CASE , max_length=512 , padding=__SCREAMING_SNAKE_CASE )
lowerCAmelCase = model.generate(**__SCREAMING_SNAKE_CASE , num_beams=2 ).sequences
lowerCAmelCase = tokenizer.batch_decode(__SCREAMING_SNAKE_CASE , skip_special_tokens=__SCREAMING_SNAKE_CASE )
assert tgt_text == decoded
| 338 |
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
'''split_dict''' , [
SplitDict(),
SplitDict({'''train''': SplitInfo(name='''train''' , num_bytes=1_3_3_7 , num_examples=4_2 , dataset_name='''my_dataset''' )} ),
SplitDict({'''train''': SplitInfo(name='''train''' , num_bytes=1_3_3_7 , num_examples=4_2 )} ),
SplitDict({'''train''': SplitInfo()} ),
] , )
def test_split_dict_to_yaml_list( split_dict ):
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict_yaml_list ) == len(split_dict )
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list )
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded
@pytest.mark.parametrize(
    '''split_info''' , [SplitInfo(), SplitInfo(dataset_name=None ), SplitInfo(dataset_name='''my_dataset''' )] )
def test_split_dict_asdict_has_dataset_name( split_info ):
    # For backward compatibility, we need asdict(split_dict) to return split info dictionaries with the "dataset_name"
    # field even if it's deprecated. This way old versions of `datasets` can still reload dataset_infos.json files
    split_dict_asdict = asdict(SplitDict({'''train''': split_info} ) )
    assert "dataset_name" in split_dict_asdict["train"]
    assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
| 338 | 1 |
"""simple docstring"""
import math
import sys
def read_file_binary(file_path: str) -> str:
    result = ""
    try:
        with open(file_path , '''rb''') as binary_file:
            data = binary_file.read()
            for dat in data:
                curr_byte = F"""{dat:08b}"""
                result += curr_byte
        return result
    except OSError:
        print('''File not accessible''')
        sys.exit()
def decompress_data(data_bits: str) -> str:
    lexicon = {'''0''': '''0''', '''1''': '''1'''}
    result, curr_string = "", ""
    index = len(lexicon)
    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue
        last_match_id = lexicon[curr_string]
        result += last_match_id
        lexicon[curr_string] = last_match_id + '''0'''
        if math.log2(index).is_integer():
            new_lex = {}
            for curr_key in list(lexicon):
                new_lex['''0''' + curr_key] = lexicon.pop(curr_key)
            lexicon = new_lex
        lexicon[bin(index)[2:]] = last_match_id + '''1'''
        index += 1
        curr_string = ""
    return result
def write_file_binary(file_path: str , to_write: str) -> None:
    byte_length = 8
    try:
        with open(file_path , '''wb''') as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0 , len(to_write) , byte_length)
            ]
            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append('''10000000''')
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )
            for elem in result_byte_array[:-1]:
                opened_file.write(int(elem , 2).to_bytes(1 , byteorder='''big'''))
    except OSError:
        print('''File not accessible''')
        sys.exit()
def remove_prefix(data_bits: str) -> str:
    counter = 0
    for letter in data_bits:
        if letter == "1":
            break
        counter += 1
    data_bits = data_bits[counter:]
    data_bits = data_bits[counter + 1 :]
    return data_bits
def compress(source_path: str , destination_path: str) -> None:
    data_bits = read_file_binary(source_path)
    data_bits = remove_prefix(data_bits)
    decompressed = decompress_data(data_bits)
    write_file_binary(destination_path , decompressed)
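# Usage sketch (added; the file names are placeholders): this script reverses the
# matching Lempel-Ziv compressor, e.g.
#
#   python lempel_ziv_decompress.py compressed.lzw restored.bin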
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
| 188 |
"""simple docstring"""
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class __lowercase ( enum.Enum ):
'''simple docstring'''
__lowerCAmelCase = 0
__lowerCAmelCase = 1
__lowerCAmelCase = 2
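# Added orientation note: ReturnType controls what the pipeline's __call__ hands back.
# TENSORS returns generated token ids, NEW_TEXT only the completion, and FULL_TEXT the
# prompt plus completion. A typical call (standard transformers factory API, shown here
# for illustration) looks like:
#
#   generator = pipeline("text-generation", model="gpt2")
#   generator("Once upon a time", max_new_tokens=20, return_full_text=False)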
@add_end_docstrings(_UpperCamelCase )
class __lowercase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = '''
In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The
voice of Nicholas\'s young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western
Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision
and denounces one of the men as a horse thief. Although his father initially slaps him for making such an
accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,
begging for his blessing. <eod> </s> <eos>
'''
    def __init__( self , *args , **kwargs ):
        super().__init__(*args , **kwargs )
self.check_model_type(
TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == '''tf''' else MODEL_FOR_CAUSAL_LM_MAPPING )
if "prefix" not in self._preprocess_params:
# This is very specific. The logic is quite complex and needs to be done
# as a "default".
# It also defines both some preprocess_kwargs and generate_kwargs
# which is why we cannot put them in their respective methods.
__a : List[Any] = None
if self.model.config.prefix is not None:
__a : Tuple = self.model.config.prefix
if prefix is None and self.model.__class__.__name__ in [
"XLNetLMHeadModel",
"TransfoXLLMHeadModel",
"TFXLNetLMHeadModel",
"TFTransfoXLLMHeadModel",
]:
# For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
__a : Tuple = self.XL_PREFIX
if prefix is not None:
# Recalculate some generate_kwargs linked to prefix.
            preprocess_params , forward_params , _ = self._sanitize_parameters(prefix=prefix , **self._forward_params )
            self._preprocess_params = {**self._preprocess_params, **preprocess_params}
            self._forward_params = {**self._forward_params, **forward_params}
    def _sanitize_parameters( self , return_full_text=None , return_tensors=None , return_text=None , return_type=None , clean_up_tokenization_spaces=None , prefix=None , handle_long_generation=None , stop_sequence=None , **generate_kwargs ):
__a : str = {}
if prefix is not None:
__a : Tuple = prefix
if prefix:
__a : str = self.tokenizer(
_UpperCAmelCase , padding=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , return_tensors=self.framework )
__a : List[str] = prefix_inputs['''input_ids'''].shape[-1]
if handle_long_generation is not None:
if handle_long_generation not in {"hole"}:
raise ValueError(
f"""{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"""
''' [None, \'hole\']''' )
__a : str = handle_long_generation
preprocess_params.update(_UpperCAmelCase )
__a : int = generate_kwargs
__a : List[Any] = {}
if return_full_text is not None and return_type is None:
if return_text is not None:
raise ValueError('''`return_text` is mutually exclusive with `return_full_text`''' )
if return_tensors is not None:
raise ValueError('''`return_full_text` is mutually exclusive with `return_tensors`''' )
__a : Optional[Any] = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
if return_tensors is not None and return_type is None:
if return_text is not None:
raise ValueError('''`return_text` is mutually exclusive with `return_tensors`''' )
__a : List[Any] = ReturnType.TENSORS
if return_type is not None:
__a : Union[str, Any] = return_type
if clean_up_tokenization_spaces is not None:
__a : Optional[int] = clean_up_tokenization_spaces
if stop_sequence is not None:
__a : Any = self.tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
if len(_UpperCAmelCase ) > 1:
warnings.warn(
'''Stopping on a multiple token sequence is not yet supported on transformers. The first token of'''
''' the stop sequence will be used as the stop sequence string in the interim.''' )
__a : List[str] = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
    def _parse_and_tokenize(self, *args, **kwargs):
        # Parse arguments
        if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
            kwargs.update({"add_space_before_punct_symbol": True})

        return super()._parse_and_tokenize(*args, **kwargs)
    def __call__(self, text_inputs, **kwargs):
        return super().__call__(text_inputs, **kwargs)
    def preprocess(self, prompt_text, prefix="", handle_long_generation=None, **generate_kwargs):
        inputs = self.tokenizer(
            prefix + prompt_text, padding=False, add_special_tokens=False, return_tensors=self.framework
        )
        inputs["prompt_text"] = prompt_text

        if handle_long_generation == "hole":
            cur_len = inputs["input_ids"].shape[-1]
            if "max_new_tokens" in generate_kwargs:
                new_tokens = generate_kwargs["max_new_tokens"]
            else:
                new_tokens = generate_kwargs.get("max_length", self.model.config.max_length) - cur_len
                if new_tokens < 0:
                    raise ValueError("We cannot infer how many new tokens are expected")
            if cur_len + new_tokens > self.tokenizer.model_max_length:
                keep_length = self.tokenizer.model_max_length - new_tokens
                if keep_length <= 0:
                    raise ValueError(
                        "We cannot use `hole` to handle this generation: the number of desired tokens exceeds the"
                        " model's max length"
                    )

                inputs["input_ids"] = inputs["input_ids"][:, -keep_length:]
                if "attention_mask" in inputs:
                    inputs["attention_mask"] = inputs["attention_mask"][:, -keep_length:]

        return inputs
    def _forward(self, model_inputs, **generate_kwargs):
        input_ids = model_inputs["input_ids"]
        attention_mask = model_inputs.get("attention_mask", None)
        # Allow empty prompts
        if input_ids.shape[1] == 0:
            input_ids = None
            attention_mask = None
            in_b = 1
        else:
            in_b = input_ids.shape[0]
        prompt_text = model_inputs.pop("prompt_text")

        # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
        # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
        prefix_length = generate_kwargs.pop("prefix_length", 0)
        if prefix_length > 0:
            has_max_new_tokens = "max_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].max_new_tokens is not None
            )
            if not has_max_new_tokens:
                generate_kwargs["max_length"] = generate_kwargs.get("max_length") or self.model.config.max_length
                generate_kwargs["max_length"] += prefix_length
            has_min_new_tokens = "min_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].min_new_tokens is not None
            )
            if not has_min_new_tokens and "min_length" in generate_kwargs:
                generate_kwargs["min_length"] += prefix_length

        # BS x SL
        generated_sequence = self.model.generate(input_ids=input_ids, attention_mask=attention_mask, **generate_kwargs)
        out_b = generated_sequence.shape[0]
        if self.framework == "pt":
            generated_sequence = generated_sequence.reshape(in_b, out_b // in_b, *generated_sequence.shape[1:])
        elif self.framework == "tf":
            generated_sequence = tf.reshape(generated_sequence, (in_b, out_b // in_b, *generated_sequence.shape[1:]))
        return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
    def postprocess(self, model_outputs, return_type=ReturnType.FULL_TEXT, clean_up_tokenization_spaces=True):
        generated_sequence = model_outputs["generated_sequence"][0]
        input_ids = model_outputs["input_ids"]
        prompt_text = model_outputs["prompt_text"]
        generated_sequence = generated_sequence.numpy().tolist()
        records = []
        for sequence in generated_sequence:
            if return_type == ReturnType.TENSORS:
                record = {"generated_token_ids": sequence}
            elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
                # Decode text
                text = self.tokenizer.decode(
                    sequence,
                    skip_special_tokens=True,
                    clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                )

                # Remove the PADDING prompt from the sequence if an XLNet or Transfo-XL model is used
                if input_ids is None:
                    prompt_length = 0
                else:
                    prompt_length = len(
                        self.tokenizer.decode(
                            input_ids[0],
                            skip_special_tokens=True,
                            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                        )
                    )

                if return_type == ReturnType.FULL_TEXT:
                    all_text = prompt_text + text[prompt_length:]
                else:
                    all_text = text[prompt_length:]

                record = {"generated_text": all_text}
            records.append(record)

        return records | 188 | 1 |
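A minimal usage sketch for the text-generation pipeline above; the `gpt2` checkpoint is an assumption (any causal LM checkpoint works), and the `transformers.pipeline` factory is the usual public entry point.

from transformers import pipeline

generator = pipeline("text-generation", model="gpt2")
outputs = generator("Once upon a time", max_new_tokens=20, num_return_sequences=2)
for out in outputs:
    print(out["generated_text"])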
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""funnel-transformer/small""": """https://huggingface.co/funnel-transformer/small/resolve/main/config.json""",
"""funnel-transformer/small-base""": """https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json""",
"""funnel-transformer/medium""": """https://huggingface.co/funnel-transformer/medium/resolve/main/config.json""",
"""funnel-transformer/medium-base""": """https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json""",
"""funnel-transformer/intermediate""": (
"""https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json"""
),
"""funnel-transformer/intermediate-base""": (
"""https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json"""
),
"""funnel-transformer/large""": """https://huggingface.co/funnel-transformer/large/resolve/main/config.json""",
"""funnel-transformer/large-base""": """https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json""",
"""funnel-transformer/xlarge""": """https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json""",
"""funnel-transformer/xlarge-base""": """https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json""",
}
class FunnelConfig(PretrainedConfig):
    model_type = "funnel"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
    }

    def __init__(
        self,
        vocab_size=30522,
        block_sizes=[4, 4, 4],
        block_repeats=None,
        num_decoder_layers=2,
        d_model=768,
        n_head=12,
        d_head=64,
        d_inner=3072,
        hidden_act="gelu_new",
        hidden_dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        initializer_range=0.1,
        initializer_std=None,
        layer_norm_eps=1e-9,
        pooling_type="mean",
        attention_type="relative_shift",
        separate_cls=True,
        truncate_seq=True,
        pool_q_only=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.block_sizes = block_sizes
        self.block_repeats = [1] * len(block_sizes) if block_repeats is None else block_repeats
        assert len(block_sizes) == len(
            self.block_repeats
        ), "`block_sizes` and `block_repeats` should have the same length."
        self.num_decoder_layers = num_decoder_layers
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.d_inner = d_inner
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.initializer_range = initializer_range
        self.initializer_std = initializer_std
        self.layer_norm_eps = layer_norm_eps
        assert pooling_type in [
            "mean",
            "max",
        ], f"Got {pooling_type} for `pooling_type` but only 'mean' and 'max' are supported."
        self.pooling_type = pooling_type
        assert attention_type in [
            "relative_shift",
            "factorized",
        ], f"Got {attention_type} for `attention_type` but only 'relative_shift' and 'factorized' are supported."
        self.attention_type = attention_type
        self.separate_cls = separate_cls
        self.truncate_seq = truncate_seq
        self.pool_q_only = pool_q_only

        super().__init__(**kwargs)

    @property
    def num_hidden_layers(self):
        return sum(self.block_sizes)

    @num_hidden_layers.setter
    def num_hidden_layers(self, value):
        raise NotImplementedError(
            "This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`."
        )

    @property
    def num_blocks(self):
        return len(self.block_sizes)

    @num_blocks.setter
    def num_blocks(self, value):
        raise NotImplementedError("This model does not support the setting of `num_blocks`. Please set `block_sizes`.")
| 202 |
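A quick sketch of how the derived properties behave; the values follow the defaults above rather than any particular released checkpoint.

from transformers import FunnelConfig

config = FunnelConfig(block_sizes=[4, 4, 4], block_repeats=[1, 2, 2])
assert config.num_blocks == 3
assert config.num_hidden_layers == 12  # sum(block_sizes), independent of repeats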
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
CHAT_MESSAGE_PROMPT = """
Human: <<task>>
Assistant: """


DEFAULT_PROMPTS_REPO = "huggingface-tools/default-prompts"
PROMPT_FILES = {"chat": "chat_prompt_template.txt", "run": "run_prompt_template.txt"}


def download_prompt(prompt_or_repo_id, agent_name, mode="run"):
    if prompt_or_repo_id is None:
        prompt_or_repo_id = DEFAULT_PROMPTS_REPO

    # prompt is considered a repo ID when it does not contain any kind of space
    if re.search("\\s", prompt_or_repo_id) is not None:
        return prompt_or_repo_id

    prompt_file = cached_file(
        prompt_or_repo_id, PROMPT_FILES[mode], repo_type="dataset", user_agent={"agent": agent_name}
    )
    with open(prompt_file, "r", encoding="utf-8") as f:
        return f.read()
| 202 | 1 |
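A short sketch of how `download_prompt` dispatches; the agent name is a placeholder, and the second call needs network access to the Hub.

# A whitespace-containing string is treated as the prompt itself.
custom = download_prompt("Answer the question: <<task>>", agent_name="my-agent")
assert custom == "Answer the question: <<task>>"

# A bare repo ID (no whitespace) triggers a cached Hub download instead.
run_prompt = download_prompt(None, agent_name="my-agent", mode="run")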
'''simple docstring'''
import copy
import os
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence
from datasets.features import ArrayaD, ClassLabel, Features, Image, Value
from datasets.features.features import ArrayaDExtensionType, cast_to_python_objects
from datasets.keyhash import DuplicatedKeysError, InvalidKeyError
from .utils import require_pil
class __SCREAMING_SNAKE_CASE ( lowerCamelCase ):
def __magic_name__ ( self : int ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ : str =pa.array(TypedSequence([1, 2, 3] ) )
self.assertEqual(arr.type , pa.intaa() )
def __magic_name__ ( self : List[Any] ) -> List[Any]:
with self.assertRaises(__lowercase ):
SCREAMING_SNAKE_CASE__ : Optional[Any] =pa.array(TypedSequence([1, 2, 3] ) , type=pa.intaa() )
def __magic_name__ ( self : Any ) -> Union[str, Any]:
with self.assertRaises(__lowercase ):
SCREAMING_SNAKE_CASE__ : Dict =pa.array(TypedSequence([1, 2, 3] , try_type=Value('''bool''' ) , type=Value('''int64''' ) ) )
def __magic_name__ ( self : str ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Any =pa.array(TypedSequence([1, 2, 3] , type=Value('''int32''' ) ) )
self.assertEqual(arr.type , pa.intaa() )
def __magic_name__ ( self : Union[str, Any] ) -> str:
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
SCREAMING_SNAKE_CASE__ : Tuple =pa.array(TypedSequence(['''foo''', '''bar'''] , type=Value('''int64''' ) ) )
def __magic_name__ ( self : Optional[Any] ) -> List[str]:
SCREAMING_SNAKE_CASE__ : Optional[Any] =pa.array(TypedSequence([1, 2, 3] , try_type=Value('''int32''' ) ) )
self.assertEqual(arr.type , pa.intaa() )
def __magic_name__ ( self : List[Any] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ : str =pa.array(TypedSequence(['''foo''', '''bar'''] , try_type=Value('''int64''' ) ) )
self.assertEqual(arr.type , pa.string() )
def __magic_name__ ( self : Tuple ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ : Any =pa.array(TypedSequence([[[1, 2, 3]]] , type=ArrayaD((1, 3) , '''int64''' ) ) )
self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , '''int64''' ) )
def __magic_name__ ( self : int ) -> Union[str, Any]:
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
SCREAMING_SNAKE_CASE__ : str =pa.array(TypedSequence(['''foo''', '''bar'''] , type=ArrayaD((1, 3) , '''int64''' ) ) )
def __magic_name__ ( self : int ) -> List[str]:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =pa.array(TypedSequence([[[1, 2, 3]]] , try_type=ArrayaD((1, 3) , '''int64''' ) ) )
self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , '''int64''' ) )
def __magic_name__ ( self : str ) -> List[Any]:
SCREAMING_SNAKE_CASE__ : Dict =pa.array(TypedSequence(['''foo''', '''bar'''] , try_type=ArrayaD((1, 3) , '''int64''' ) ) )
self.assertEqual(arr.type , pa.string() )
@require_pil
def __magic_name__ ( self : int ) -> List[str]:
import PIL.Image
SCREAMING_SNAKE_CASE__ : str =PIL.Image.fromarray(np.arange(10 , dtype=np.uinta ).reshape(2 , 5 ) )
with patch(
'''datasets.arrow_writer.cast_to_python_objects''' , side_effect=__lowercase ) as mock_cast_to_python_objects:
SCREAMING_SNAKE_CASE__ : List[Any] =pa.array(TypedSequence([{'''path''': None, '''bytes''': B'''image_bytes'''}, pil_image] , type=Image() ) )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] =mock_cast_to_python_objects.call_args_list[-1]
self.assertIn('''optimize_list_casting''' , __lowercase )
self.assertFalse(kwargs['''optimize_list_casting'''] )
def _check_output(output, expected_num_chunks: int):
    stream = pa.BufferReader(output) if isinstance(output, pa.Buffer) else pa.memory_map(output)
    f = pa.ipc.open_stream(stream)
    pa_table: pa.Table = f.read_all()
    assert len(pa_table.to_batches()) == expected_num_chunks
    assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
    del pa_table
@pytest.mark.parametrize('''writer_batch_size''', [None, 1, 1_0] )
@pytest.mark.parametrize(
'''fields''', [None, {'''col_1''': pa.string(), '''col_2''': pa.intaa()}, {'''col_1''': pa.string(), '''col_2''': pa.intaa()}] )
def _a( UpperCamelCase__ : Any, UpperCamelCase__ : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any =pa.BufferOutputStream()
SCREAMING_SNAKE_CASE__ : int =pa.schema(UpperCamelCase__ ) if fields else None
with ArrowWriter(stream=UpperCamelCase__, schema=UpperCamelCase__, writer_batch_size=UpperCamelCase__ ) as writer:
writer.write({'''col_1''': '''foo''', '''col_2''': 1} )
writer.write({'''col_1''': '''bar''', '''col_2''': 2} )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[int] =writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
SCREAMING_SNAKE_CASE__ : Dict ={'''col_1''': pa.string(), '''col_2''': pa.intaa()}
assert writer._schema == pa.schema(UpperCamelCase__, metadata=writer._schema.metadata )
_check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def _a( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str =pa.BufferOutputStream()
SCREAMING_SNAKE_CASE__ : str =Features({'''labels''': ClassLabel(names=['''neg''', '''pos'''] )} )
with ArrowWriter(stream=UpperCamelCase__, features=UpperCamelCase__ ) as writer:
writer.write({'''labels''': 0} )
writer.write({'''labels''': 1} )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] =writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == features.arrow_schema
assert writer._schema.metadata == features.arrow_schema.metadata
SCREAMING_SNAKE_CASE__ : Union[str, Any] =pa.BufferReader(output.getvalue() )
SCREAMING_SNAKE_CASE__ : Optional[Any] =pa.ipc.open_stream(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : pa.Table =f.read_all()
SCREAMING_SNAKE_CASE__ : Any =pa_table.schema
assert pa_table.num_rows == 2
assert schema == features.arrow_schema
assert schema.metadata == features.arrow_schema.metadata
assert features == Features.from_arrow_schema(UpperCamelCase__ )
@pytest.mark.parametrize('''writer_batch_size''', [None, 1, 1_0] )
def _a( UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int =pa.BufferOutputStream()
with ArrowWriter(
stream=UpperCamelCase__, writer_batch_size=UpperCamelCase__, hash_salt='''split_name''', check_duplicates=UpperCamelCase__, ) as writer:
with pytest.raises(UpperCamelCase__ ):
writer.write({'''col_1''': '''foo''', '''col_2''': 1}, key=[1, 2] )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str =writer.finalize()
@pytest.mark.parametrize('''writer_batch_size''', [None, 2, 1_0] )
def _a( UpperCamelCase__ : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str =pa.BufferOutputStream()
with ArrowWriter(
stream=UpperCamelCase__, writer_batch_size=UpperCamelCase__, hash_salt='''split_name''', check_duplicates=UpperCamelCase__, ) as writer:
with pytest.raises(UpperCamelCase__ ):
writer.write({'''col_1''': '''foo''', '''col_2''': 1}, key=1_0 )
writer.write({'''col_1''': '''bar''', '''col_2''': 2}, key=1_0 )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] =writer.finalize()
@pytest.mark.parametrize('''writer_batch_size''', [None, 2, 1_0] )
def _a( UpperCamelCase__ : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] =pa.BufferOutputStream()
with ArrowWriter(
stream=UpperCamelCase__, writer_batch_size=UpperCamelCase__, hash_salt='''split_name''', check_duplicates=UpperCamelCase__, ) as writer:
writer.write({'''col_1''': '''foo''', '''col_2''': 1}, key=1 )
writer.write({'''col_1''': '''bar''', '''col_2''': 2}, key=2 )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any =writer.finalize()
assert num_examples == 2
assert num_bytes > 0
_check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize('''writer_batch_size''', [None, 1, 1_0] )
@pytest.mark.parametrize(
'''fields''', [None, {'''col_1''': pa.string(), '''col_2''': pa.intaa()}, {'''col_1''': pa.string(), '''col_2''': pa.intaa()}] )
def _a( UpperCamelCase__ : Union[str, Any], UpperCamelCase__ : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Tuple =pa.BufferOutputStream()
SCREAMING_SNAKE_CASE__ : int =pa.schema(UpperCamelCase__ ) if fields else None
with ArrowWriter(stream=UpperCamelCase__, schema=UpperCamelCase__, writer_batch_size=UpperCamelCase__ ) as writer:
writer.write_batch({'''col_1''': ['''foo''', '''bar'''], '''col_2''': [1, 2]} )
writer.write_batch({'''col_1''': [], '''col_2''': []} )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict =writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
SCREAMING_SNAKE_CASE__ : str ={'''col_1''': pa.string(), '''col_2''': pa.intaa()}
assert writer._schema == pa.schema(UpperCamelCase__, metadata=writer._schema.metadata )
_check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize('''writer_batch_size''', [None, 1, 1_0] )
@pytest.mark.parametrize(
'''fields''', [None, {'''col_1''': pa.string(), '''col_2''': pa.intaa()}, {'''col_1''': pa.string(), '''col_2''': pa.intaa()}] )
def _a( UpperCamelCase__ : Union[str, Any], UpperCamelCase__ : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] =pa.BufferOutputStream()
SCREAMING_SNAKE_CASE__ : List[Any] =pa.schema(UpperCamelCase__ ) if fields else None
with ArrowWriter(stream=UpperCamelCase__, schema=UpperCamelCase__, writer_batch_size=UpperCamelCase__ ) as writer:
writer.write_table(pa.Table.from_pydict({'''col_1''': ['''foo''', '''bar'''], '''col_2''': [1, 2]} ) )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Tuple =writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
SCREAMING_SNAKE_CASE__ : Union[str, Any] ={'''col_1''': pa.string(), '''col_2''': pa.intaa()}
assert writer._schema == pa.schema(UpperCamelCase__, metadata=writer._schema.metadata )
_check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize('''writer_batch_size''', [None, 1, 1_0] )
@pytest.mark.parametrize(
'''fields''', [None, {'''col_1''': pa.string(), '''col_2''': pa.intaa()}, {'''col_1''': pa.string(), '''col_2''': pa.intaa()}] )
def _a( UpperCamelCase__ : Optional[Any], UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int =pa.BufferOutputStream()
SCREAMING_SNAKE_CASE__ : List[Any] =pa.schema(UpperCamelCase__ ) if fields else None
with ArrowWriter(stream=UpperCamelCase__, schema=UpperCamelCase__, writer_batch_size=UpperCamelCase__ ) as writer:
writer.write_row(pa.Table.from_pydict({'''col_1''': ['''foo'''], '''col_2''': [1]} ) )
writer.write_row(pa.Table.from_pydict({'''col_1''': ['''bar'''], '''col_2''': [2]} ) )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int =writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
SCREAMING_SNAKE_CASE__ : Dict ={'''col_1''': pa.string(), '''col_2''': pa.intaa()}
assert writer._schema == pa.schema(UpperCamelCase__, metadata=writer._schema.metadata )
_check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def _a( ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE__ : Dict ={'''col_1''': pa.string(), '''col_2''': pa.intaa()}
SCREAMING_SNAKE_CASE__ : Optional[int] =os.path.join(UpperCamelCase__, '''test.arrow''' )
with ArrowWriter(path=UpperCamelCase__, schema=pa.schema(UpperCamelCase__ ) ) as writer:
writer.write_batch({'''col_1''': ['''foo''', '''bar'''], '''col_2''': [1, 2]} )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int =writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == pa.schema(UpperCamelCase__, metadata=writer._schema.metadata )
_check_output(UpperCamelCase__, 1 )
def _a( UpperCamelCase__ : Optional[int] ):
'''simple docstring'''
if pa.types.is_list(UpperCamelCase__ ):
return get_base_dtype(arr_type.value_type )
else:
return arr_type
def _a( UpperCamelCase__ : Optional[int], UpperCamelCase__ : Tuple ):
'''simple docstring'''
if isinstance(lst[0], UpperCamelCase__ ):
change_first_primitive_element_in_list(lst[0], UpperCamelCase__ )
else:
SCREAMING_SNAKE_CASE__ : Dict =value
@pytest.mark.parametrize('''optimized_int_type, expected_dtype''', [(None, pa.intaa()), (Value('''int32''' ), pa.intaa())] )
@pytest.mark.parametrize('''sequence''', [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def _a( UpperCamelCase__ : Optional[int], UpperCamelCase__ : Optional[Any], UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int =pa.array(TypedSequence(UpperCamelCase__, optimized_int_type=UpperCamelCase__ ) )
assert get_base_dtype(arr.type ) == expected_dtype
@pytest.mark.parametrize(
'''col, expected_dtype''', [
('''attention_mask''', pa.inta()),
('''special_tokens_mask''', pa.inta()),
('''token_type_ids''', pa.inta()),
('''input_ids''', pa.intaa()),
('''other''', pa.intaa()),
], )
@pytest.mark.parametrize('''sequence''', [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def _a( UpperCamelCase__ : int, UpperCamelCase__ : Tuple, UpperCamelCase__ : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] =pa.array(OptimizedTypedSequence(UpperCamelCase__, col=UpperCamelCase__ ) )
assert get_base_dtype(arr.type ) == expected_dtype
# not in range
if col != "other":
# avoids errors due to in-place modifications
SCREAMING_SNAKE_CASE__ : Tuple =copy.deepcopy(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : List[Any] =np.iinfo(expected_dtype.to_pandas_dtype() ).max + 1
change_first_primitive_element_in_list(UpperCamelCase__, UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] =pa.array(OptimizedTypedSequence(UpperCamelCase__, col=UpperCamelCase__ ) )
assert get_base_dtype(arr.type ) == pa.intaa()
@pytest.mark.parametrize('''raise_exception''', [False, True] )
def _a( UpperCamelCase__ : int, UpperCamelCase__ : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any =str(tmp_path / '''dataset-train.arrow''' )
try:
with ArrowWriter(path=UpperCamelCase__ ) as writer:
if raise_exception:
raise pa.lib.ArrowInvalid()
else:
writer.stream.close()
except pa.lib.ArrowInvalid:
pass
finally:
assert writer.stream.closed
def _a( UpperCamelCase__ : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[Any] ='''mock://dataset-train.arrow'''
with ArrowWriter(path=UpperCamelCase__, storage_options=mockfs.storage_options ) as writer:
assert isinstance(writer._fs, type(UpperCamelCase__ ) )
assert writer._fs.storage_options == mockfs.storage_options
writer.write({'''col_1''': '''foo''', '''col_2''': 1} )
writer.write({'''col_1''': '''bar''', '''col_2''': 2} )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] =writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert mockfs.exists(UpperCamelCase__ )
def _a( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] =pa.BufferOutputStream()
with ParquetWriter(stream=UpperCamelCase__ ) as writer:
writer.write({'''col_1''': '''foo''', '''col_2''': 1} )
writer.write({'''col_1''': '''bar''', '''col_2''': 2} )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] =writer.finalize()
assert num_examples == 2
assert num_bytes > 0
SCREAMING_SNAKE_CASE__ : Tuple =pa.BufferReader(output.getvalue() )
SCREAMING_SNAKE_CASE__ : pa.Table =pq.read_table(UpperCamelCase__ )
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
@require_pil
@pytest.mark.parametrize('''embed_local_files''', [False, True] )
def _a( UpperCamelCase__ : List[Any], UpperCamelCase__ : List[Any] ):
'''simple docstring'''
import PIL.Image
SCREAMING_SNAKE_CASE__ : str =str(tmp_path / '''test_image_rgb.jpg''' )
PIL.Image.fromarray(np.zeros((5, 5), dtype=np.uinta ) ).save(UpperCamelCase__, format='''png''' )
SCREAMING_SNAKE_CASE__ : Optional[int] =pa.BufferOutputStream()
with ParquetWriter(
stream=UpperCamelCase__, features=Features({'''image''': Image()} ), embed_local_files=UpperCamelCase__ ) as writer:
writer.write({'''image''': image_path} )
writer.finalize()
SCREAMING_SNAKE_CASE__ : List[str] =pa.BufferReader(output.getvalue() )
SCREAMING_SNAKE_CASE__ : pa.Table =pq.read_table(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Optional[int] =pa_table.to_pydict()
if embed_local_files:
assert isinstance(out['''image'''][0]['''path'''], UpperCamelCase__ )
with open(UpperCamelCase__, '''rb''' ) as f:
assert out["image"][0]["bytes"] == f.read()
else:
assert out["image"][0]["path"] == image_path
assert out["image"][0]["bytes"] is None
def _a( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str =pa.schema([pa.field('''col_1''', pa.string(), nullable=UpperCamelCase__ )] )
SCREAMING_SNAKE_CASE__ : List[str] =pa.BufferOutputStream()
with ArrowWriter(stream=UpperCamelCase__ ) as writer:
writer._build_writer(inferred_schema=UpperCamelCase__ )
assert writer._schema == pa.schema([pa.field('''col_1''', pa.string() )] ) | 222 |
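Distilled from the tests above, the basic `ArrowWriter` round trip looks like this; nothing is assumed beyond the `datasets` and `pyarrow` APIs the tests already exercise.

import pyarrow as pa
from datasets.arrow_writer import ArrowWriter

stream = pa.BufferOutputStream()
with ArrowWriter(stream=stream) as writer:
    writer.write({"col_1": "foo", "col_2": 1})
    writer.write({"col_1": "bar", "col_2": 2})
    num_examples, num_bytes = writer.finalize()

table = pa.ipc.open_stream(pa.BufferReader(stream.getvalue())).read_all()
assert table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}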
"""
Longest increasing subsequence in O(n log n), via the patience-sorting idea:
tail[i] holds the smallest tail value of any increasing subsequence of
length i + 1 seen so far.
"""
from __future__ import annotations


def ceil_index(v: list[int], left: int, right: int, key: int) -> int:
    # Smallest index in v[left + 1 .. right] whose value is >= key.
    while right - left > 1:
        middle = (left + right) // 2
        if v[middle] >= key:
            right = middle
        else:
            left = middle
    return right


def longest_increasing_subsequence_length(v: list[int]) -> int:
    """
    >>> longest_increasing_subsequence_length([2, 5, 3, 7, 11, 8, 10, 13, 6])
    6
    >>> longest_increasing_subsequence_length([])
    0
    """
    if len(v) == 0:
        return 0

    tail = [0] * len(v)
    length = 1

    tail[0] = v[0]
    for i in range(1, len(v)):
        if v[i] < tail[0]:
            # new smallest value starts a fresh length-1 subsequence
            tail[0] = v[i]
        elif v[i] > tail[length - 1]:
            # v[i] extends the longest subsequence found so far
            tail[length] = v[i]
            length += 1
        else:
            # v[i] replaces the ceiling element in tail
            tail[ceil_index(tail, -1, length - 1, v[i])] = v[i]

    return length


if __name__ == "__main__":
    import doctest

    doctest.testmod() | 222 | 1 |
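A worked example for the routine above.

print(longest_increasing_subsequence_length([2, 5, 3, 7, 11, 8, 10, 13, 6]))  # 6 (e.g. 2, 3, 7, 8, 10, 13)
print(longest_increasing_subsequence_length([10, 9, 8]))  # 1 (strictly decreasing input)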
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha):
    # load base model
    pipeline = StableDiffusionPipeline.from_pretrained(base_model_path, torch_dtype=torch.float32)

    # load LoRA weight from .safetensors
    state_dict = load_file(checkpoint_path)

    visited = []

    # directly update weight in diffusers model
    for key in state_dict:
        # it is suggested to print out the key; it usually looks something like
        # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"

        # as we have set the alpha beforehand, skip alpha entries and pairs already merged
        if ".alpha" in key or key in visited:
            continue

        if "text" in key:
            layer_infos = key.split(".")[0].split(lora_prefix_text_encoder + "_")[-1].split("_")
            curr_layer = pipeline.text_encoder
        else:
            layer_infos = key.split(".")[0].split(lora_prefix_unet + "_")[-1].split("_")
            curr_layer = pipeline.unet

        # find the target layer
        temp_name = layer_infos.pop(0)
        while len(layer_infos) > -1:
            try:
                curr_layer = curr_layer.__getattr__(temp_name)
                if len(layer_infos) > 0:
                    temp_name = layer_infos.pop(0)
                elif len(layer_infos) == 0:
                    break
            except Exception:
                if len(temp_name) > 0:
                    temp_name += "_" + layer_infos.pop(0)
                else:
                    temp_name = layer_infos.pop(0)

        pair_keys = []
        if "lora_down" in key:
            pair_keys.append(key.replace("lora_down", "lora_up"))
            pair_keys.append(key)
        else:
            pair_keys.append(key)
            pair_keys.append(key.replace("lora_up", "lora_down"))

        # update weight: W += alpha * (up @ down)
        if len(state_dict[pair_keys[0]].shape) == 4:
            weight_up = state_dict[pair_keys[0]].squeeze(3).squeeze(2).to(torch.float32)
            weight_down = state_dict[pair_keys[1]].squeeze(3).squeeze(2).to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down).unsqueeze(2).unsqueeze(3)
        else:
            weight_up = state_dict[pair_keys[0]].to(torch.float32)
            weight_down = state_dict[pair_keys[1]].to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down)

        # update visited list
        for item in pair_keys:
            visited.append(item)

    return pipeline
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--base_model_path''', default=None, type=str, required=True, help='''Path to the base model in diffusers format.'''
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument(
'''--lora_prefix_unet''', default='''lora_unet''', type=str, help='''The prefix of UNet weight in safetensors'''
)
parser.add_argument(
'''--lora_prefix_text_encoder''',
default='''lora_te''',
type=str,
help='''The prefix of text encoder weight in safetensors''',
)
parser.add_argument('''--alpha''', default=0.7_5, type=float, help='''The merging ratio in W = W0 + alpha * deltaW''')
parser.add_argument(
'''--to_safetensors''', action='''store_true''', help='''Whether to store pipeline in safetensors format or not.'''
)
parser.add_argument('''--device''', type=str, help='''Device to use (e.g. cpu, cuda:0, cuda:1, etc.)''')
    args = parser.parse_args()

    base_model_path = args.base_model_path
    checkpoint_path = args.checkpoint_path
    dump_path = args.dump_path
    lora_prefix_unet = args.lora_prefix_unet
    lora_prefix_text_encoder = args.lora_prefix_text_encoder
    alpha = args.alpha

    pipe = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
    pipe = pipe.to(args.device)
    pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 103 |
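A hypothetical invocation of the script above; the script filename, checkpoint paths, and output directory are all placeholders.

# python convert_lora_safetensor_to_diffusers.py \
#     --base_model_path runwayml/stable-diffusion-v1-5 \
#     --checkpoint_path ./my_lora.safetensors \
#     --dump_path ./sd15-with-lora \
#     --alpha 0.75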
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('''TEST_SAGEMAKER''' ,'''False''' ) ) is not True ,reason='''Skipping test because should only be run when releasing minor transformers version''' ,)
@pytest.mark.usefixtures('''sm_env''' )
@parameterized_class(
[
{
'''framework''': '''pytorch''',
'''script''': '''run_glue.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.g4dn.xlarge''',
'''results''': {'''train_runtime''': 650, '''eval_accuracy''': 0.6, '''eval_loss''': 0.9},
},
{
'''framework''': '''tensorflow''',
'''script''': '''run_tf.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.g4dn.xlarge''',
'''results''': {'''train_runtime''': 600, '''eval_accuracy''': 0.3, '''eval_loss''': 0.9},
},
] )
class __snake_case ( unittest.TestCase ):
def UpperCAmelCase__ ( self : int):
if self.framework == "pytorch":
subprocess.run(
F"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding='''utf-8''' , check=A_ , )
assert hasattr(self , '''env''')
def UpperCAmelCase__ ( self : Union[str, Any] , A_ : str=1):
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=F"""{self.env.base_job_name}-single""" , instance_count=A_ , instance_type=self.instance_type , debugger_hook_config=A_ , hyperparameters={**self.env.hyperparameters, '''model_name_or_path''': self.model_name_or_path} , metric_definitions=self.env.metric_definitions , py_version='''py36''' , )
def UpperCAmelCase__ ( self : List[str] , A_ : Optional[Any]):
TrainingJobAnalytics(A_).export_csv(F"""{self.env.test_path}/{job_name}_metrics.csv""")
def UpperCAmelCase__ ( self : int):
# create estimator
lowerCAmelCase_ : List[Any] = self.create_estimator()
# run training
estimator.fit()
# result dataframe
lowerCAmelCase_ : Union[str, Any] = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
# extract kpis
lowerCAmelCase_ : Optional[Any] = list(result_metrics_df[result_metrics_df.metric_name == '''eval_accuracy''']['''value'''])
lowerCAmelCase_ : Optional[int] = list(result_metrics_df[result_metrics_df.metric_name == '''eval_loss''']['''value'''])
# get train time from SageMaker job, this includes starting, preprocessing, stopping
lowerCAmelCase_ : Dict = (
Session().describe_training_job(estimator.latest_training_job.name).get('''TrainingTimeInSeconds''' , 9_9_9_9_9_9)
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy)
assert all(t <= self.results['''eval_loss'''] for t in eval_loss)
# dump tests result into json file to share in PR
with open(F"""{estimator.latest_training_job.name}.json""" , '''w''') as outfile:
json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss} , A_)
| 103 | 1 |
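A hedged sketch of how such gated SageMaker tests are typically launched; the test file path is an assumption, and the `TEST_SAGEMAKER` gate comes from the `skipif` above.

# TEST_SAGEMAKER=True python -m pytest -s -v ./tests/sagemaker/test_single_node_gpu.py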
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/maskformer-swin-base-ade": (
        "https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json"
    )
    # See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}

logger = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ):
lowercase : Any = '''maskformer'''
lowercase : Any = {'''hidden_size''': '''mask_feature_size'''}
lowercase : List[str] = ['''resnet''', '''swin''']
lowercase : Tuple = ['''detr''']
def __init__( self , __UpperCamelCase = 2_56 , __UpperCamelCase = 2_56 , __UpperCamelCase = 0.1 , __UpperCamelCase = False , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = 0.02 , __UpperCamelCase = 1.0 , __UpperCamelCase = 1.0 , __UpperCamelCase = 1.0 , __UpperCamelCase = 20.0 , __UpperCamelCase = None , **__UpperCamelCase , ) -> Dict:
'''simple docstring'''
if backbone_config is None:
# fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
__UpperCamelCase : str = SwinConfig(
image_size=3_84 , in_channels=3 , patch_size=4 , embed_dim=1_28 , depths=[2, 2, 18, 2] , num_heads=[4, 8, 16, 32] , window_size=12 , drop_path_rate=0.3 , out_features=["stage1", "stage2", "stage3", "stage4"] , )
if isinstance(_a , _a ):
__UpperCamelCase : Tuple = backbone_config.pop("model_type" )
__UpperCamelCase : Any = CONFIG_MAPPING[backbone_model_type]
__UpperCamelCase : Tuple = config_class.from_dict(_a )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
f'''Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. '''
f'''Supported model types: {",".join(self.backbones_supported )}''' )
if decoder_config is None:
# fall back to https://huggingface.co/facebook/detr-resnet-50
__UpperCamelCase : Optional[Any] = DetrConfig()
else:
# verify that the decoder is supported
__UpperCamelCase : List[str] = (
decoder_config.pop("model_type" ) if isinstance(_a , _a ) else decoder_config.model_type
)
if decoder_type not in self.decoders_supported:
raise ValueError(
f'''Transformer Decoder {decoder_type} not supported, please use one of'''
f''' {",".join(self.decoders_supported )}''' )
if isinstance(_a , _a ):
__UpperCamelCase : List[Any] = CONFIG_MAPPING[decoder_type]
__UpperCamelCase : int = config_class.from_dict(_a )
__UpperCamelCase : Tuple = backbone_config
__UpperCamelCase : int = decoder_config
# main feature dimension for the model
__UpperCamelCase : Tuple = fpn_feature_size
__UpperCamelCase : List[str] = mask_feature_size
# initializer
__UpperCamelCase : Optional[Any] = init_std
__UpperCamelCase : Dict = init_xavier_std
# Hungarian matcher && loss
__UpperCamelCase : int = cross_entropy_weight
__UpperCamelCase : List[str] = dice_weight
__UpperCamelCase : List[Any] = mask_weight
__UpperCamelCase : List[Any] = use_auxiliary_loss
__UpperCamelCase : Dict = no_object_weight
__UpperCamelCase : int = output_auxiliary_logits
__UpperCamelCase : Optional[int] = self.decoder_config.encoder_attention_heads
__UpperCamelCase : List[Any] = self.decoder_config.num_hidden_layers
super().__init__(**_a )
@classmethod
def __lowerCamelCase ( cls , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ) -> str:
'''simple docstring'''
return cls(
backbone_config=_a , decoder_config=_a , **_a , )
def __lowerCamelCase ( self ) -> Any:
'''simple docstring'''
__UpperCamelCase : int = copy.deepcopy(self.__dict__ )
__UpperCamelCase : List[str] = self.backbone_config.to_dict()
__UpperCamelCase : Tuple = self.decoder_config.to_dict()
__UpperCamelCase : List[Any] = self.__class__.model_type
return output | 363 |
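A small sketch of the intended API; with no arguments, the config falls back to the Swin backbone and DETR decoder defaults shown above.

from transformers import MaskFormerConfig

config = MaskFormerConfig()
config_dict = config.to_dict()
assert config_dict["model_type"] == "maskformer"
assert config.num_attention_heads == config.decoder_config.encoder_attention_heads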
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
HubertConfig,
HubertForCTC,
HubertModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
def UpperCAmelCase_ (_lowerCAmelCase : List[Any] , _lowerCAmelCase : Any , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : str , _lowerCAmelCase : Dict ):
for attribute in key.split("." ):
__UpperCamelCase : Any = getattr(_lowerCAmelCase , _lowerCAmelCase )
if weight_type is not None:
__UpperCamelCase : Union[str, Any] = getattr(_lowerCAmelCase , _lowerCAmelCase ).shape
else:
__UpperCamelCase : Any = hf_pointer.shape
assert hf_shape == value.shape, (
F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
__UpperCamelCase : Dict = value
elif weight_type == "weight_g":
__UpperCamelCase : Union[str, Any] = value
elif weight_type == "weight_v":
__UpperCamelCase : Union[str, Any] = value
elif weight_type == "bias":
__UpperCamelCase : str = value
else:
__UpperCamelCase : Union[str, Any] = value
logger.info(F'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def UpperCAmelCase_ (_lowerCAmelCase : Dict , _lowerCAmelCase : Tuple , _lowerCAmelCase : Optional[int] ):
__UpperCamelCase : Optional[int] = []
__UpperCamelCase : List[Any] = fairseq_model.state_dict()
__UpperCamelCase : List[str] = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
__UpperCamelCase : Any = False
if "conv_layers" in name:
load_conv_layer(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , hf_model.config.feat_extract_norm == "group" , )
__UpperCamelCase : Any = True
else:
for key, mapped_key in MAPPING.items():
__UpperCamelCase : Tuple = "hubert." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key
if key in name or (key.split("w2v_model." )[-1] == name.split("." )[0] and not is_finetuned):
__UpperCamelCase : Dict = True
if "*" in mapped_key:
__UpperCamelCase : str = name.split(_lowerCAmelCase )[0].split("." )[-2]
__UpperCamelCase : Optional[Any] = mapped_key.replace("*" , _lowerCAmelCase )
if "weight_g" in name:
__UpperCamelCase : Any = "weight_g"
elif "weight_v" in name:
__UpperCamelCase : Optional[int] = "weight_v"
elif "weight" in name:
__UpperCamelCase : str = "weight"
elif "bias" in name:
__UpperCamelCase : List[str] = "bias"
else:
__UpperCamelCase : Optional[Any] = None
set_recursively(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
continue
if not is_used:
unused_weights.append(_lowerCAmelCase )
logger.warning(F'''Unused weights: {unused_weights}''' )
def UpperCAmelCase_ (_lowerCAmelCase : Dict , _lowerCAmelCase : Tuple , _lowerCAmelCase : List[Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : str ):
__UpperCamelCase : Tuple = full_name.split("conv_layers." )[-1]
__UpperCamelCase : Dict = name.split("." )
__UpperCamelCase : Optional[int] = int(items[0] )
__UpperCamelCase : Tuple = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
__UpperCamelCase : int = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
__UpperCamelCase : Any = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
__UpperCamelCase : Optional[Any] = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
__UpperCamelCase : Optional[Any] = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(_lowerCAmelCase )
@torch.no_grad()
def UpperCAmelCase_ (_lowerCAmelCase : Dict , _lowerCAmelCase : int , _lowerCAmelCase : int=None , _lowerCAmelCase : List[str]=None , _lowerCAmelCase : str=True ):
if config_path is not None:
__UpperCamelCase : Dict = HubertConfig.from_pretrained(_lowerCAmelCase )
else:
__UpperCamelCase : List[Any] = HubertConfig()
if is_finetuned:
if dict_path:
__UpperCamelCase : int = Dictionary.load(_lowerCAmelCase )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
__UpperCamelCase : Optional[Any] = target_dict.pad_index
__UpperCamelCase : Any = target_dict.bos_index
__UpperCamelCase : List[str] = target_dict.eos_index
__UpperCamelCase : Tuple = len(target_dict.symbols )
__UpperCamelCase : str = os.path.join(_lowerCAmelCase , "vocab.json" )
if not os.path.isdir(_lowerCAmelCase ):
logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(_lowerCAmelCase ) )
return
os.makedirs(_lowerCAmelCase , exist_ok=_lowerCAmelCase )
with open(_lowerCAmelCase , "w" , encoding="utf-8" ) as vocab_handle:
json.dump(target_dict.indices , _lowerCAmelCase )
__UpperCamelCase : int = WavaVecaCTCTokenizer(
_lowerCAmelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=_lowerCAmelCase , )
__UpperCamelCase : List[Any] = True if config.feat_extract_norm == "layer" else False
__UpperCamelCase : Optional[Any] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=_lowerCAmelCase , return_attention_mask=_lowerCAmelCase , )
__UpperCamelCase : int = WavaVecaProcessor(feature_extractor=_lowerCAmelCase , tokenizer=_lowerCAmelCase )
processor.save_pretrained(_lowerCAmelCase )
__UpperCamelCase : Optional[Any] = HubertForCTC(_lowerCAmelCase )
else:
__UpperCamelCase : Union[str, Any] = HubertModel(_lowerCAmelCase )
if is_finetuned:
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase : List[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
else:
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase : List[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
__UpperCamelCase : Optional[Any] = model[0].eval()
recursively_load_weights(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
hf_wavavec.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
lowercase : Tuple = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
lowercase : List[str] = parser.parse_args()
convert_hubert_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
) | 171 | 0 |
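A hypothetical invocation of the conversion script above; the script filename, checkpoint path, and dump folder are placeholders.

# python convert_hubert_original_pytorch_checkpoint_to_pytorch.py \
#     --checkpoint_path ./hubert_base_ls960.pt \
#     --pytorch_dump_folder_path ./hubert-base \
#     --not_finetuned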
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)

MODEL_CONFIG_CLASSES = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
    """

    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Leave None if you want to train a model from"
                " scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )


@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    train_data_file: Optional[str] = field(
        default=None, metadata={"help": "The input training data file (a text file)."}
    )
    train_data_files: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The input training data files (multiple files in glob format). "
                "Very often splitting large files to smaller files can prevent tokenizer going out of memory"
            )
        },
    )
    eval_data_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    train_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input train ref data file for whole word mask in Chinese."},
    )
    eval_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input eval ref data file for whole word mask in Chinese."},
    )
    line_by_line: bool = field(
        default=False,
        metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."},
    )
    mlm: bool = field(
        default=False, metadata={"help": "Train with masked-language modeling loss instead of language modeling."}
    )
    whole_word_mask: bool = field(default=False, metadata={"help": "Whether or not to use whole word masking."})
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
    )
    plm_probability: float = field(
        default=1 / 6,
        metadata={
            "help": (
                "Ratio of length of a span of masked tokens to surrounding context length for permutation language"
                " modeling."
            )
        },
    )
    max_span_length: int = field(
        default=5, metadata={"help": "Maximum length of a span of masked tokens for permutation language modeling."}
    )
    block_size: int = field(
        default=-1,
        metadata={
            "help": (
                "Optional input sequence length after tokenization. "
                "The training dataset will be truncated in blocks of this size for training. "
                "Defaults to the model max input length for single sentence inputs (taking special tokens into account)."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
def get_dataset(
    args: DataTrainingArguments,
    tokenizer: PreTrainedTokenizer,
    evaluate: bool = False,
    cache_dir: Optional[str] = None,
):
    def _dataset(file_path, ref_path=None):
        if args.line_by_line:
            if ref_path is not None:
                if not args.whole_word_mask or not args.mlm:
                    raise ValueError("You need to set whole word masking and mlm to True for Chinese Whole Word Mask")
                return LineByLineWithRefDataset(
                    tokenizer=tokenizer,
                    file_path=file_path,
                    block_size=args.block_size,
                    ref_path=ref_path,
                )

            return LineByLineTextDataset(tokenizer=tokenizer, file_path=file_path, block_size=args.block_size)
        else:
            return TextDataset(
                tokenizer=tokenizer,
                file_path=file_path,
                block_size=args.block_size,
                overwrite_cache=args.overwrite_cache,
                cache_dir=cache_dir,
            )

    if evaluate:
        return _dataset(args.eval_data_file, args.eval_ref_file)
    elif args.train_data_files:
        return ConcatDataset([_dataset(f) for f in glob(args.train_data_files)])
    else:
        return _dataset(args.train_data_file, args.train_ref_file)
def UpperCamelCase_( ):
"""simple docstring"""
__a =HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
__a , __a , __a =parser.parse_args_into_dataclasses()
if data_args.eval_data_file is None and training_args.do_eval:
    raise ValueError(
        "Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file "
        "or remove the --do_eval argument."
    )
if (
    os.path.exists(training_args.output_dir)
    and os.listdir(training_args.output_dir)
    and training_args.do_train
    and not training_args.overwrite_output_dir
):
    raise ValueError(
        f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
        " --overwrite_output_dir to overcome."
    )

# Setup logging
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
    datefmt="%m/%d/%Y %H:%M:%S",
    level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
)
logger.warning(
    "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
    training_args.local_rank,
    training_args.device,
    training_args.n_gpu,
    bool(training_args.local_rank != -1),
    training_args.fp16,
)
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank):
    transformers.utils.logging.set_verbosity_info()
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
logger.info("Training/evaluation parameters %s", training_args)

# Set seed
set_seed(training_args.seed)

# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
if model_args.config_name:
    config = AutoConfig.from_pretrained(model_args.config_name, cache_dir=model_args.cache_dir)
elif model_args.model_name_or_path:
    config = AutoConfig.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
else:
    config = CONFIG_MAPPING[model_args.model_type]()
    logger.warning("You are instantiating a new config instance from scratch.")

if model_args.tokenizer_name:
    tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, cache_dir=model_args.cache_dir)
elif model_args.model_name_or_path:
    tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
else:
    raise ValueError(
        "You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another"
        " script, save it, and load it from here, using --tokenizer_name"
    )

if model_args.model_name_or_path:
    model = AutoModelWithLMHead.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
    )
else:
    logger.info("Training new model from scratch")
    model = AutoModelWithLMHead.from_config(config)

model.resize_token_embeddings(len(tokenizer))

if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
    raise ValueError(
        "BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the "
        "--mlm flag (masked language modeling)."
    )

if data_args.block_size <= 0:
    block_size = tokenizer.max_len
    # Our input block size will be the max possible for the model
else:
    block_size = min(data_args.block_size, tokenizer.max_len)

# Get datasets
train_dataset = (
    get_dataset(data_args, tokenizer=tokenizer, cache_dir=model_args.cache_dir) if training_args.do_train else None
)
eval_dataset = (
    get_dataset(data_args, tokenizer=tokenizer, evaluate=True, cache_dir=model_args.cache_dir)
    if training_args.do_eval
    else None
)
if config.model_type == "xlnet":
    data_collator = DataCollatorForPermutationLanguageModeling(
        tokenizer=tokenizer,
        plm_probability=data_args.plm_probability,
        max_span_length=data_args.max_span_length,
    )
else:
    if data_args.mlm and data_args.whole_word_mask:
        data_collator = DataCollatorForWholeWordMask(
            tokenizer=tokenizer, mlm_probability=data_args.mlm_probability
        )
    else:
        data_collator = DataCollatorForLanguageModeling(
            tokenizer=tokenizer, mlm=data_args.mlm, mlm_probability=data_args.mlm_probability
        )

# Initialize our Trainer
trainer = Trainer(
    model=model,
    args=training_args,
    data_collator=data_collator,
    train_dataset=train_dataset,
    eval_dataset=eval_dataset,
    prediction_loss_only=True,
)

# Training
if training_args.do_train:
    model_path = (
        model_args.model_name_or_path
        if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path)
        else None
    )
    trainer.train(model_path=model_path)
    trainer.save_model()
    # For convenience, we also re-save the tokenizer to the same directory,
    # so that you can share your model easily on huggingface.co/models =)
    if trainer.is_world_master():
        tokenizer.save_pretrained(training_args.output_dir)

# Evaluation
results = {}
if training_args.do_eval:
    logger.info("*** Evaluate ***")

    eval_output = trainer.evaluate()

    perplexity = math.exp(eval_output["eval_loss"])
    result = {"perplexity": perplexity}

    output_eval_file = os.path.join(training_args.output_dir, "eval_results_lm.txt")
    if trainer.is_world_master():
        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key in sorted(result.keys()):
                logger.info("  %s = %s", key, str(result[key]))
                writer.write("%s = %s\n" % (key, str(result[key])))

    results.update(result)

return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
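
# A minimal sketch of how this legacy language-modeling script is typically
# invoked; the script name and file paths below are illustrative assumptions,
# not part of the source above:
#
#   python run_language_modeling.py \
#       --model_name_or_path roberta-base \
#       --train_data_file train.txt \
#       --eval_data_file eval.txt \
#       --mlm --do_train --do_eval \
#       --output_dir ./lm-output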
| 218 |
import argparse
from argparse import Namespace

import torch
from torch import nn

from transformers import XGLMConfig, XGLMForCausalLM


def remove_ignore_keys_(state_dict):
    # Drop fairseq bookkeeping tensors that have no counterpart in the HF model.
    ignore_keys = [
        "decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    # Build an output projection that shares weights with the embedding matrix.
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_xglm_checkpoint_from_disk(checkpoint_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    args = Namespace(**checkpoint["cfg"]["model"])
    state_dict = checkpoint["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]

    state_dict = {key.replace("decoder", "model"): val for key, val in state_dict.items()}

    config = XGLMConfig(
        vocab_size=vocab_size,
        max_position_embeddings=args.max_target_positions,
        num_layers=args.decoder_layers,
        attention_heads=args.decoder_attention_heads,
        ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.decoder_embed_dim,
        layerdrop=args.decoder_layerdrop,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="gelu",
        scale_embedding=not args.no_scale_embedding,
        tie_word_embeddings=args.share_decoder_input_output_embed,
    )

    model = XGLMForCausalLM(config)
    missing = model.load_state_dict(state_dict, strict=False)
    print(missing)
    model.lm_head = make_linear_from_emb(model.model.embed_tokens)

    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    model = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
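
# Example invocation (the script filename and both paths are placeholder
# assumptions, not taken from the source):
#
#   python convert_xglm_checkpoint.py /path/to/fairseq/model.pt /path/to/hf_dump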
| 218 | 1 |
from transformers import BertTokenizer, EncoderDecoderModel, Seq2SeqTrainer, Seq2SeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available

if is_datasets_available():
    import datasets


class Seq2seqTrainerTester(TestCasePlus):
    @slow
    @require_torch
    def test_finetune_bert2bert(self):
        bert2bert = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny", "prajjwal1/bert-tiny")
        tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

        bert2bert.config.vocab_size = bert2bert.config.encoder.vocab_size
        bert2bert.config.eos_token_id = tokenizer.sep_token_id
        bert2bert.config.decoder_start_token_id = tokenizer.cls_token_id
        bert2bert.config.max_length = 128

        train_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="train[:1%]")
        val_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="validation[:1%]")

        train_dataset = train_dataset.select(range(32))
        val_dataset = val_dataset.select(range(16))

        batch_size = 4

        def _map_to_encoder_decoder_inputs(batch):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch["article"], padding="max_length", truncation=True, max_length=512)
            outputs = tokenizer(batch["highlights"], padding="max_length", truncation=True, max_length=128)
            batch["input_ids"] = inputs.input_ids
            batch["attention_mask"] = inputs.attention_mask

            batch["decoder_input_ids"] = outputs.input_ids
            batch["labels"] = outputs.input_ids.copy()
            batch["labels"] = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
            ]
            batch["decoder_attention_mask"] = outputs.attention_mask

            assert all(len(x) == 512 for x in inputs.input_ids)
            assert all(len(x) == 128 for x in outputs.input_ids)

            return batch

        def _compute_metrics(pred):
            labels_ids = pred.label_ids
            pred_ids = pred.predictions

            # all unnecessary tokens are removed
            pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True)
            label_str = tokenizer.batch_decode(labels_ids, skip_special_tokens=True)

            accuracy = sum([int(pred_str[i] == label_str[i]) for i in range(len(pred_str))]) / len(pred_str)

            return {"accuracy": accuracy}

        # map train dataset
        train_dataset = train_dataset.map(
            _map_to_encoder_decoder_inputs,
            batched=True,
            batch_size=batch_size,
            remove_columns=["article", "highlights"],
        )
        train_dataset.set_format(
            type="torch",
            columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )

        # same for validation dataset
        val_dataset = val_dataset.map(
            _map_to_encoder_decoder_inputs,
            batched=True,
            batch_size=batch_size,
            remove_columns=["article", "highlights"],
        )
        val_dataset.set_format(
            type="torch",
            columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )

        output_dir = self.get_auto_remove_tmp_dir()

        training_args = Seq2SeqTrainingArguments(
            output_dir=output_dir,
            per_device_train_batch_size=batch_size,
            per_device_eval_batch_size=batch_size,
            predict_with_generate=True,
            evaluation_strategy="steps",
            do_train=True,
            do_eval=True,
            warmup_steps=0,
            eval_steps=2,
            logging_steps=2,
        )

        # instantiate trainer
        trainer = Seq2SeqTrainer(
            model=bert2bert,
            args=training_args,
            compute_metrics=_compute_metrics,
            train_dataset=train_dataset,
            eval_dataset=val_dataset,
            tokenizer=tokenizer,
        )

        # start training
        trainer.train()
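
# Running this slow test in isolation (the file path and -k expression are
# assumptions based on the usual transformers test layout):
#
#   RUN_SLOW=1 pytest tests/trainer/test_trainer_seq2seq.py -k test_finetune_bert2bert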
| 363 |
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
    from .pipeline_controlnet_img2img import StableDiffusionControlNetImg2ImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
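
# A minimal usage sketch for the main pipeline re-exported here (the model ids
# are illustrative assumptions):
#
#   from diffusers import ControlNetModel, StableDiffusionControlNetPipeline
#
#   controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
#   pipe = StableDiffusionControlNetPipeline.from_pretrained(
#       "runwayml/stable-diffusion-v1-5", controlnet=controlnet
#   )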
| 281 | 0 |
import os

import torch

from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version

if is_torch_version(">=", FSDP_PYTORCH_VERSION):
    import torch.distributed.checkpoint as dist_cp
    from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
    from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
    from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
    from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType

logger = get_logger(__name__)


def save_fsdp_model(fsdp_plugin, accelerator, model, output_dir, model_index=0):
    os.makedirs(output_dir, exist_ok=True)
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        state_dict = model.state_dict()
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            weights_name = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
            output_model_file = os.path.join(output_dir, weights_name)
            if accelerator.process_index == 0:
                logger.info(f"Saving model to {output_model_file}")
                torch.save(state_dict, output_model_file)
                logger.info(f"Model saved to {output_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
                if model_index == 0
                else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
            )
            output_model_file = os.path.join(output_dir, weights_name)
            logger.info(f"Saving model to {output_model_file}")
            torch.save(state_dict, output_model_file)
            logger.info(f"Model saved to {output_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = os.path.join(output_dir, f"{MODEL_NAME}_{model_index}")
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f"Saving model to {ckpt_dir}")
            state_dict = {"model": state_dict}
            dist_cp.save_state_dict(
                state_dict=state_dict,
                storage_writer=dist_cp.FileSystemWriter(ckpt_dir),
                planner=DefaultSavePlanner(),
            )
            logger.info(f"Model saved to {ckpt_dir}")


def load_fsdp_model(fsdp_plugin, accelerator, model, input_dir, model_index=0):
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if type(model) != FSDP and accelerator.process_index != 0:
                if not fsdp_plugin.sync_module_states:
                    raise ValueError(
                        "Set the `sync_module_states` flag to `True` so that model states are synced across processes when "
                        "initializing FSDP object"
                    )
                return
            weights_name = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f"Loading model from {input_model_file}")
            state_dict = torch.load(input_model_file)
            logger.info(f"Model loaded from {input_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
                if model_index == 0
                else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
            )
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f"Loading model from {input_model_file}")
            state_dict = torch.load(input_model_file)
            logger.info(f"Model loaded from {input_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = (
                os.path.join(input_dir, f"{MODEL_NAME}_{model_index}")
                if f"{MODEL_NAME}" not in input_dir
                else input_dir
            )
            logger.info(f"Loading model from {ckpt_dir}")
            state_dict = {"model": model.state_dict()}
            dist_cp.load_state_dict(
                state_dict=state_dict,
                storage_reader=dist_cp.FileSystemReader(ckpt_dir),
                planner=DefaultLoadPlanner(),
            )
            state_dict = state_dict["model"]
            logger.info(f"Model loaded from {ckpt_dir}")
        model.load_state_dict(state_dict)


def save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, output_dir, optimizer_index=0):
    os.makedirs(output_dir, exist_ok=True)
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        optim_state = FSDP.optim_state_dict(model, optimizer)
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if accelerator.process_index == 0:
                optim_state_name = (
                    f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
                )
                output_optimizer_file = os.path.join(output_dir, optim_state_name)
                logger.info(f"Saving Optimizer state to {output_optimizer_file}")
                torch.save(optim_state, output_optimizer_file)
                logger.info(f"Optimizer state saved in {output_optimizer_file}")
        else:
            ckpt_dir = os.path.join(output_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f"Saving Optimizer state to {ckpt_dir}")
            dist_cp.save_state_dict(
                state_dict={"optimizer": optim_state},
                storage_writer=dist_cp.FileSystemWriter(ckpt_dir),
                planner=DefaultSavePlanner(),
            )
            logger.info(f"Optimizer state saved in {ckpt_dir}")


def load_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, input_dir, optimizer_index=0):
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            optim_state = None
            # below check should work but currently it isn't working (mostly a pytorch issue),
            # in the meantime disabling it at the cost of excess memory usage
            # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
            optimizer_name = (
                f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
            )
            input_optimizer_file = os.path.join(input_dir, optimizer_name)
            logger.info(f"Loading Optimizer state from {input_optimizer_file}")
            optim_state = torch.load(input_optimizer_file)
            logger.info(f"Optimizer state loaded from {input_optimizer_file}")
        else:
            ckpt_dir = (
                os.path.join(input_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
                if f"{OPTIMIZER_NAME}" not in input_dir
                else input_dir
            )
            logger.info(f"Loading Optimizer from {ckpt_dir}")
            optim_state = load_sharded_optimizer_state_dict(
                model_state_dict=model.state_dict(),
                optimizer_key="optimizer",
                storage_reader=dist_cp.FileSystemReader(ckpt_dir),
            )
            optim_state = optim_state["optimizer"]
            logger.info(f"Optimizer loaded from {ckpt_dir}")
        flattened_osd = FSDP.optim_state_dict_to_load(optim_state, model, optimizer)
        optimizer.load_state_dict(flattened_osd)
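
# A rough usage sketch; the plugin/accelerator objects are assumptions, and in
# practice Accelerate calls these helpers internally from
# `Accelerator.save_state` / `Accelerator.load_state`:
#
#   save_fsdp_model(fsdp_plugin, accelerator, model, "ckpt")
#   save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, "ckpt")
#   ...
#   load_fsdp_model(fsdp_plugin, accelerator, model, "ckpt")
#   load_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, "ckpt")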
| 7 |
"""simple docstring"""
def lowerCamelCase_ (UpperCamelCase__ : int , UpperCamelCase__ : int ):
if a < 0 or b < 0:
raise ValueError('''the value of both inputs must be positive''' )
_UpperCAmelCase : List[str] = str(bin(UpperCamelCase__ ) )[2:] # remove the leading "0b"
_UpperCAmelCase : str = str(bin(UpperCamelCase__ ) )[2:]
_UpperCAmelCase : List[str] = max(len(UpperCamelCase__ ) , len(UpperCamelCase__ ) )
return "0b" + "".join(
str(int('''1''' in (char_a, char_b) ) )
for char_a, char_b in zip(a_binary.zfill(UpperCamelCase__ ) , b_binary.zfill(UpperCamelCase__ ) ) )
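
# Design note: rather than simply returning bin(a | b), the function zero-pads
# both operands to a common width and ORs them character by character, which
# keeps the bit-level mechanics explicit.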
if __name__ == "__main__":
import doctest
doctest.testmod()
| 263 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "studio-ousia/luke-base": "https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json",
    "studio-ousia/luke-large": "https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json",
}


class LukeConfig(PretrainedConfig):
    model_type = "luke"

    def __init__(
        self,
        vocab_size=50267,
        entity_vocab_size=500000,
        hidden_size=768,
        entity_emb_size=256,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_entity_aware_attention=True,
        classifier_dropout=None,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.entity_vocab_size = entity_vocab_size
        self.hidden_size = hidden_size
        self.entity_emb_size = entity_emb_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_entity_aware_attention = use_entity_aware_attention
        self.classifier_dropout = classifier_dropout
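
# A minimal instantiation sketch; the keyword values shown are this config's
# defaults, so it mirrors `LukeConfig()`:
#
#   from transformers import LukeConfig, LukeModel
#
#   config = LukeConfig(vocab_size=50267, entity_vocab_size=500000)
#   model = LukeModel(config)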
| 47 |
from __future__ import annotations

import math


def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers and all multiples of 3 are not primes
        return False

    # All primes are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def list_truncated_nums(n: int) -> list[int]:
    # All numbers obtained by repeatedly truncating n from the left and the right.
    str_num = str(n)
    list_nums = [n]
    for i in range(1, len(str_num)):
        list_nums.append(int(str_num[i:]))
        list_nums.append(int(str_num[:-i]))
    return list_nums


def validate(n: int) -> bool:
    # Cheap pre-filter: the leading and trailing three digits must themselves be prime.
    if len(str(n)) > 3:
        if not is_prime(int(str(n)[-3:])) or not is_prime(int(str(n)[:3])):
            return False
    return True


def compute_truncated_primes(count: int = 11) -> list[int]:
    list_truncated_primes: list[int] = []
    num = 13
    while len(list_truncated_primes) != count:
        if validate(num):
            list_nums = list_truncated_nums(num)
            if all(is_prime(i) for i in list_nums):
                list_truncated_primes.append(num)
        num += 2
    return list_truncated_primes


def solution() -> int:
    return sum(compute_truncated_primes(11))


if __name__ == "__main__":
    print(f"{sum(compute_truncated_primes(11)) = }")
| 47 | 1 |