Dataset schema (one row = five columns):

    column                    type     range
    --------------------------------------------------------
    code                      string   length 86 to 54.5k
    code_codestyle            int64    0 to 371
    style_context             string   length 87 to 49.2k
    style_context_codestyle   int64    0 to 349
    label                     int64    0 to 1

Sample rows:
"""simple docstring""" def __UpperCAmelCase ( lowercase ): """simple docstring""" _UpperCAmelCase = [0] * len(lowercase ) for i in range(1 ,len(lowercase ) ): # use last results for better performance - dynamic programming _UpperCAmelCase = prefix_result[i - 1] while j > 0 and input_string[i] != input_string[j]: _UpperCAmelCase = prefix_result[j - 1] if input_string[i] == input_string[j]: j += 1 _UpperCAmelCase = j return prefix_result def __UpperCAmelCase ( lowercase ): """simple docstring""" return max(prefix_function(lowercase ) ) if __name__ == "__main__": import doctest doctest.testmod()
code_codestyle: 30
"""simple docstring""" import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from timm.data import resolve_data_config from timm.data.transforms_factory import create_transform from transformers import ( BitConfig, ViTHybridConfig, ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel, ) from transformers.image_utils import PILImageResampling from transformers.utils import logging logging.set_verbosity_info() UpperCAmelCase__ = logging.get_logger(__name__) def __UpperCAmelCase ( lowercase ,lowercase=False ): """simple docstring""" _UpperCAmelCase = [] # fmt: off # stem: rename_keys.append(("""cls_token""", """vit.embeddings.cls_token""") ) rename_keys.append(("""pos_embed""", """vit.embeddings.position_embeddings""") ) rename_keys.append(("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight""") ) rename_keys.append(("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias""") ) # backbone rename_keys.append(("""patch_embed.backbone.stem.conv.weight""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight""") ) rename_keys.append(("""patch_embed.backbone.stem.norm.weight""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight""") ) rename_keys.append(("""patch_embed.backbone.stem.norm.bias""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias""") ) for stage_idx in range(len(config.backbone_config.depths ) ): for layer_idx in range(config.backbone_config.depths[stage_idx] ): rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight''') ) rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight''') ) rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias''') ) rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight''') ) rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight''') ) rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias''') ) rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight''') ) rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight''') ) rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias''') ) 
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight''') ) rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight''') ) rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias''') ) # transformer encoder for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''vit.encoder.layer.{i}.layernorm_before.weight''') ) rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''vit.encoder.layer.{i}.layernorm_before.bias''') ) rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''vit.encoder.layer.{i}.attention.output.dense.weight''') ) rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''vit.encoder.layer.{i}.attention.output.dense.bias''') ) rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''vit.encoder.layer.{i}.layernorm_after.weight''') ) rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''vit.encoder.layer.{i}.layernorm_after.bias''') ) rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''vit.encoder.layer.{i}.intermediate.dense.weight''') ) rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''vit.encoder.layer.{i}.intermediate.dense.bias''') ) rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''vit.encoder.layer.{i}.output.dense.weight''') ) rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''vit.encoder.layer.{i}.output.dense.bias''') ) if base_model: # layernorm + pooler rename_keys.extend( [ ("""norm.weight""", """layernorm.weight"""), ("""norm.bias""", """layernorm.bias"""), ("""pre_logits.fc.weight""", """pooler.dense.weight"""), ("""pre_logits.fc.bias""", """pooler.dense.bias"""), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" _UpperCAmelCase = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ("""norm.weight""", """vit.layernorm.weight"""), ("""norm.bias""", """vit.layernorm.bias"""), ("""head.weight""", """classifier.weight"""), ("""head.bias""", """classifier.bias"""), ] ) # fmt: on return rename_keys def __UpperCAmelCase ( lowercase ,lowercase ,lowercase=False ): """simple docstring""" for i in range(config.num_hidden_layers ): if base_model: _UpperCAmelCase = """""" else: _UpperCAmelCase = """vit.""" # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) _UpperCAmelCase = state_dict.pop(f'''blocks.{i}.attn.qkv.weight''' ) _UpperCAmelCase = state_dict.pop(f'''blocks.{i}.attn.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict _UpperCAmelCase = in_proj_weight[ : config.hidden_size, : ] _UpperCAmelCase = in_proj_bias[: config.hidden_size] _UpperCAmelCase = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] _UpperCAmelCase = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] _UpperCAmelCase = in_proj_weight[ -config.hidden_size :, : ] _UpperCAmelCase = in_proj_bias[-config.hidden_size :] def __UpperCAmelCase ( lowercase ): """simple docstring""" 
_UpperCAmelCase = ["""head.weight""", """head.bias"""] for k in ignore_keys: state_dict.pop(lowercase ,lowercase ) def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ): """simple docstring""" _UpperCAmelCase = dct.pop(lowercase ) _UpperCAmelCase = val def __UpperCAmelCase ( ): """simple docstring""" _UpperCAmelCase = """http://images.cocodataset.org/val2017/000000039769.jpg""" _UpperCAmelCase = Image.open(requests.get(lowercase ,stream=lowercase ).raw ) return im @torch.no_grad() def __UpperCAmelCase ( lowercase ,lowercase ,lowercase=False ): """simple docstring""" _UpperCAmelCase = BitConfig( global_padding="""same""" ,layer_type="""bottleneck""" ,depths=(3, 4, 9) ,out_features=["""stage3"""] ,embedding_dynamic_padding=lowercase ,) _UpperCAmelCase = ViTHybridConfig(backbone_config=lowercase ,image_size=3_84 ,num_labels=10_00 ) _UpperCAmelCase = False # load original model from timm _UpperCAmelCase = timm.create_model(lowercase ,pretrained=lowercase ) timm_model.eval() # load state_dict of original model, remove and rename some keys _UpperCAmelCase = timm_model.state_dict() if base_model: remove_classification_head_(lowercase ) _UpperCAmelCase = create_rename_keys(lowercase ,lowercase ) for src, dest in rename_keys: rename_key(lowercase ,lowercase ,lowercase ) read_in_q_k_v(lowercase ,lowercase ,lowercase ) _UpperCAmelCase = """huggingface/label-files""" _UpperCAmelCase = """imagenet-1k-id2label.json""" _UpperCAmelCase = json.load(open(hf_hub_download(lowercase ,lowercase ,repo_type="""dataset""" ) ,"""r""" ) ) _UpperCAmelCase = {int(lowercase ): v for k, v in idalabel.items()} _UpperCAmelCase = idalabel _UpperCAmelCase = {v: k for k, v in idalabel.items()} # load HuggingFace model if vit_name[-5:] == "in21k": _UpperCAmelCase = ViTHybridModel(lowercase ).eval() else: _UpperCAmelCase = ViTHybridForImageClassification(lowercase ).eval() model.load_state_dict(lowercase ) # create image processor _UpperCAmelCase = create_transform(**resolve_data_config({} ,model=lowercase ) ) _UpperCAmelCase = transform.transforms _UpperCAmelCase = { """bilinear""": PILImageResampling.BILINEAR, """bicubic""": PILImageResampling.BICUBIC, """nearest""": PILImageResampling.NEAREST, } _UpperCAmelCase = ViTHybridImageProcessor( do_resize=lowercase ,size={"""shortest_edge""": timm_transforms[0].size} ,resample=pillow_resamplings[timm_transforms[0].interpolation.value] ,do_center_crop=lowercase ,crop_size={"""height""": timm_transforms[1].size[0], """width""": timm_transforms[1].size[1]} ,do_normalize=lowercase ,image_mean=timm_transforms[-1].mean.tolist() ,image_std=timm_transforms[-1].std.tolist() ,) _UpperCAmelCase = prepare_img() _UpperCAmelCase = transform(lowercase ).unsqueeze(0 ) _UpperCAmelCase = processor(lowercase ,return_tensors="""pt""" ).pixel_values # verify pixel values assert torch.allclose(lowercase ,lowercase ) # verify logits with torch.no_grad(): _UpperCAmelCase = model(lowercase ) _UpperCAmelCase = outputs.logits print("""Predicted class:""" ,logits.argmax(-1 ).item() ) if base_model: _UpperCAmelCase = timm_model.forward_features(lowercase ) assert timm_pooled_output.shape == outputs.pooler_output.shape assert torch.allclose(lowercase ,outputs.pooler_output ,atol=1E-3 ) else: _UpperCAmelCase = timm_model(lowercase ) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(lowercase ,outputs.logits ,atol=1E-3 ) print("""Looks ok!""" ) if pytorch_dump_folder_path is not None: Path(lowercase ).mkdir(exist_ok=lowercase ) print(f'''Saving model {vit_name} to 
{pytorch_dump_folder_path}''' ) model.save_pretrained(lowercase ) print(f'''Saving processor to {pytorch_dump_folder_path}''' ) processor.save_pretrained(lowercase ) if push_to_hub: print(f'''Pushing model and processor to the hub {vit_name}''' ) model.push_to_hub(f'''ybelkada/{vit_name}''' ) processor.push_to_hub(f'''ybelkada/{vit_name}''' ) if __name__ == "__main__": UpperCAmelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--vit_name""", default="""vit_base_r50_s16_384""", type=str, help="""Name of the hybrid ViT timm model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub.""" ) UpperCAmelCase__ = parser.parse_args() convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
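The `read_in_q_k_v` helper above relies on timm fusing query, key and value into one projection matrix, stacked in that order along the first axis. A minimal sketch of that slicing with a made-up hidden size (illustrative only, not part of the row):

    import torch

    hidden_size = 4
    # Fused projection: 3 * hidden_size rows, stacked as [query; key; value].
    qkv = torch.arange(3 * hidden_size * hidden_size, dtype=torch.float32).reshape(3 * hidden_size, hidden_size)
    query = qkv[:hidden_size, :]
    key = qkv[hidden_size : hidden_size * 2, :]
    value = qkv[-hidden_size:, :]
    assert torch.equal(torch.cat([query, key, value]), qkv)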
style_context_codestyle: 30
label: 1
"""simple docstring""" import argparse import os # New Code # import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils import find_executable_batch_size ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing how to ensure out-of-memory errors never # interrupt training, and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## UpperCAmelCase__ = 1_6 UpperCAmelCase__ = 3_2 def __UpperCAmelCase ( lowercase ,lowercase = 16 ): """simple docstring""" _UpperCAmelCase = AutoTokenizer.from_pretrained("""bert-base-cased""" ) _UpperCAmelCase = load_dataset("""glue""" ,"""mrpc""" ) def tokenize_function(lowercase ): # max_length=None => use the model max length (it's actually the default) _UpperCAmelCase = tokenizer(examples["""sentence1"""] ,examples["""sentence2"""] ,truncation=lowercase ,max_length=lowercase ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): _UpperCAmelCase = datasets.map( lowercase ,batched=lowercase ,remove_columns=["""idx""", """sentence1""", """sentence2"""] ,) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library _UpperCAmelCase = tokenized_datasets.rename_column("""label""" ,"""labels""" ) def collate_fn(lowercase ): # On TPU it's best to pad everything to the same length or training will be very slow. _UpperCAmelCase = 1_28 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": _UpperCAmelCase = 16 elif accelerator.mixed_precision != "no": _UpperCAmelCase = 8 else: _UpperCAmelCase = None return tokenizer.pad( lowercase ,padding="""longest""" ,max_length=lowercase ,pad_to_multiple_of=lowercase ,return_tensors="""pt""" ,) # Instantiate dataloaders. 
_UpperCAmelCase = DataLoader( tokenized_datasets["""train"""] ,shuffle=lowercase ,collate_fn=lowercase ,batch_size=lowercase ) _UpperCAmelCase = DataLoader( tokenized_datasets["""validation"""] ,shuffle=lowercase ,collate_fn=lowercase ,batch_size=lowercase ) return train_dataloader, eval_dataloader # For testing only if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1": from accelerate.test_utils.training import mocked_dataloaders UpperCAmelCase__ = mocked_dataloaders # noqa: F811 def __UpperCAmelCase ( lowercase ,lowercase ): """simple docstring""" # For testing only if os.environ.get("""TESTING_MOCKED_DATALOADERS""" ,lowercase ) == "1": _UpperCAmelCase = 2 # Initialize accelerator _UpperCAmelCase = Accelerator(cpu=args.cpu ,mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs _UpperCAmelCase = config["""lr"""] _UpperCAmelCase = int(config["""num_epochs"""] ) _UpperCAmelCase = int(config["""seed"""] ) _UpperCAmelCase = int(config["""batch_size"""] ) _UpperCAmelCase = evaluate.load("""glue""" ,"""mrpc""" ) # New Code # # We now can define an inner training loop function. It should take a batch size as the only parameter, # and build the dataloaders in there. # It also gets our decorator @find_executable_batch_size(starting_batch_size=lowercase ) def inner_training_loop(lowercase ): # And now just move everything below under this function # We need to bring in the Accelerator object from earlier nonlocal accelerator # And reset all of its attributes that could hold onto any memory: accelerator.free_memory() # Then we can declare the model, optimizer, and everything else: set_seed(lowercase ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) _UpperCAmelCase = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" ,return_dict=lowercase ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). _UpperCAmelCase = model.to(accelerator.device ) # Instantiate optimizer _UpperCAmelCase = AdamW(params=model.parameters() ,lr=lowercase ) _UpperCAmelCase , _UpperCAmelCase = get_dataloaders(lowercase ,lowercase ) # Instantiate scheduler _UpperCAmelCase = get_linear_schedule_with_warmup( optimizer=lowercase ,num_warmup_steps=1_00 ,num_training_steps=(len(lowercase ) * num_epochs) ,) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = accelerator.prepare( lowercase ,lowercase ,lowercase ,lowercase ,lowercase ) # Now we train the model for epoch in range(lowercase ): model.train() for step, batch in enumerate(lowercase ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) _UpperCAmelCase = model(**lowercase ) _UpperCAmelCase = outputs.loss accelerator.backward(lowercase ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(lowercase ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) with torch.no_grad(): _UpperCAmelCase = model(**lowercase ) _UpperCAmelCase = outputs.logits.argmax(dim=-1 ) _UpperCAmelCase , _UpperCAmelCase = accelerator.gather_for_metrics((predictions, batch["""labels"""]) ) metric.add_batch( predictions=lowercase ,references=lowercase ,) _UpperCAmelCase = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f'''epoch {epoch}:''' ,lowercase ) # New Code # # And call it at the end with no arguments # Note: You could also refactor this outside of your training loop function inner_training_loop() def __UpperCAmelCase ( ): """simple docstring""" _UpperCAmelCase = argparse.ArgumentParser(description="""Simple example of training script.""" ) parser.add_argument( """--mixed_precision""" ,type=lowercase ,default=lowercase ,choices=["""no""", """fp16""", """bf16""", """fp8"""] ,help="""Whether to use mixed precision. Choose""" """between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.""" """and an Nvidia Ampere GPU.""" ,) parser.add_argument("""--cpu""" ,action="""store_true""" ,help="""If passed, will train on the CPU.""" ) _UpperCAmelCase = parser.parse_args() _UpperCAmelCase = {"""lr""": 2E-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16} training_function(lowercase ,lowercase ) if __name__ == "__main__": main()
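The new piece this example demonstrates is `find_executable_batch_size`; a minimal sketch of the decorator in isolation (the body of `train` here is hypothetical):

    from accelerate.utils import find_executable_batch_size

    @find_executable_batch_size(starting_batch_size=128)
    def train(batch_size):
        # If this body raises a CUDA out-of-memory error, the decorator
        # halves batch_size and calls the function again.
        print(f"trying batch size {batch_size}")

    train()  # called with no arguments; the decorator injects batch_size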
code_codestyle: 30
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_speech_available, is_torch_available, ) UpperCAmelCase__ = { """configuration_trocr""": ["""TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TrOCRConfig"""], """processing_trocr""": ["""TrOCRProcessor"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ = [ """TROCR_PRETRAINED_MODEL_ARCHIVE_LIST""", """TrOCRForCausalLM""", """TrOCRPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig from .processing_trocr import TrOCRProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel else: import sys UpperCAmelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
style_context_codestyle: 30
label: 1
"""simple docstring""" import argparse from tax import checkpoints from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ): """simple docstring""" _UpperCAmelCase = AutoConfig.from_pretrained(lowercase ) _UpperCAmelCase = FlaxAutoModelForSeqaSeqLM.from_config(config=lowercase ) _UpperCAmelCase = checkpoints.load_tax_checkpoint(lowercase ) _UpperCAmelCase = """wi_0""" in tax_model["""target"""]["""encoder"""]["""layers_0"""]["""mlp"""] if config.model_type == "t5": _UpperCAmelCase = """SelfAttention""" if config.model_type == "longt5" and config.encoder_attention_type == "local": _UpperCAmelCase = """LocalSelfAttention""" elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global": _UpperCAmelCase = """TransientGlobalSelfAttention""" else: raise ValueError( """Given config is expected to have `model_type='t5'`, or `model_type='longt5` with `encoder_attention_type`""" """ attribute with a value from ['local', 'transient-global].""" ) # Encoder for layer_index in range(config.num_layers ): _UpperCAmelCase = f'''layers_{str(lowercase )}''' # Self-Attention _UpperCAmelCase = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""key"""]["""kernel"""] _UpperCAmelCase = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""out"""]["""kernel"""] _UpperCAmelCase = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""query"""]["""kernel"""] _UpperCAmelCase = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""value"""]["""kernel"""] # Global input layer norm if config.model_type == "longt5" and config.encoder_attention_type == "transient-global": _UpperCAmelCase = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""T5LayerNorm_0"""]["""scale"""] # Layer Normalization _UpperCAmelCase = tax_model["""target"""]["""encoder"""][layer_name]["""pre_attention_layer_norm"""]["""scale"""] if split_mlp_wi: _UpperCAmelCase = tax_model["""target"""]["""encoder"""][layer_name]["""mlp"""]["""wi_0"""]["""kernel"""] _UpperCAmelCase = tax_model["""target"""]["""encoder"""][layer_name]["""mlp"""]["""wi_1"""]["""kernel"""] else: _UpperCAmelCase = tax_model["""target"""]["""encoder"""][layer_name]["""mlp"""]["""wi"""]["""kernel"""] _UpperCAmelCase = tax_model["""target"""]["""encoder"""][layer_name]["""mlp"""]["""wo"""]["""kernel"""] # Layer Normalization _UpperCAmelCase = tax_model["""target"""]["""encoder"""][layer_name]["""pre_mlp_layer_norm"""]["""scale"""] # Assigning _UpperCAmelCase = flax_model.params["""encoder"""]["""block"""][str(lowercase )]["""layer"""] _UpperCAmelCase = tax_attention_key _UpperCAmelCase = tax_attention_out _UpperCAmelCase = tax_attention_query _UpperCAmelCase = tax_attention_value _UpperCAmelCase = tax_attention_layer_norm # Global input layer norm if config.model_type == "longt5" and config.encoder_attention_type == "transient-global": _UpperCAmelCase = tax_global_layer_norm if split_mlp_wi: _UpperCAmelCase = tax_mlp_wi_a _UpperCAmelCase = tax_mlp_wi_a else: _UpperCAmelCase = tax_mlp_wi _UpperCAmelCase = tax_mlp_wo _UpperCAmelCase = tax_mlp_layer_norm _UpperCAmelCase = flax_model_encoder_layer_block # Only for layer 0: _UpperCAmelCase = tax_model["""target"""]["""encoder"""]["""relpos_bias"""]["""rel_embedding"""].T _UpperCAmelCase = tax_encoder_rel_embedding # Side/global relative position_bias + layer norm if config.model_type == "longt5" and config.encoder_attention_type == "transient-global": 
_UpperCAmelCase = tax_model["""target"""]["""encoder"""]["""side_relpos_bias"""]["""rel_embedding"""].T _UpperCAmelCase = tax_encoder_global_rel_embedding # Assigning _UpperCAmelCase = tax_model["""target"""]["""encoder"""]["""encoder_norm"""]["""scale"""] _UpperCAmelCase = tax_encoder_norm # Decoder for layer_index in range(config.num_layers ): _UpperCAmelCase = f'''layers_{str(lowercase )}''' # Self-Attention _UpperCAmelCase = tax_model["""target"""]["""decoder"""][layer_name]["""self_attention"""]["""key"""]["""kernel"""] _UpperCAmelCase = tax_model["""target"""]["""decoder"""][layer_name]["""self_attention"""]["""out"""]["""kernel"""] _UpperCAmelCase = tax_model["""target"""]["""decoder"""][layer_name]["""self_attention"""]["""query"""]["""kernel"""] _UpperCAmelCase = tax_model["""target"""]["""decoder"""][layer_name]["""self_attention"""]["""value"""]["""kernel"""] # Layer Normalization _UpperCAmelCase = tax_model["""target"""]["""decoder"""][layer_name]["""pre_self_attention_layer_norm"""][ """scale""" ] # Encoder-Decoder-Attention _UpperCAmelCase = tax_model["""target"""]["""decoder"""][layer_name]["""encoder_decoder_attention"""] _UpperCAmelCase = tax_enc_dec_attention_module["""key"""]["""kernel"""] _UpperCAmelCase = tax_enc_dec_attention_module["""out"""]["""kernel"""] _UpperCAmelCase = tax_enc_dec_attention_module["""query"""]["""kernel"""] _UpperCAmelCase = tax_enc_dec_attention_module["""value"""]["""kernel"""] # Layer Normalization _UpperCAmelCase = tax_model["""target"""]["""decoder"""][layer_name]["""pre_cross_attention_layer_norm"""]["""scale"""] # MLP if split_mlp_wi: _UpperCAmelCase = tax_model["""target"""]["""decoder"""][layer_name]["""mlp"""]["""wi_0"""]["""kernel"""] _UpperCAmelCase = tax_model["""target"""]["""decoder"""][layer_name]["""mlp"""]["""wi_1"""]["""kernel"""] else: _UpperCAmelCase = tax_model["""target"""]["""decoder"""][layer_name]["""mlp"""]["""wi"""]["""kernel"""] _UpperCAmelCase = tax_model["""target"""]["""decoder"""][layer_name]["""mlp"""]["""wo"""]["""kernel"""] # Layer Normalization _UpperCAmelCase = tax_model["""target"""]["""decoder"""][layer_name]["""pre_mlp_layer_norm"""]["""scale"""] # Assigning _UpperCAmelCase = flax_model.params["""decoder"""]["""block"""][str(lowercase )]["""layer"""] _UpperCAmelCase = tax_attention_key _UpperCAmelCase = tax_attention_out _UpperCAmelCase = tax_attention_query _UpperCAmelCase = tax_attention_value _UpperCAmelCase = tax_pre_attention_layer_norm _UpperCAmelCase = tax_enc_dec_attention_key _UpperCAmelCase = tax_enc_dec_attention_out _UpperCAmelCase = tax_enc_dec_attention_query _UpperCAmelCase = tax_enc_dec_attention_value _UpperCAmelCase = tax_cross_layer_norm if split_mlp_wi: _UpperCAmelCase = tax_mlp_wi_a _UpperCAmelCase = tax_mlp_wi_a else: _UpperCAmelCase = tax_mlp_wi _UpperCAmelCase = tax_mlp_wo _UpperCAmelCase = txa_mlp_layer_norm _UpperCAmelCase = flax_model_decoder_layer_block # Decoder Normalization _UpperCAmelCase = tax_model["""target"""]["""decoder"""]["""decoder_norm"""]["""scale"""] _UpperCAmelCase = txa_decoder_norm # Only for layer 0: _UpperCAmelCase = tax_model["""target"""]["""decoder"""]["""relpos_bias"""]["""rel_embedding"""].T _UpperCAmelCase = tax_decoder_rel_embedding # Token Embeddings _UpperCAmelCase = tax_model["""target"""]["""token_embedder"""]["""embedding"""] _UpperCAmelCase = txa_token_embeddings # LM Head (only in v1.1 and LongT5 checkpoints) if "logits_dense" in tax_model["target"]["decoder"]: _UpperCAmelCase = 
tax_model["""target"""]["""decoder"""]["""logits_dense"""]["""kernel"""] flax_model.save_pretrained(lowercase ) print("""T5X Model was sucessfully converted!""" ) if __name__ == "__main__": UpperCAmelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--t5x_checkpoint_path""", default=None, type=str, required=True, help="""Path the T5X checkpoint.""" ) parser.add_argument("""--config_name""", default=None, type=str, required=True, help="""Config name of LongT5/T5 model.""") parser.add_argument( """--flax_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output FLAX model.""" ) UpperCAmelCase__ = parser.parse_args() convert_tax_checkpoint_to_flax(args.tax_checkpoint_path, args.config_name, args.flax_dump_folder_path)
code_codestyle: 30
"""simple docstring""" # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from argparse import ArgumentParser from accelerate.commands.config import get_config_parser from accelerate.commands.env import env_command_parser from accelerate.commands.launch import launch_command_parser from accelerate.commands.test import test_command_parser from accelerate.commands.tpu import tpu_command_parser def __UpperCAmelCase ( ): """simple docstring""" _UpperCAmelCase = ArgumentParser("""Accelerate CLI tool""" ,usage="""accelerate <command> [<args>]""" ,allow_abbrev=lowercase ) _UpperCAmelCase = parser.add_subparsers(help="""accelerate command helpers""" ) # Register commands get_config_parser(subparsers=lowercase ) env_command_parser(subparsers=lowercase ) launch_command_parser(subparsers=lowercase ) tpu_command_parser(subparsers=lowercase ) test_command_parser(subparsers=lowercase ) # Let's go _UpperCAmelCase = parser.parse_args() if not hasattr(lowercase ,"""func""" ): parser.print_help() exit(1 ) # Run args.func(lowercase ) if __name__ == "__main__": main()
style_context_codestyle: 30
label: 1
"""simple docstring""" from typing import Dict import numpy as np import torch from . import residue_constants as rc from .tensor_utils import tensor_tree_map, tree_map def __UpperCAmelCase ( lowercase ): """simple docstring""" _UpperCAmelCase = [] _UpperCAmelCase = [] _UpperCAmelCase = [] for rt in rc.restypes: _UpperCAmelCase = rc.restype_name_to_atomaa_names[rc.restype_atoa[rt]] restype_atomaa_to_atomaa_list.append([(rc.atom_order[name] if name else 0) for name in atom_names] ) _UpperCAmelCase = {name: i for i, name in enumerate(lowercase )} restype_atomaa_to_atomaa_list.append( [(atom_name_to_idxaa[name] if name in atom_name_to_idxaa else 0) for name in rc.atom_types] ) restype_atomaa_mask_list.append([(1.0 if name else 0.0) for name in atom_names] ) # Add dummy mapping for restype 'UNK' restype_atomaa_to_atomaa_list.append([0] * 14 ) restype_atomaa_to_atomaa_list.append([0] * 37 ) restype_atomaa_mask_list.append([0.0] * 14 ) _UpperCAmelCase = torch.tensor( lowercase ,dtype=torch.intaa ,device=protein["""aatype"""].device ,) _UpperCAmelCase = torch.tensor( lowercase ,dtype=torch.intaa ,device=protein["""aatype"""].device ,) _UpperCAmelCase = torch.tensor( lowercase ,dtype=torch.floataa ,device=protein["""aatype"""].device ,) _UpperCAmelCase = protein["""aatype"""].to(torch.long ) # create the mapping for (residx, atom14) --> atom37, i.e. an array # with shape (num_res, 14) containing the atom37 indices for this protein _UpperCAmelCase = restype_atomaa_to_atomaa[protein_aatype] _UpperCAmelCase = restype_atomaa_mask[protein_aatype] _UpperCAmelCase = residx_atomaa_mask _UpperCAmelCase = residx_atomaa_to_atomaa.long() # create the gather indices for mapping back _UpperCAmelCase = restype_atomaa_to_atomaa[protein_aatype] _UpperCAmelCase = residx_atomaa_to_atomaa.long() # create the corresponding mask _UpperCAmelCase = torch.zeros([21, 37] ,dtype=torch.floataa ,device=protein["""aatype"""].device ) for restype, restype_letter in enumerate(rc.restypes ): _UpperCAmelCase = rc.restype_atoa[restype_letter] _UpperCAmelCase = rc.residue_atoms[restype_name] for atom_name in atom_names: _UpperCAmelCase = rc.atom_order[atom_name] _UpperCAmelCase = 1 _UpperCAmelCase = restype_atomaa_mask[protein_aatype] _UpperCAmelCase = residx_atomaa_mask return protein def __UpperCAmelCase ( lowercase ): """simple docstring""" _UpperCAmelCase = tree_map(lambda lowercase : torch.tensor(lowercase ,device=batch["""aatype"""].device ) ,lowercase ,np.ndarray ) _UpperCAmelCase = tensor_tree_map(lambda lowercase : np.array(lowercase ) ,make_atomaa_masks(lowercase ) ) return out
code_codestyle: 30
"""simple docstring""" def __UpperCAmelCase ( lowercase ): """simple docstring""" return " ".join(input_str.split()[::-1] ) if __name__ == "__main__": import doctest doctest.testmod()
style_context_codestyle: 30
label: 1
"""simple docstring""" from math import factorial def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ): """simple docstring""" if successes > trials: raise ValueError("""successes must be lower or equal to trials""" ) if trials < 0 or successes < 0: raise ValueError("""the function is defined for non-negative integers""" ) if not isinstance(lowercase ,lowercase ) or not isinstance(lowercase ,lowercase ): raise ValueError("""the function is defined for non-negative integers""" ) if not 0 < prob < 1: raise ValueError("""prob has to be in range of 1 - 0""" ) _UpperCAmelCase = (prob**successes) * ((1 - prob) ** (trials - successes)) # Calculate the binomial coefficient: n! / k!(n-k)! _UpperCAmelCase = float(factorial(lowercase ) ) coefficient /= factorial(lowercase ) * factorial(trials - successes ) return probability * coefficient if __name__ == "__main__": from doctest import testmod testmod() print("""Probability of 2 successes out of 4 trails""") print("""with probability of 0.75 is:""", end=""" """) print(binomial_distribution(2, 4, 0.75))
code_codestyle: 30
"""simple docstring""" import csv import tweepy # Twitter API credentials UpperCAmelCase__ = """""" UpperCAmelCase__ = """""" UpperCAmelCase__ = """""" UpperCAmelCase__ = """""" def __UpperCAmelCase ( lowercase ): """simple docstring""" # authorize twitter, initialize tweepy _UpperCAmelCase = tweepy.OAuthHandler(lowercase ,lowercase ) auth.set_access_token(lowercase ,lowercase ) _UpperCAmelCase = tweepy.API(lowercase ) # initialize a list to hold all the tweepy Tweets _UpperCAmelCase = [] # make initial request for most recent tweets (200 is the maximum allowed count) _UpperCAmelCase = api.user_timeline(screen_name=lowercase ,count=2_00 ) # save most recent tweets alltweets.extend(lowercase ) # save the id of the oldest tweet less one _UpperCAmelCase = alltweets[-1].id - 1 # keep grabbing tweets until there are no tweets left to grab while len(lowercase ) > 0: print(f'''getting tweets before {oldest}''' ) # all subsequent requests use the max_id param to prevent duplicates _UpperCAmelCase = api.user_timeline( screen_name=lowercase ,count=2_00 ,max_id=lowercase ) # save most recent tweets alltweets.extend(lowercase ) # update the id of the oldest tweet less one _UpperCAmelCase = alltweets[-1].id - 1 print(f'''...{len(lowercase )} tweets downloaded so far''' ) # transform the tweepy tweets into a 2D array that will populate the csv _UpperCAmelCase = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets] # write the csv with open(f'''new_{screen_name}_tweets.csv''' ,"""w""" ) as f: _UpperCAmelCase = csv.writer(lowercase ) writer.writerow(["""id""", """created_at""", """text"""] ) writer.writerows(lowercase ) if __name__ == "__main__": # pass in the username of the account you want to download get_all_tweets("""FirePing32""")
style_context_codestyle: 30
label: 1
"""simple docstring""" from argparse import ArgumentParser from . import BaseTransformersCLICommand def __UpperCAmelCase ( lowercase ): """simple docstring""" return DownloadCommand(args.model ,args.cache_dir ,args.force ,args.trust_remote_code ) class a ( lowerCAmelCase_ ): @staticmethod def lowerCAmelCase_ ( __lowerCAmelCase : ArgumentParser ): _UpperCAmelCase = parser.add_parser("""download""" ) download_parser.add_argument( """--cache-dir""" , type=__lowerCAmelCase , default=__lowerCAmelCase , help="""Path to location to store the models""" ) download_parser.add_argument( """--force""" , action="""store_true""" , help="""Force the model to be download even if already in cache-dir""" ) download_parser.add_argument( """--trust-remote-code""" , action="""store_true""" , help="""Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine""" , ) download_parser.add_argument("""model""" , type=__lowerCAmelCase , help="""Name of the model to download""" ) download_parser.set_defaults(func=__lowerCAmelCase ) def __init__( self : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : str , __lowerCAmelCase : bool , __lowerCAmelCase : bool ): _UpperCAmelCase = model _UpperCAmelCase = cache _UpperCAmelCase = force _UpperCAmelCase = trust_remote_code def lowerCAmelCase_ ( self : List[str] ): from ..models.auto import AutoModel, AutoTokenizer AutoModel.from_pretrained( self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code ) AutoTokenizer.from_pretrained( self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
code_codestyle: 30
"""simple docstring""" import multiprocessing import time from arguments import PretokenizationArguments from datasets import load_dataset from transformers import AutoTokenizer, HfArgumentParser def __UpperCAmelCase ( lowercase ): """simple docstring""" _UpperCAmelCase = {} _UpperCAmelCase = tokenizer(example["""content"""] ,truncation=lowercase )["""input_ids"""] _UpperCAmelCase = len(example["""content"""] ) / len(output["""input_ids"""] ) return output UpperCAmelCase__ = HfArgumentParser(PretokenizationArguments) UpperCAmelCase__ = parser.parse_args() if args.num_workers is None: UpperCAmelCase__ = multiprocessing.cpu_count() UpperCAmelCase__ = AutoTokenizer.from_pretrained(args.tokenizer_dir) UpperCAmelCase__ = time.time() UpperCAmelCase__ = load_dataset(args.dataset_name, split="""train""") print(F'''Dataset loaded in {time.time()-t_start:.2f}s''') UpperCAmelCase__ = time.time() UpperCAmelCase__ = ds.map( tokenize, num_proc=args.num_workers, remove_columns=[ """repo_name""", """path""", """copies""", """size""", """content""", """license""", """hash""", """line_mean""", """line_max""", """alpha_frac""", """autogenerated""", ], ) print(F'''Dataset tokenized in {time.time()-t_start:.2f}s''') UpperCAmelCase__ = time.time() ds.push_to_hub(args.tokenized_data_repo) print(F'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
style_context_codestyle: 30
label: 1
"""simple docstring""" import os import pytest from datasets import ( get_dataset_config_info, get_dataset_config_names, get_dataset_infos, get_dataset_split_names, inspect_dataset, inspect_metric, ) UpperCAmelCase__ = pytest.mark.integration @pytest.mark.parametrize("""path""" ,["""paws""", """csv"""] ) def __UpperCAmelCase ( lowercase ,lowercase ): """simple docstring""" inspect_dataset(lowercase ,lowercase ) _UpperCAmelCase = path + """.py""" assert script_name in os.listdir(lowercase ) assert "__pycache__" not in os.listdir(lowercase ) @pytest.mark.filterwarnings("""ignore:inspect_metric is deprecated:FutureWarning""" ) @pytest.mark.filterwarnings("""ignore:metric_module_factory is deprecated:FutureWarning""" ) @pytest.mark.parametrize("""path""" ,["""accuracy"""] ) def __UpperCAmelCase ( lowercase ,lowercase ): """simple docstring""" inspect_metric(lowercase ,lowercase ) _UpperCAmelCase = path + """.py""" assert script_name in os.listdir(lowercase ) assert "__pycache__" not in os.listdir(lowercase ) @pytest.mark.parametrize( """path, config_name, expected_splits""" ,[ ("""squad""", """plain_text""", ["""train""", """validation"""]), ("""dalle-mini/wit""", """dalle-mini--wit""", ["""train"""]), ("""paws""", """labeled_final""", ["""train""", """test""", """validation"""]), ] ,) def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ): """simple docstring""" _UpperCAmelCase = get_dataset_config_info(lowercase ,config_name=lowercase ) assert info.config_name == config_name assert list(info.splits.keys() ) == expected_splits @pytest.mark.parametrize( """path, config_name, expected_exception""" ,[ ("""paws""", None, ValueError), ] ,) def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ): """simple docstring""" with pytest.raises(lowercase ): get_dataset_config_info(lowercase ,config_name=lowercase ) @pytest.mark.parametrize( """path, expected""" ,[ ("""squad""", """plain_text"""), ("""acronym_identification""", """default"""), ("""lhoestq/squad""", """plain_text"""), ("""lhoestq/test""", """default"""), ("""lhoestq/demo1""", """lhoestq--demo1"""), ("""dalle-mini/wit""", """dalle-mini--wit"""), ] ,) def __UpperCAmelCase ( lowercase ,lowercase ): """simple docstring""" _UpperCAmelCase = get_dataset_config_names(lowercase ) assert expected in config_names @pytest.mark.parametrize( """path, expected_configs, expected_splits_in_first_config""" ,[ ("""squad""", ["""plain_text"""], ["""train""", """validation"""]), ("""dalle-mini/wit""", ["""dalle-mini--wit"""], ["""train"""]), ("""paws""", ["""labeled_final""", """labeled_swap""", """unlabeled_final"""], ["""train""", """test""", """validation"""]), ] ,) def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ): """simple docstring""" _UpperCAmelCase = get_dataset_infos(lowercase ) assert list(infos.keys() ) == expected_configs _UpperCAmelCase = expected_configs[0] assert expected_config in infos _UpperCAmelCase = infos[expected_config] assert info.config_name == expected_config assert list(info.splits.keys() ) == expected_splits_in_first_config @pytest.mark.parametrize( """path, expected_config, expected_splits""" ,[ ("""squad""", """plain_text""", ["""train""", """validation"""]), ("""dalle-mini/wit""", """dalle-mini--wit""", ["""train"""]), ("""paws""", """labeled_final""", ["""train""", """test""", """validation"""]), ] ,) def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ): """simple docstring""" _UpperCAmelCase = get_dataset_infos(lowercase ) assert expected_config in infos _UpperCAmelCase = infos[expected_config] assert 
info.config_name == expected_config assert list(info.splits.keys() ) == expected_splits @pytest.mark.parametrize( """path, config_name, expected_exception""" ,[ ("""paws""", None, ValueError), ] ,) def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ): """simple docstring""" with pytest.raises(lowercase ): get_dataset_split_names(lowercase ,config_name=lowercase )
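For reference, the split-name API exercised by the last test resolves metadata without downloading the data itself; the expected output below mirrors the `squad` case parametrized above:

    from datasets import get_dataset_split_names

    print(get_dataset_split_names("squad", config_name="plain_text"))  # ['train', 'validation']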
code_codestyle: 30
"""simple docstring""" from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...onnx.utils import compute_effective_axis_dimension from ...utils import logging if TYPE_CHECKING: from ...processing_utils import ProcessorMixin from ...utils import TensorType UpperCAmelCase__ = logging.get_logger(__name__) UpperCAmelCase__ = { """microsoft/layoutlmv3-base""": """https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json""", } class a ( lowerCAmelCase_ ): _snake_case : Any = 'layoutlmv3' def __init__( self : Optional[Any] , __lowerCAmelCase : Tuple=5_0265 , __lowerCAmelCase : Union[str, Any]=768 , __lowerCAmelCase : str=12 , __lowerCAmelCase : int=12 , __lowerCAmelCase : Any=3072 , __lowerCAmelCase : Any="gelu" , __lowerCAmelCase : Any=0.1 , __lowerCAmelCase : Tuple=0.1 , __lowerCAmelCase : Any=512 , __lowerCAmelCase : List[str]=2 , __lowerCAmelCase : Optional[Any]=0.02 , __lowerCAmelCase : Optional[int]=1e-5 , __lowerCAmelCase : int=1 , __lowerCAmelCase : Optional[Any]=0 , __lowerCAmelCase : Optional[Any]=2 , __lowerCAmelCase : List[str]=1024 , __lowerCAmelCase : Any=128 , __lowerCAmelCase : int=128 , __lowerCAmelCase : Tuple=True , __lowerCAmelCase : List[Any]=32 , __lowerCAmelCase : Any=128 , __lowerCAmelCase : int=64 , __lowerCAmelCase : List[str]=256 , __lowerCAmelCase : Union[str, Any]=True , __lowerCAmelCase : Any=True , __lowerCAmelCase : List[str]=True , __lowerCAmelCase : Optional[Any]=224 , __lowerCAmelCase : List[Any]=3 , __lowerCAmelCase : int=16 , __lowerCAmelCase : Optional[Any]=None , **__lowerCAmelCase : Union[str, Any] , ): super().__init__( vocab_size=__lowerCAmelCase , hidden_size=__lowerCAmelCase , num_hidden_layers=__lowerCAmelCase , num_attention_heads=__lowerCAmelCase , intermediate_size=__lowerCAmelCase , hidden_act=__lowerCAmelCase , hidden_dropout_prob=__lowerCAmelCase , attention_probs_dropout_prob=__lowerCAmelCase , max_position_embeddings=__lowerCAmelCase , type_vocab_size=__lowerCAmelCase , initializer_range=__lowerCAmelCase , layer_norm_eps=__lowerCAmelCase , pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , **__lowerCAmelCase , ) _UpperCAmelCase = max_ad_position_embeddings _UpperCAmelCase = coordinate_size _UpperCAmelCase = shape_size _UpperCAmelCase = has_relative_attention_bias _UpperCAmelCase = rel_pos_bins _UpperCAmelCase = max_rel_pos _UpperCAmelCase = has_spatial_attention_bias _UpperCAmelCase = rel_ad_pos_bins _UpperCAmelCase = max_rel_ad_pos _UpperCAmelCase = text_embed _UpperCAmelCase = visual_embed _UpperCAmelCase = input_size _UpperCAmelCase = num_channels _UpperCAmelCase = patch_size _UpperCAmelCase = classifier_dropout class a ( lowerCAmelCase_ ): _snake_case : str = version.parse('1.12' ) @property def lowerCAmelCase_ ( self : Dict ): # The order of inputs is different for question answering and sequence classification if self.task in ["question-answering", "sequence-classification"]: return OrderedDict( [ ("""input_ids""", {0: """batch""", 1: """sequence"""}), ("""attention_mask""", {0: """batch""", 1: """sequence"""}), ("""bbox""", {0: """batch""", 1: """sequence"""}), ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) else: return OrderedDict( [ ("""input_ids""", {0: """batch""", 1: """sequence"""}), ("""bbox""", {0: """batch""", 1: """sequence"""}), ("""attention_mask""", {0: 
"""batch""", 1: """sequence"""}), ("""pixel_values""", {0: """batch""", 1: """num_channels"""}), ] ) @property def lowerCAmelCase_ ( self : List[Any] ): return 1e-5 @property def lowerCAmelCase_ ( self : List[str] ): return 12 def lowerCAmelCase_ ( self : str , __lowerCAmelCase : "ProcessorMixin" , __lowerCAmelCase : int = -1 , __lowerCAmelCase : int = -1 , __lowerCAmelCase : bool = False , __lowerCAmelCase : Optional["TensorType"] = None , __lowerCAmelCase : int = 3 , __lowerCAmelCase : int = 40 , __lowerCAmelCase : int = 40 , ): setattr(processor.image_processor , """apply_ocr""" , __lowerCAmelCase ) # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX _UpperCAmelCase = compute_effective_axis_dimension( __lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX _UpperCAmelCase = processor.tokenizer.num_special_tokens_to_add(__lowerCAmelCase ) _UpperCAmelCase = compute_effective_axis_dimension( __lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__lowerCAmelCase ) # Generate dummy inputs according to compute batch and sequence _UpperCAmelCase = [[""" """.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size # Generate dummy bounding boxes _UpperCAmelCase = [[[48, 84, 73, 128]]] * batch_size # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX # batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch) _UpperCAmelCase = self._generate_dummy_images(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase = dict( processor( __lowerCAmelCase , text=__lowerCAmelCase , boxes=__lowerCAmelCase , return_tensors=__lowerCAmelCase , ) ) return inputs
style_context_codestyle: 30
label: 1
"""simple docstring""" import os def __UpperCAmelCase ( lowercase = "input.txt" ): """simple docstring""" with open(os.path.join(os.path.dirname(lowercase ) ,lowercase ) ) as input_file: _UpperCAmelCase = [ [int(lowercase ) for element in line.split(""",""" )] for line in input_file.readlines() ] _UpperCAmelCase = len(lowercase ) _UpperCAmelCase = len(matrix[0] ) _UpperCAmelCase = [[-1 for _ in range(lowercase )] for _ in range(lowercase )] for i in range(lowercase ): _UpperCAmelCase = matrix[i][0] for j in range(1 ,lowercase ): for i in range(lowercase ): _UpperCAmelCase = minimal_path_sums[i][j - 1] + matrix[i][j] for i in range(1 ,lowercase ): _UpperCAmelCase = min( minimal_path_sums[i][j] ,minimal_path_sums[i - 1][j] + matrix[i][j] ) for i in range(rows - 2 ,-1 ,-1 ): _UpperCAmelCase = min( minimal_path_sums[i][j] ,minimal_path_sums[i + 1][j] + matrix[i][j] ) return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums ) if __name__ == "__main__": print(F'''{solution() = }''')
code_codestyle: 30
"""simple docstring""" import csv from collections import defaultdict from dataclasses import dataclass, field from typing import List, Optional import matplotlib.pyplot as plt import numpy as np from matplotlib.ticker import ScalarFormatter from transformers import HfArgumentParser def __UpperCAmelCase ( lowercase=None ,lowercase=None ): """simple docstring""" return field(default_factory=lambda: default ,metadata=lowercase ) @dataclass class a : _snake_case : str = field( metadata={'help': 'The csv file to plot.'} , ) _snake_case : bool = field( default=lowerCAmelCase_ , metadata={'help': 'Whether to plot along batch size or sequence length. Defaults to sequence length.'} , ) _snake_case : bool = field( default=lowerCAmelCase_ , metadata={'help': 'Whether the csv file has time results or memory results. Defaults to memory results.'} , ) _snake_case : bool = field( default=lowerCAmelCase_ , metadata={'help': 'Disable logarithmic scale when plotting'} , ) _snake_case : bool = field( default=lowerCAmelCase_ , metadata={ 'help': 'Whether the csv file has training results or inference results. Defaults to inference results.' } , ) _snake_case : Optional[str] = field( default=lowerCAmelCase_ , metadata={'help': 'Filename under which the plot will be saved. If unused no plot is saved.'} , ) _snake_case : Optional[List[str]] = list_field( default=lowerCAmelCase_ , metadata={'help': 'List of model names that are used instead of the ones in the csv file.'} ) def __UpperCAmelCase ( lowercase ): """simple docstring""" try: int(lowercase ) return True except ValueError: return False def __UpperCAmelCase ( lowercase ): """simple docstring""" try: float(lowercase ) return True except ValueError: return False class a : def __init__( self : int , __lowerCAmelCase : Union[str, Any] ): _UpperCAmelCase = args _UpperCAmelCase = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}} ) with open(self.args.csv_file , newline="""""" ) as csv_file: _UpperCAmelCase = csv.DictReader(__lowerCAmelCase ) for row in reader: _UpperCAmelCase = row["""model"""] self.result_dict[model_name]["bsz"].append(int(row["""batch_size"""] ) ) self.result_dict[model_name]["seq_len"].append(int(row["""sequence_length"""] ) ) if can_convert_to_int(row["""result"""] ): # value is not None _UpperCAmelCase = int(row["""result"""] ) elif can_convert_to_float(row["""result"""] ): # value is not None _UpperCAmelCase = float(row["""result"""] ) def lowerCAmelCase_ ( self : Optional[Any] ): _UpperCAmelCase , _UpperCAmelCase = plt.subplots() _UpperCAmelCase = """Time usage""" if self.args.is_time else """Memory usage""" _UpperCAmelCase = title_str + """ for training""" if self.args.is_train else title_str + """ for inference""" if not self.args.no_log_scale: # set logarithm scales ax.set_xscale("""log""" ) ax.set_yscale("""log""" ) for axis in [ax.xaxis, ax.yaxis]: axis.set_major_formatter(ScalarFormatter() ) for model_name_idx, model_name in enumerate(self.result_dict.keys() ): _UpperCAmelCase = sorted(set(self.result_dict[model_name]["""bsz"""] ) ) _UpperCAmelCase = sorted(set(self.result_dict[model_name]["""seq_len"""] ) ) _UpperCAmelCase = self.result_dict[model_name]["""result"""] ((_UpperCAmelCase) , (_UpperCAmelCase)) = ( (batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes) ) _UpperCAmelCase = ( model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx] ) for inner_loop_value in inner_loop_array: if self.args.plot_along_batch: _UpperCAmelCase = 
np.asarray( [results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results] , dtype=__lowerCAmelCase , ) else: _UpperCAmelCase = np.asarray( [results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results] , dtype=np.floataa , ) ((_UpperCAmelCase) , (_UpperCAmelCase)) = ( ("""batch_size""", """len""") if self.args.plot_along_batch else ("""in #tokens""", """bsz""") ) _UpperCAmelCase = np.asarray(__lowerCAmelCase , __lowerCAmelCase )[: len(__lowerCAmelCase )] plt.scatter( __lowerCAmelCase , __lowerCAmelCase , label=f'''{label_model_name} - {inner_loop_label}: {inner_loop_value}''' ) plt.plot(__lowerCAmelCase , __lowerCAmelCase , """--""" ) title_str += f''' {label_model_name} vs.''' _UpperCAmelCase = title_str[:-4] _UpperCAmelCase = """Time in s""" if self.args.is_time else """Memory in MB""" # plot plt.title(__lowerCAmelCase ) plt.xlabel(__lowerCAmelCase ) plt.ylabel(__lowerCAmelCase ) plt.legend() if self.args.figure_png_file is not None: plt.savefig(self.args.figure_png_file ) else: plt.show() def __UpperCAmelCase ( ): """simple docstring""" _UpperCAmelCase = HfArgumentParser(lowercase ) _UpperCAmelCase = parser.parse_args_into_dataclasses()[0] _UpperCAmelCase = Plot(args=lowercase ) plot.plot() if __name__ == "__main__": main()
style_context_codestyle: 30
label: 1
"""simple docstring""" import argparse import json import subprocess def __UpperCAmelCase ( lowercase ,lowercase ): """simple docstring""" _UpperCAmelCase = [] _UpperCAmelCase = ( f'''curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"''' """ https://api.github.com/repos/huggingface/transformers/actions/runners""" ) _UpperCAmelCase = subprocess.run(lowercase ,shell=lowercase ,stdout=subprocess.PIPE ) _UpperCAmelCase = output.stdout.decode("""utf-8""" ) _UpperCAmelCase = json.loads(lowercase ) _UpperCAmelCase = status["""runners"""] for runner in runners: if runner["name"] in target_runners: if runner["status"] == "offline": offline_runners.append(lowercase ) # save the result so we can report them on Slack with open("""offline_runners.txt""" ,"""w""" ) as fp: fp.write(json.dumps(lowercase ) ) if len(lowercase ) > 0: _UpperCAmelCase = """\n""".join([x["""name"""] for x in offline_runners] ) raise ValueError(f'''The following runners are offline:\n{failed}''' ) if __name__ == "__main__": def __UpperCAmelCase ( lowercase ): """simple docstring""" return values.split(""",""" ) UpperCAmelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--target_runners""", default=None, type=list_str, required=True, help="""Comma-separated list of runners to check status.""", ) parser.add_argument( """--token""", default=None, type=str, required=True, help="""A token that has actions:read permission.""" ) UpperCAmelCase__ = parser.parse_args() get_runner_status(args.target_runners, args.token)
code_codestyle: 30
"""simple docstring""" import os import pytest from attr import dataclass UpperCAmelCase__ = """us-east-1""" # defaults region @dataclass class a : _snake_case : str _snake_case : Tuple = 'arn:aws:iam::558105141721:role/sagemaker_execution_role' _snake_case : List[Any] = { 'task_name': 'mnli', 'per_device_train_batch_size': 16, 'per_device_eval_batch_size': 16, 'do_train': True, 'do_eval': True, 'do_predict': True, 'output_dir': '/opt/ml/model', 'overwrite_output_dir': True, 'max_steps': 5_00, 'save_steps': 55_00, } _snake_case : Optional[Any] = {**hyperparameters, 'max_steps': 10_00} @property def lowerCAmelCase_ ( self : Optional[Any] ): if self.framework == "pytorch": return [ {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"}, {"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"}, {"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"}, ] else: return [ {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"}, {"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"}, {"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"}, ] @property def lowerCAmelCase_ ( self : Dict ): return f'''{self.framework}-transfromers-test''' @property def lowerCAmelCase_ ( self : Union[str, Any] ): return f'''./tests/sagemaker/scripts/{self.framework}''' @property def lowerCAmelCase_ ( self : Dict ): if self.framework == "pytorch": return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04" else: return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04" @pytest.fixture(scope="""class""" ) def __UpperCAmelCase ( lowercase ): """simple docstring""" _UpperCAmelCase = SageMakerTestEnvironment(framework=request.cls.framework )
30
1
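A hedged sketch of the curl-plus-json pattern the runner-status script above builds on; the token is a placeholder, and the endpoint is copied from the snippet:

import json
import subprocess

token = "<github-token>"  # placeholder; needs actions:read permission
cmd = (
    f'curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'
    " https://api.github.com/repos/huggingface/transformers/actions/runners"
)
output = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE)
status = json.loads(output.stdout.decode("utf-8"))
offline = [r["name"] for r in status.get("runners", []) if r["status"] == "offline"]
print(offline)  # names of runners that are currently offline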
"""simple docstring""" import argparse from pathlib import Path import requests import torch from PIL import Image from transformers import ( RobertaTokenizer, TrOCRConfig, TrOCRForCausalLM, TrOCRProcessor, VisionEncoderDecoderModel, ViTConfig, ViTImageProcessor, ViTModel, ) from transformers.utils import logging logging.set_verbosity_info() UpperCAmelCase__ = logging.get_logger(__name__) def __UpperCAmelCase ( lowercase ,lowercase ): """simple docstring""" _UpperCAmelCase = [] for i in range(encoder_config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( (f'''encoder.deit.blocks.{i}.norm1.weight''', f'''encoder.encoder.layer.{i}.layernorm_before.weight''') ) rename_keys.append((f'''encoder.deit.blocks.{i}.norm1.bias''', f'''encoder.encoder.layer.{i}.layernorm_before.bias''') ) rename_keys.append( (f'''encoder.deit.blocks.{i}.attn.proj.weight''', f'''encoder.encoder.layer.{i}.attention.output.dense.weight''') ) rename_keys.append( (f'''encoder.deit.blocks.{i}.attn.proj.bias''', f'''encoder.encoder.layer.{i}.attention.output.dense.bias''') ) rename_keys.append( (f'''encoder.deit.blocks.{i}.norm2.weight''', f'''encoder.encoder.layer.{i}.layernorm_after.weight''') ) rename_keys.append((f'''encoder.deit.blocks.{i}.norm2.bias''', f'''encoder.encoder.layer.{i}.layernorm_after.bias''') ) rename_keys.append( (f'''encoder.deit.blocks.{i}.mlp.fc1.weight''', f'''encoder.encoder.layer.{i}.intermediate.dense.weight''') ) rename_keys.append( (f'''encoder.deit.blocks.{i}.mlp.fc1.bias''', f'''encoder.encoder.layer.{i}.intermediate.dense.bias''') ) rename_keys.append( (f'''encoder.deit.blocks.{i}.mlp.fc2.weight''', f'''encoder.encoder.layer.{i}.output.dense.weight''') ) rename_keys.append((f'''encoder.deit.blocks.{i}.mlp.fc2.bias''', f'''encoder.encoder.layer.{i}.output.dense.bias''') ) # cls token, position embeddings and patch embeddings of encoder rename_keys.extend( [ ("""encoder.deit.cls_token""", """encoder.embeddings.cls_token"""), ("""encoder.deit.pos_embed""", """encoder.embeddings.position_embeddings"""), ("""encoder.deit.patch_embed.proj.weight""", """encoder.embeddings.patch_embeddings.projection.weight"""), ("""encoder.deit.patch_embed.proj.bias""", """encoder.embeddings.patch_embeddings.projection.bias"""), ("""encoder.deit.norm.weight""", """encoder.layernorm.weight"""), ("""encoder.deit.norm.bias""", """encoder.layernorm.bias"""), ] ) return rename_keys def __UpperCAmelCase ( lowercase ,lowercase ): """simple docstring""" for i in range(encoder_config.num_hidden_layers ): # queries, keys and values (only weights, no biases) _UpperCAmelCase = state_dict.pop(f'''encoder.deit.blocks.{i}.attn.qkv.weight''' ) _UpperCAmelCase = in_proj_weight[ : encoder_config.hidden_size, : ] _UpperCAmelCase = in_proj_weight[ encoder_config.hidden_size : encoder_config.hidden_size * 2, : ] _UpperCAmelCase = in_proj_weight[ -encoder_config.hidden_size :, : ] def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ): """simple docstring""" _UpperCAmelCase = dct.pop(lowercase ) _UpperCAmelCase = val def __UpperCAmelCase ( lowercase ): """simple docstring""" if "handwritten" in checkpoint_url: _UpperCAmelCase = """https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg""" # industry # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" # # url = 
"https://fki.tic.heia-fr.ch/static/img/a01-122.jpg" elif "printed" in checkpoint_url or "stage1" in checkpoint_url: _UpperCAmelCase = """https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg""" _UpperCAmelCase = Image.open(requests.get(lowercase ,stream=lowercase ).raw ).convert("""RGB""" ) return im @torch.no_grad() def __UpperCAmelCase ( lowercase ,lowercase ): """simple docstring""" _UpperCAmelCase = ViTConfig(image_size=3_84 ,qkv_bias=lowercase ) _UpperCAmelCase = TrOCRConfig() # size of the architecture if "base" in checkpoint_url: _UpperCAmelCase = 7_68 elif "large" in checkpoint_url: # use ViT-large encoder _UpperCAmelCase = 10_24 _UpperCAmelCase = 40_96 _UpperCAmelCase = 24 _UpperCAmelCase = 16 _UpperCAmelCase = 10_24 else: raise ValueError("""Should either find 'base' or 'large' in checkpoint URL""" ) # the large-printed + stage1 checkpoints uses sinusoidal position embeddings, no layernorm afterwards if "large-printed" in checkpoint_url or "stage1" in checkpoint_url: _UpperCAmelCase = False _UpperCAmelCase = """relu""" _UpperCAmelCase = 10_24 _UpperCAmelCase = True _UpperCAmelCase = False _UpperCAmelCase = False # load HuggingFace model _UpperCAmelCase = ViTModel(lowercase ,add_pooling_layer=lowercase ) _UpperCAmelCase = TrOCRForCausalLM(lowercase ) _UpperCAmelCase = VisionEncoderDecoderModel(encoder=lowercase ,decoder=lowercase ) model.eval() # load state_dict of original model, rename some keys _UpperCAmelCase = torch.hub.load_state_dict_from_url(lowercase ,map_location="""cpu""" ,check_hash=lowercase )["""model"""] _UpperCAmelCase = create_rename_keys(lowercase ,lowercase ) for src, dest in rename_keys: rename_key(lowercase ,lowercase ,lowercase ) read_in_q_k_v(lowercase ,lowercase ) # remove parameters we don't need del state_dict["encoder.deit.head.weight"] del state_dict["encoder.deit.head.bias"] del state_dict["decoder.version"] # add prefix to decoder keys for key, val in state_dict.copy().items(): _UpperCAmelCase = state_dict.pop(lowercase ) if key.startswith("""decoder""" ) and "output_projection" not in key: _UpperCAmelCase = val else: _UpperCAmelCase = val # load state dict model.load_state_dict(lowercase ) # Check outputs on an image _UpperCAmelCase = ViTImageProcessor(size=encoder_config.image_size ) _UpperCAmelCase = RobertaTokenizer.from_pretrained("""roberta-large""" ) _UpperCAmelCase = TrOCRProcessor(lowercase ,lowercase ) _UpperCAmelCase = processor(images=prepare_img(lowercase ) ,return_tensors="""pt""" ).pixel_values # verify logits _UpperCAmelCase = torch.tensor([[model.config.decoder.decoder_start_token_id]] ) _UpperCAmelCase = model(pixel_values=lowercase ,decoder_input_ids=lowercase ) _UpperCAmelCase = outputs.logits _UpperCAmelCase = torch.Size([1, 1, 5_02_65] ) if "trocr-base-handwritten" in checkpoint_url: _UpperCAmelCase = torch.tensor( [-1.45_02, -4.66_83, -0.53_47, -2.92_91, 9.14_35, -3.05_71, 8.97_64, 1.75_60, 8.73_58, -1.53_11] ) elif "trocr-large-handwritten" in checkpoint_url: _UpperCAmelCase = torch.tensor( [-2.64_37, -1.31_29, -2.25_96, -5.34_55, 6.35_39, 1.76_04, 5.49_91, 1.47_02, 5.61_13, 2.01_70] ) elif "trocr-base-printed" in checkpoint_url: _UpperCAmelCase = torch.tensor( [-5.68_16, -5.83_88, 1.13_98, -6.90_34, 6.85_05, -2.43_93, 1.22_84, -1.02_32, -1.96_61, -3.92_10] ) elif "trocr-large-printed" in checkpoint_url: _UpperCAmelCase = torch.tensor( [-6.01_62, -7.09_59, 4.41_55, -5.10_63, 7.04_68, -3.16_31, 2.64_66, -0.30_81, 
-0.81_06, -1.75_35] ) if "stage1" not in checkpoint_url: assert logits.shape == expected_shape, "Shape of logits not as expected" assert torch.allclose(logits[0, 0, :10] ,lowercase ,atol=1E-3 ), "First elements of logits not as expected" Path(lowercase ).mkdir(exist_ok=lowercase ) print(f'''Saving model to {pytorch_dump_folder_path}''' ) model.save_pretrained(lowercase ) print(f'''Saving processor to {pytorch_dump_folder_path}''' ) processor.save_pretrained(lowercase ) if __name__ == "__main__": UpperCAmelCase__ = argparse.ArgumentParser() parser.add_argument( """--checkpoint_url""", default="""https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt""", type=str, help="""URL to the original PyTorch checkpoint (.pth file).""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model.""" ) UpperCAmelCase__ = parser.parse_args() convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
30
"""simple docstring""" import string from math import logaa def __UpperCAmelCase ( lowercase ,lowercase ): """simple docstring""" _UpperCAmelCase = document.translate( str.maketrans("""""" ,"""""" ,string.punctuation ) ).replace("""\n""" ,"""""" ) _UpperCAmelCase = document_without_punctuation.split(""" """ ) # word tokenization return len([word for word in tokenize_document if word.lower() == term.lower()] ) def __UpperCAmelCase ( lowercase ,lowercase ): """simple docstring""" _UpperCAmelCase = corpus.lower().translate( str.maketrans("""""" ,"""""" ,string.punctuation ) ) # strip all punctuation and replace it with '' _UpperCAmelCase = corpus_without_punctuation.split("""\n""" ) _UpperCAmelCase = term.lower() return (len([doc for doc in docs if term in doc] ), len(lowercase )) def __UpperCAmelCase ( lowercase ,lowercase ,lowercase=False ): """simple docstring""" if smoothing: if n == 0: raise ValueError("""log10(0) is undefined.""" ) return round(1 + logaa(n / (1 + df) ) ,3 ) if df == 0: raise ZeroDivisionError("""df must be > 0""" ) elif n == 0: raise ValueError("""log10(0) is undefined.""" ) return round(logaa(n / df ) ,3 ) def __UpperCAmelCase ( lowercase ,lowercase ): """simple docstring""" return round(tf * idf ,3 )
30
1
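A small worked example for the tf-idf helpers above (assuming the mangled names correspond to term frequency, document frequency and inverse document frequency):

from math import log10

# three one-line documents; the term "cat" appears in two of them
corpus = "the cat sat\nthe dog sat\nthe cat ran"
docs = corpus.split("\n")
df = len([doc for doc in docs if "cat" in doc])  # document frequency: 2
n = len(docs)                                    # corpus size: 3
idf = round(log10(n / df), 3)                    # 0.176
tf = docs[0].split(" ").count("cat")             # term frequency in doc 0: 1
print(round(tf * idf, 3))                        # tf-idf: 0.176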
"""simple docstring""" from math import cos, sin, sqrt, tau from audio_filters.iir_filter import IIRFilter def __UpperCAmelCase ( lowercase ,lowercase ,lowercase = 1 / sqrt(2 ) ): """simple docstring""" _UpperCAmelCase = tau * frequency / samplerate _UpperCAmelCase = sin(lowercase ) _UpperCAmelCase = cos(lowercase ) _UpperCAmelCase = _sin / (2 * q_factor) _UpperCAmelCase = (1 - _cos) / 2 _UpperCAmelCase = 1 - _cos _UpperCAmelCase = 1 + alpha _UpperCAmelCase = -2 * _cos _UpperCAmelCase = 1 - alpha _UpperCAmelCase = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] ,[ba, ba, ba] ) return filt def __UpperCAmelCase ( lowercase ,lowercase ,lowercase = 1 / sqrt(2 ) ): """simple docstring""" _UpperCAmelCase = tau * frequency / samplerate _UpperCAmelCase = sin(lowercase ) _UpperCAmelCase = cos(lowercase ) _UpperCAmelCase = _sin / (2 * q_factor) _UpperCAmelCase = (1 + _cos) / 2 _UpperCAmelCase = -1 - _cos _UpperCAmelCase = 1 + alpha _UpperCAmelCase = -2 * _cos _UpperCAmelCase = 1 - alpha _UpperCAmelCase = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] ,[ba, ba, ba] ) return filt def __UpperCAmelCase ( lowercase ,lowercase ,lowercase = 1 / sqrt(2 ) ): """simple docstring""" _UpperCAmelCase = tau * frequency / samplerate _UpperCAmelCase = sin(lowercase ) _UpperCAmelCase = cos(lowercase ) _UpperCAmelCase = _sin / (2 * q_factor) _UpperCAmelCase = _sin / 2 _UpperCAmelCase = 0 _UpperCAmelCase = -ba _UpperCAmelCase = 1 + alpha _UpperCAmelCase = -2 * _cos _UpperCAmelCase = 1 - alpha _UpperCAmelCase = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] ,[ba, ba, ba] ) return filt def __UpperCAmelCase ( lowercase ,lowercase ,lowercase = 1 / sqrt(2 ) ): """simple docstring""" _UpperCAmelCase = tau * frequency / samplerate _UpperCAmelCase = sin(lowercase ) _UpperCAmelCase = cos(lowercase ) _UpperCAmelCase = _sin / (2 * q_factor) _UpperCAmelCase = 1 - alpha _UpperCAmelCase = -2 * _cos _UpperCAmelCase = 1 + alpha _UpperCAmelCase = IIRFilter(2 ) filt.set_coefficients([ba, ba, ba] ,[ba, ba, ba] ) return filt def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ,lowercase = 1 / sqrt(2 ) ,): """simple docstring""" _UpperCAmelCase = tau * frequency / samplerate _UpperCAmelCase = sin(lowercase ) _UpperCAmelCase = cos(lowercase ) _UpperCAmelCase = _sin / (2 * q_factor) _UpperCAmelCase = 10 ** (gain_db / 40) _UpperCAmelCase = 1 + alpha * big_a _UpperCAmelCase = -2 * _cos _UpperCAmelCase = 1 - alpha * big_a _UpperCAmelCase = 1 + alpha / big_a _UpperCAmelCase = -2 * _cos _UpperCAmelCase = 1 - alpha / big_a _UpperCAmelCase = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] ,[ba, ba, ba] ) return filt def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ,lowercase = 1 / sqrt(2 ) ,): """simple docstring""" _UpperCAmelCase = tau * frequency / samplerate _UpperCAmelCase = sin(lowercase ) _UpperCAmelCase = cos(lowercase ) _UpperCAmelCase = _sin / (2 * q_factor) _UpperCAmelCase = 10 ** (gain_db / 40) _UpperCAmelCase = (big_a + 1) - (big_a - 1) * _cos _UpperCAmelCase = (big_a + 1) + (big_a - 1) * _cos _UpperCAmelCase = (big_a - 1) - (big_a + 1) * _cos _UpperCAmelCase = (big_a - 1) + (big_a + 1) * _cos _UpperCAmelCase = 2 * sqrt(lowercase ) * alpha _UpperCAmelCase = big_a * (pmc + aaa) _UpperCAmelCase = 2 * big_a * mpc _UpperCAmelCase = big_a * (pmc - aaa) _UpperCAmelCase = ppmc + aaa _UpperCAmelCase = -2 * pmpc _UpperCAmelCase = ppmc - aaa _UpperCAmelCase = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] ,[ba, ba, ba] ) return filt def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ,lowercase = 1 / sqrt(2 ) ,): """simple 
docstring""" _UpperCAmelCase = tau * frequency / samplerate _UpperCAmelCase = sin(lowercase ) _UpperCAmelCase = cos(lowercase ) _UpperCAmelCase = _sin / (2 * q_factor) _UpperCAmelCase = 10 ** (gain_db / 40) _UpperCAmelCase = (big_a + 1) - (big_a - 1) * _cos _UpperCAmelCase = (big_a + 1) + (big_a - 1) * _cos _UpperCAmelCase = (big_a - 1) - (big_a + 1) * _cos _UpperCAmelCase = (big_a - 1) + (big_a + 1) * _cos _UpperCAmelCase = 2 * sqrt(lowercase ) * alpha _UpperCAmelCase = big_a * (ppmc + aaa) _UpperCAmelCase = -2 * big_a * pmpc _UpperCAmelCase = big_a * (ppmc - aaa) _UpperCAmelCase = pmc + aaa _UpperCAmelCase = 2 * mpc _UpperCAmelCase = pmc - aaa _UpperCAmelCase = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] ,[ba, ba, ba] ) return filt
30
"""simple docstring""" import argparse from torch import nn # transformers_old should correspond to branch `save_old_prophetnet_model_structure` here # original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively from transformers_old.modeling_prophetnet import ( ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld, ) from transformers_old.modeling_xlm_prophetnet import ( XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld, ) from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging UpperCAmelCase__ = logging.get_logger(__name__) logging.set_verbosity_info() def __UpperCAmelCase ( lowercase ,lowercase ): """simple docstring""" if "xprophetnet" in prophetnet_checkpoint_path: _UpperCAmelCase = XLMProphetNetForConditionalGenerationOld.from_pretrained(lowercase ) _UpperCAmelCase , _UpperCAmelCase = XLMProphetNetForConditionalGeneration.from_pretrained( lowercase ,output_loading_info=lowercase ) else: _UpperCAmelCase = ProphetNetForConditionalGenerationOld.from_pretrained(lowercase ) _UpperCAmelCase , _UpperCAmelCase = ProphetNetForConditionalGeneration.from_pretrained( lowercase ,output_loading_info=lowercase ) _UpperCAmelCase = ["""key_proj""", """value_proj""", """query_proj"""] _UpperCAmelCase = { """self_attn""": """ngram_self_attn""", """cross_attn""": """encoder_attn""", """cross_attn_layer_norm""": """encoder_attn_layer_norm""", """feed_forward_layer_norm""": """final_layer_norm""", """feed_forward""": """""", """intermediate""": """fc1""", """output""": """fc2""", """key_proj""": """k_proj""", """query_proj""": """q_proj""", """value_proj""": """v_proj""", """word_embeddings""": """embed_tokens""", """embeddings_layer_norm""": """emb_layer_norm""", """relative_pos_embeddings""": """relative_linear""", """ngram_embeddings""": """ngram_input_embed""", """position_embeddings""": """embed_positions""", } for key in loading_info["missing_keys"]: _UpperCAmelCase = key.split(""".""" ) if attributes[0] == "lm_head": _UpperCAmelCase = prophet _UpperCAmelCase = prophet_old else: _UpperCAmelCase = prophet.prophetnet _UpperCAmelCase = prophet_old.model _UpperCAmelCase = False for attribute in attributes: if attribute in mapping: _UpperCAmelCase = mapping[attribute] if not hasattr(lowercase ,lowercase ) and len(lowercase ) > 0: _UpperCAmelCase = attribute elif hasattr(lowercase ,lowercase ): _UpperCAmelCase = attribute if attribute == "weight": assert old_model.weight.shape == model.weight.shape, "Shapes have to match!" _UpperCAmelCase = old_model.weight logger.info(f'''{attribute} is initialized.''' ) _UpperCAmelCase = True break elif attribute == "bias": assert old_model.bias.shape == model.bias.shape, "Shapes have to match!" 
_UpperCAmelCase = old_model.bias logger.info(f'''{attribute} is initialized''' ) _UpperCAmelCase = True break elif attribute in special_keys and hasattr(lowercase ,"""in_proj_weight""" ): _UpperCAmelCase = old_model.in_proj_weight.shape[0] // 3 _UpperCAmelCase = getattr(lowercase ,lowercase ) assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match" assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match" if attribute == "query_proj": _UpperCAmelCase = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] ) _UpperCAmelCase = nn.Parameter(old_model.in_proj_bias[:embed_dim] ) elif attribute == "key_proj": _UpperCAmelCase = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] ) _UpperCAmelCase = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] ) elif attribute == "value_proj": _UpperCAmelCase = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] ) _UpperCAmelCase = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] ) _UpperCAmelCase = True break elif attribute == "position_embeddings": assert ( model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1] ), "Hidden size has to match" assert model.position_embeddings.weight.shape[0] == 5_12, "We want 512 position_embeddings." _UpperCAmelCase = nn.Parameter(old_model.embed_positions.weight[:5_12, :] ) _UpperCAmelCase = True break if attribute.isdigit(): _UpperCAmelCase = model[int(lowercase )] _UpperCAmelCase = old_model[int(lowercase )] else: _UpperCAmelCase = getattr(lowercase ,lowercase ) if old_attribute == "": _UpperCAmelCase = old_model else: if not hasattr(lowercase ,lowercase ): raise ValueError(f'''{old_model} does not have {old_attribute}''' ) _UpperCAmelCase = getattr(lowercase ,lowercase ) if not is_key_init: raise ValueError(f'''{key} was not correctly initialized!''' ) print(f'''Saving model to {pytorch_dump_folder_path}''' ) prophet.save_pretrained(lowercase ) if __name__ == "__main__": UpperCAmelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--prophetnet_checkpoint_path""", default=None, type=str, required=True, help="""Path to the official PyTorch dump.""" ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) UpperCAmelCase__ = parser.parse_args() convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
30
1
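The biquad helpers above encode the Audio EQ Cookbook formulas; a hedged, dependency-free check of the lowpass coefficients for a 440 Hz cutoff at 48 kHz (values chosen for illustration):

from math import cos, sin, sqrt, tau

frequency, samplerate, q_factor = 440, 48_000, 1 / sqrt(2)
w0 = tau * frequency / samplerate
alpha = sin(w0) / (2 * q_factor)
b0 = b2 = (1 - cos(w0)) / 2   # feedforward coefficients
b1 = 1 - cos(w0)
a0 = 1 + alpha                # feedback coefficients
a1 = -2 * cos(w0)
a2 = 1 - alpha
print([a0, a1, a2], [b0, b1, b2])  # the two lists passed to set_coefficients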
"""simple docstring""" import random import unittest import numpy as np import torch from diffusers import ( DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionUpscalePipeline, PNDMScheduler, ) from diffusers.utils import floats_tensor from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class a ( lowerCAmelCase_ , unittest.TestCase ): # TODO: is there an appropriate internal test set? _snake_case : Tuple = 'ssube/stable-diffusion-x4-upscaler-onnx' def lowerCAmelCase_ ( self : Tuple , __lowerCAmelCase : Any=0 ): _UpperCAmelCase = floats_tensor((1, 3, 128, 128) , rng=random.Random(__lowerCAmelCase ) ) _UpperCAmelCase = torch.manual_seed(__lowerCAmelCase ) _UpperCAmelCase = { """prompt""": """A painting of a squirrel eating a burger""", """image""": image, """generator""": generator, """num_inference_steps""": 3, """guidance_scale""": 7.5, """output_type""": """numpy""", } return inputs def lowerCAmelCase_ ( self : List[str] ): _UpperCAmelCase = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) pipe.set_progress_bar_config(disable=__lowerCAmelCase ) _UpperCAmelCase = self.get_dummy_inputs() _UpperCAmelCase = pipe(**__lowerCAmelCase ).images _UpperCAmelCase = image[0, -3:, -3:, -1].flatten() # started as 128, should now be 512 assert image.shape == (1, 512, 512, 3) _UpperCAmelCase = np.array( [0.6_974_782, 0.68_902_093, 0.70_135_885, 0.7_583_618, 0.7_804_545, 0.7_854_912, 0.78_667_426, 0.78_743_863, 0.78_070_223] ) assert np.abs(image_slice - expected_slice ).max() < 1e-1 def lowerCAmelCase_ ( self : int ): _UpperCAmelCase = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) _UpperCAmelCase = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=__lowerCAmelCase ) pipe.set_progress_bar_config(disable=__lowerCAmelCase ) _UpperCAmelCase = self.get_dummy_inputs() _UpperCAmelCase = pipe(**__lowerCAmelCase ).images _UpperCAmelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) _UpperCAmelCase = np.array( [0.6_898_892, 0.59_240_556, 0.52_499_527, 0.58_866_215, 0.52_258_235, 0.52_572_715, 0.62_414_473, 0.6_174_387, 0.6_214_964] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def lowerCAmelCase_ ( self : Union[str, Any] ): _UpperCAmelCase = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) _UpperCAmelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=__lowerCAmelCase ) _UpperCAmelCase = self.get_dummy_inputs() _UpperCAmelCase = pipe(**__lowerCAmelCase ).images _UpperCAmelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) _UpperCAmelCase = np.array( [0.7_659_278, 0.76_437_664, 0.75_579_107, 0.7_691_116, 0.77_666_986, 0.7_727_672, 0.7_758_664, 0.7_812_226, 0.76_942_515] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def lowerCAmelCase_ ( self : Dict ): _UpperCAmelCase = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) _UpperCAmelCase = EulerDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=__lowerCAmelCase ) 
_UpperCAmelCase = self.get_dummy_inputs() _UpperCAmelCase = pipe(**__lowerCAmelCase ).images _UpperCAmelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) _UpperCAmelCase = np.array( [0.6_974_782, 0.68_902_093, 0.70_135_885, 0.7_583_618, 0.7_804_545, 0.7_854_912, 0.78_667_426, 0.78_743_863, 0.78_070_223] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def lowerCAmelCase_ ( self : List[str] ): _UpperCAmelCase = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) _UpperCAmelCase = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=__lowerCAmelCase ) _UpperCAmelCase = self.get_dummy_inputs() _UpperCAmelCase = pipe(**__lowerCAmelCase ).images _UpperCAmelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) _UpperCAmelCase = np.array( [0.77_424_496, 0.773_601, 0.7_645_288, 0.7_769_598, 0.7_772_739, 0.7_738_688, 0.78_187_233, 0.77_879_584, 0.767_043] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 @nightly @require_onnxruntime @require_torch_gpu class a ( unittest.TestCase ): @property def lowerCAmelCase_ ( self : str ): return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def lowerCAmelCase_ ( self : str ): _UpperCAmelCase = ort.SessionOptions() _UpperCAmelCase = False return options def lowerCAmelCase_ ( self : Optional[Any] ): _UpperCAmelCase = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/img2img/sketch-mountains-input.jpg""" ) _UpperCAmelCase = init_image.resize((128, 128) ) # using the PNDM scheduler by default _UpperCAmelCase = OnnxStableDiffusionUpscalePipeline.from_pretrained( """ssube/stable-diffusion-x4-upscaler-onnx""" , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=__lowerCAmelCase ) _UpperCAmelCase = """A fantasy landscape, trending on artstation""" _UpperCAmelCase = torch.manual_seed(0 ) _UpperCAmelCase = pipe( prompt=__lowerCAmelCase , image=__lowerCAmelCase , guidance_scale=7.5 , num_inference_steps=10 , generator=__lowerCAmelCase , output_type="""np""" , ) _UpperCAmelCase = output.images _UpperCAmelCase = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 512, 3) _UpperCAmelCase = np.array([0.4_883, 0.4_947, 0.4_980, 0.4_975, 0.4_982, 0.4_980, 0.5_000, 0.5_006, 0.4_972] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2 def lowerCAmelCase_ ( self : Union[str, Any] ): _UpperCAmelCase = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/img2img/sketch-mountains-input.jpg""" ) _UpperCAmelCase = init_image.resize((128, 128) ) _UpperCAmelCase = LMSDiscreteScheduler.from_pretrained( """ssube/stable-diffusion-x4-upscaler-onnx""" , subfolder="""scheduler""" ) _UpperCAmelCase = OnnxStableDiffusionUpscalePipeline.from_pretrained( """ssube/stable-diffusion-x4-upscaler-onnx""" , scheduler=__lowerCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=__lowerCAmelCase ) _UpperCAmelCase = """A fantasy landscape, trending on artstation""" _UpperCAmelCase = torch.manual_seed(0 ) _UpperCAmelCase = pipe( prompt=__lowerCAmelCase , image=__lowerCAmelCase , guidance_scale=7.5 , 
num_inference_steps=20 , generator=__lowerCAmelCase , output_type="""np""" , ) _UpperCAmelCase = output.images _UpperCAmelCase = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 512, 3) _UpperCAmelCase = np.array( [0.50_173_753, 0.50_223_356, 0.502_039, 0.50_233_036, 0.5_023_725, 0.5_022_601, 0.5_018_758, 0.50_234_085, 0.50_241_566] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
30
"""simple docstring""" import mpmath # for roots of unity import numpy as np class a : def __init__( self : Tuple , __lowerCAmelCase : Dict=None , __lowerCAmelCase : Union[str, Any]=None ): # Input as list _UpperCAmelCase = list(poly_a or [0] )[:] _UpperCAmelCase = list(poly_b or [0] )[:] # Remove leading zero coefficients while self.polyA[-1] == 0: self.polyA.pop() _UpperCAmelCase = len(self.polyA ) while self.polyB[-1] == 0: self.polyB.pop() _UpperCAmelCase = len(self.polyB ) # Add 0 to make lengths equal a power of 2 _UpperCAmelCase = int( 2 ** np.ceil(np.loga(len(self.polyA ) + len(self.polyB ) - 1 ) ) ) while len(self.polyA ) < self.c_max_length: self.polyA.append(0 ) while len(self.polyB ) < self.c_max_length: self.polyB.append(0 ) # A complex root used for the fourier transform _UpperCAmelCase = complex(mpmath.root(x=1 , n=self.c_max_length , k=1 ) ) # The product _UpperCAmelCase = self.__multiply() def lowerCAmelCase_ ( self : Optional[int] , __lowerCAmelCase : str ): _UpperCAmelCase = [[x] for x in self.polyA] if which == """A""" else [[x] for x in self.polyB] # Corner case if len(__lowerCAmelCase ) <= 1: return dft[0] # _UpperCAmelCase = self.c_max_length // 2 while next_ncol > 0: _UpperCAmelCase = [[] for i in range(__lowerCAmelCase )] _UpperCAmelCase = self.root**next_ncol # First half of next step _UpperCAmelCase = 1 for j in range(self.c_max_length // (next_ncol * 2) ): for i in range(__lowerCAmelCase ): new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j] ) current_root *= root # Second half of next step _UpperCAmelCase = 1 for j in range(self.c_max_length // (next_ncol * 2) ): for i in range(__lowerCAmelCase ): new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j] ) current_root *= root # Update _UpperCAmelCase = new_dft _UpperCAmelCase = next_ncol // 2 return dft[0] def lowerCAmelCase_ ( self : Tuple ): _UpperCAmelCase = self.__dft("""A""" ) _UpperCAmelCase = self.__dft("""B""" ) _UpperCAmelCase = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length )]] del dft_a del dft_b # Corner Case if len(inverce_c[0] ) <= 1: return inverce_c[0] # Inverse DFT _UpperCAmelCase = 2 while next_ncol <= self.c_max_length: _UpperCAmelCase = [[] for i in range(__lowerCAmelCase )] _UpperCAmelCase = self.root ** (next_ncol // 2) _UpperCAmelCase = 1 # First half of next step for j in range(self.c_max_length // next_ncol ): for i in range(next_ncol // 2 ): # Even positions new_inverse_c[i].append( ( inverce_c[i][j] + inverce_c[i][j + self.c_max_length // next_ncol] ) / 2 ) # Odd positions new_inverse_c[i + next_ncol // 2].append( ( inverce_c[i][j] - inverce_c[i][j + self.c_max_length // next_ncol] ) / (2 * current_root) ) current_root *= root # Update _UpperCAmelCase = new_inverse_c next_ncol *= 2 # Unpack _UpperCAmelCase = [round(x[0].real , 8 ) + round(x[0].imag , 8 ) * 1j for x in inverce_c] # Remove leading 0's while inverce_c[-1] == 0: inverce_c.pop() return inverce_c def __str__( self : Dict ): _UpperCAmelCase = """A = """ + """ + """.join( f'''{coef}*x^{i}''' for coef, i in enumerate(self.polyA[: self.len_A] ) ) _UpperCAmelCase = """B = """ + """ + """.join( f'''{coef}*x^{i}''' for coef, i in enumerate(self.polyB[: self.len_B] ) ) _UpperCAmelCase = """A*B = """ + """ + """.join( f'''{coef}*x^{i}''' for coef, i in enumerate(self.product ) ) return f'''{a}\n{b}\n{c}''' # Unit tests if __name__ == "__main__": import doctest doctest.testmod()
30
1
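An independent sanity check of what the polynomial-multiplication class above computes, using numpy's FFT instead of the hand-rolled butterflies:

import numpy as np

poly_a = [1, 2, 3]  # 1 + 2x + 3x^2
poly_b = [4, 5]     # 4 + 5x
n = 4               # next power of two >= len(poly_a) + len(poly_b) - 1
product = np.fft.ifft(np.fft.fft(poly_a, n) * np.fft.fft(poly_b, n)).real.round(8)
print(product)      # [ 4. 13. 22. 15.]  ->  4 + 13x + 22x^2 + 15x^3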
"""simple docstring""" from __future__ import annotations import unittest from transformers import MobileBertConfig, is_tf_available from transformers.models.auto import get_values from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_MODEL_FOR_PRETRAINING_MAPPING, TFMobileBertForMaskedLM, TFMobileBertForMultipleChoice, TFMobileBertForNextSentencePrediction, TFMobileBertForPreTraining, TFMobileBertForQuestionAnswering, TFMobileBertForSequenceClassification, TFMobileBertForTokenClassification, TFMobileBertModel, ) @require_tf class a ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): _snake_case : str = ( ( TFMobileBertModel, TFMobileBertForMaskedLM, TFMobileBertForNextSentencePrediction, TFMobileBertForPreTraining, TFMobileBertForQuestionAnswering, TFMobileBertForSequenceClassification, TFMobileBertForTokenClassification, TFMobileBertForMultipleChoice, ) if is_tf_available() else () ) _snake_case : Dict = ( { 'feature-extraction': TFMobileBertModel, 'fill-mask': TFMobileBertForMaskedLM, 'question-answering': TFMobileBertForQuestionAnswering, 'text-classification': TFMobileBertForSequenceClassification, 'token-classification': TFMobileBertForTokenClassification, 'zero-shot': TFMobileBertForSequenceClassification, } if is_tf_available() else {} ) _snake_case : Dict = False _snake_case : List[str] = False def lowerCAmelCase_ ( self : Dict , __lowerCAmelCase : Dict , __lowerCAmelCase : List[Any] , __lowerCAmelCase : int=False ): _UpperCAmelCase = super()._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase ) if return_labels: if model_class in get_values(__lowerCAmelCase ): _UpperCAmelCase = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) return inputs_dict class a ( lowerCAmelCase_ ): def __init__( self : Optional[int] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : str=13 , __lowerCAmelCase : Any=7 , __lowerCAmelCase : Optional[int]=True , __lowerCAmelCase : str=True , __lowerCAmelCase : List[str]=True , __lowerCAmelCase : Tuple=True , __lowerCAmelCase : List[str]=99 , __lowerCAmelCase : Optional[int]=32 , __lowerCAmelCase : str=32 , __lowerCAmelCase : Optional[Any]=2 , __lowerCAmelCase : int=4 , __lowerCAmelCase : Tuple=37 , __lowerCAmelCase : Union[str, Any]="gelu" , __lowerCAmelCase : int=0.1 , __lowerCAmelCase : Optional[Any]=0.1 , __lowerCAmelCase : int=512 , __lowerCAmelCase : List[Any]=16 , __lowerCAmelCase : Optional[Any]=2 , __lowerCAmelCase : Optional[int]=0.02 , __lowerCAmelCase : Optional[int]=3 , __lowerCAmelCase : Dict=4 , __lowerCAmelCase : str=None , ): _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = seq_length _UpperCAmelCase = is_training _UpperCAmelCase = use_input_mask _UpperCAmelCase = use_token_type_ids _UpperCAmelCase = use_labels _UpperCAmelCase = vocab_size _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_act _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = max_position_embeddings _UpperCAmelCase = type_vocab_size _UpperCAmelCase = type_sequence_label_size _UpperCAmelCase = initializer_range _UpperCAmelCase = num_labels _UpperCAmelCase = 
num_choices _UpperCAmelCase = scope _UpperCAmelCase = embedding_size def lowerCAmelCase_ ( self : int ): _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _UpperCAmelCase = None if self.use_input_mask: _UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] ) _UpperCAmelCase = None if self.use_token_type_ids: _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None if self.use_labels: _UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices ) _UpperCAmelCase = MobileBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCAmelCase_ ( self : str , __lowerCAmelCase : List[str] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any ): _UpperCAmelCase = TFMobileBertModel(config=__lowerCAmelCase ) _UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} _UpperCAmelCase = model(__lowerCAmelCase ) _UpperCAmelCase = [input_ids, input_mask] _UpperCAmelCase = model(__lowerCAmelCase ) _UpperCAmelCase = model(__lowerCAmelCase ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def lowerCAmelCase_ ( self : Any , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Any , __lowerCAmelCase : List[str] , __lowerCAmelCase : Any , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[str] ): _UpperCAmelCase = TFMobileBertForMaskedLM(config=__lowerCAmelCase ) _UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} _UpperCAmelCase = model(__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCAmelCase_ ( self : int , __lowerCAmelCase : List[str] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : int , __lowerCAmelCase : Dict , __lowerCAmelCase : Any , __lowerCAmelCase : Dict ): _UpperCAmelCase = TFMobileBertForNextSentencePrediction(config=__lowerCAmelCase ) _UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} _UpperCAmelCase = model(__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) ) def lowerCAmelCase_ ( self : Dict , __lowerCAmelCase : int , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[Any] , 
__lowerCAmelCase : List[str] , __lowerCAmelCase : Tuple ): _UpperCAmelCase = TFMobileBertForPreTraining(config=__lowerCAmelCase ) _UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} _UpperCAmelCase = model(__lowerCAmelCase ) self.parent.assertEqual( result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) ) def lowerCAmelCase_ ( self : int , __lowerCAmelCase : Tuple , __lowerCAmelCase : int , __lowerCAmelCase : Any , __lowerCAmelCase : str , __lowerCAmelCase : Any , __lowerCAmelCase : int , __lowerCAmelCase : Tuple ): _UpperCAmelCase = self.num_labels _UpperCAmelCase = TFMobileBertForSequenceClassification(config=__lowerCAmelCase ) _UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} _UpperCAmelCase = model(__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Dict , __lowerCAmelCase : Any , __lowerCAmelCase : Any , __lowerCAmelCase : int , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Union[str, Any] ): _UpperCAmelCase = self.num_choices _UpperCAmelCase = TFMobileBertForMultipleChoice(config=__lowerCAmelCase ) _UpperCAmelCase = tf.tile(tf.expand_dims(__lowerCAmelCase , 1 ) , (1, self.num_choices, 1) ) _UpperCAmelCase = tf.tile(tf.expand_dims(__lowerCAmelCase , 1 ) , (1, self.num_choices, 1) ) _UpperCAmelCase = tf.tile(tf.expand_dims(__lowerCAmelCase , 1 ) , (1, self.num_choices, 1) ) _UpperCAmelCase = { """input_ids""": multiple_choice_inputs_ids, """attention_mask""": multiple_choice_input_mask, """token_type_ids""": multiple_choice_token_type_ids, } _UpperCAmelCase = model(__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Any , __lowerCAmelCase : Dict , __lowerCAmelCase : List[str] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any ): _UpperCAmelCase = self.num_labels _UpperCAmelCase = TFMobileBertForTokenClassification(config=__lowerCAmelCase ) _UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} _UpperCAmelCase = model(__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCAmelCase_ ( self : Dict , __lowerCAmelCase : str , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : int , __lowerCAmelCase : Any , __lowerCAmelCase : str , __lowerCAmelCase : List[Any] ): _UpperCAmelCase = TFMobileBertForQuestionAnswering(config=__lowerCAmelCase ) _UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} _UpperCAmelCase = model(__lowerCAmelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCAmelCase_ ( self : Tuple ): _UpperCAmelCase = self.prepare_config_and_inputs() ( ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ) = config_and_inputs _UpperCAmelCase = 
{"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask} return config, inputs_dict def lowerCAmelCase_ ( self : str ): _UpperCAmelCase = TFMobileBertModelTest.TFMobileBertModelTester(self ) _UpperCAmelCase = ConfigTester(self , config_class=__lowerCAmelCase , hidden_size=37 ) def lowerCAmelCase_ ( self : Any ): self.config_tester.run_common_tests() def lowerCAmelCase_ ( self : Any ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_model(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : List[Any] ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_masked_lm(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : Union[str, Any] ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_multiple_choice(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : Any ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : Optional[int] ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_pretraining(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : str ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_question_answering(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : str ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_sequence_classification(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : str ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_token_classification(*__lowerCAmelCase ) @slow def lowerCAmelCase_ ( self : int ): # for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: for model_name in ["google/mobilebert-uncased"]: _UpperCAmelCase = TFMobileBertModel.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) @require_tf class a ( unittest.TestCase ): @slow def lowerCAmelCase_ ( self : List[Any] ): _UpperCAmelCase = TFMobileBertForPreTraining.from_pretrained("""google/mobilebert-uncased""" ) _UpperCAmelCase = tf.constant([[0, 1, 2, 3, 4, 5]] ) _UpperCAmelCase = model(__lowerCAmelCase )[0] _UpperCAmelCase = [1, 6, 3_0522] self.assertEqual(output.shape , __lowerCAmelCase ) _UpperCAmelCase = tf.constant( [ [ [-4.5_919_547, -9.248_295, -9.645_256], [-6.7_306_175, -6.440_284, -6.6_052_837], [-7.2_743_506, -6.7_847_915, -6.024_673], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , __lowerCAmelCase , atol=1e-4 )
30
"""simple docstring""" import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto.configuration_auto import CONFIG_MAPPING UpperCAmelCase__ = logging.get_logger(__name__) class a ( lowerCAmelCase_ ): _snake_case : List[str] = 'upernet' def __init__( self : Tuple , __lowerCAmelCase : int=None , __lowerCAmelCase : Tuple=512 , __lowerCAmelCase : Union[str, Any]=0.02 , __lowerCAmelCase : Tuple=[1, 2, 3, 6] , __lowerCAmelCase : Any=True , __lowerCAmelCase : Any=0.4 , __lowerCAmelCase : Union[str, Any]=384 , __lowerCAmelCase : Optional[int]=256 , __lowerCAmelCase : List[str]=1 , __lowerCAmelCase : Optional[int]=False , __lowerCAmelCase : Optional[int]=255 , **__lowerCAmelCase : Union[str, Any] , ): super().__init__(**__lowerCAmelCase ) if backbone_config is None: logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" ) _UpperCAmelCase = CONFIG_MAPPING["""resnet"""](out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] ) elif isinstance(__lowerCAmelCase , __lowerCAmelCase ): _UpperCAmelCase = backbone_config.get("""model_type""" ) _UpperCAmelCase = CONFIG_MAPPING[backbone_model_type] _UpperCAmelCase = config_class.from_dict(__lowerCAmelCase ) _UpperCAmelCase = backbone_config _UpperCAmelCase = hidden_size _UpperCAmelCase = initializer_range _UpperCAmelCase = pool_scales _UpperCAmelCase = use_auxiliary_head _UpperCAmelCase = auxiliary_loss_weight _UpperCAmelCase = auxiliary_in_channels _UpperCAmelCase = auxiliary_channels _UpperCAmelCase = auxiliary_num_convs _UpperCAmelCase = auxiliary_concat_input _UpperCAmelCase = loss_ignore_index def lowerCAmelCase_ ( self : List[Any] ): _UpperCAmelCase = copy.deepcopy(self.__dict__ ) _UpperCAmelCase = self.backbone_config.to_dict() _UpperCAmelCase = self.__class__.model_type return output
30
1
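A hedged, transformers-free sketch of the nested-config pattern the UperNet config above uses: a composite config that serializes its backbone config inside to_dict(). The class and attribute names are illustrative:

import copy

class BackboneConfig:
    model_type = "resnet"
    def __init__(self, out_features):
        self.out_features = out_features
    def to_dict(self):
        return {"model_type": self.model_type, "out_features": self.out_features}

class CompositeConfig:
    model_type = "upernet"
    def __init__(self, backbone_config=None):
        # fall back to a default backbone, as the snippet does
        self.backbone_config = backbone_config or BackboneConfig(["stage1", "stage2"])
    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.model_type
        return output

print(CompositeConfig().to_dict())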
"""simple docstring""" from typing import Dict, List from nltk.translate import gleu_score import datasets from datasets import MetricInfo UpperCAmelCase__ = """\ @misc{wu2016googles, title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation}, author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes and Jeffrey Dean}, year={2016}, eprint={1609.08144}, archivePrefix={arXiv}, primaryClass={cs.CL} } """ UpperCAmelCase__ = """\ The BLEU score has some undesirable properties when used for single sentences, as it was designed to be a corpus measure. We therefore use a slightly different score for our RL experiments which we call the 'GLEU score'. For the GLEU score, we record all sub-sequences of 1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then compute a recall, which is the ratio of the number of matching n-grams to the number of total n-grams in the target (ground truth) sequence, and a precision, which is the ratio of the number of matching n-grams to the number of total n-grams in the generated output sequence. Then GLEU score is simply the minimum of recall and precision. This GLEU score's range is always between 0 (no matches) and 1 (all match) and it is symmetrical when switching output and target. According to our experiments, GLEU score correlates quite well with the BLEU metric on a corpus level but does not have its drawbacks for our per sentence reward objective. """ UpperCAmelCase__ = """\ Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references. Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values. Args: predictions (list of str): list of translations to score. Each translation should be tokenized into a list of tokens. references (list of list of str): list of lists of references for each translation. Each reference should be tokenized into a list of tokens. min_len (int): The minimum order of n-gram this function should extract. Defaults to 1. max_len (int): The maximum order of n-gram this function should extract. Defaults to 4. Returns: 'google_bleu': google_bleu score Examples: Example 1: >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always', ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat'] >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which', ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never', ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat'] >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was', ... 'interested', 'in', 'world', 'history'] >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history', ... 
'because', 'he', 'read', 'the', 'book'] >>> list_of_references = [[ref1a], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric(\"google_bleu\") >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references) >>> print(round(results[\"google_bleu\"], 2)) 0.44 Example 2: >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always', ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat'] >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which', ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never', ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat'] >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never', ... 'heed', 'the', 'cat', 'commands'] >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the', ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions', ... 'of', 'the', 'cat'] >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was', ... 'interested', 'in', 'world', 'history'] >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history', ... 'because', 'he', 'read', 'the', 'book'] >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric(\"google_bleu\") >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references) >>> print(round(results[\"google_bleu\"], 2)) 0.61 Example 3: >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always', ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat'] >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which', ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never', ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat'] >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never', ... 'heed', 'the', 'cat', 'commands'] >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the', ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions', ... 'of', 'the', 'cat'] >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was', ... 'interested', 'in', 'world', 'history'] >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history', ... 'because', 'he', 'read', 'the', 'book'] >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric(\"google_bleu\") >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2) >>> print(round(results[\"google_bleu\"], 2)) 0.53 Example 4: >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always', ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat'] >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which', ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never', ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat'] >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never', ... 'heed', 'the', 'cat', 'commands'] >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the', ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions', ... 'of', 'the', 'cat'] >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was', ... 
'interested', 'in', 'world', 'history'] >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history', ... 'because', 'he', 'read', 'the', 'book'] >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric(\"google_bleu\") >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6) >>> print(round(results[\"google_bleu\"], 2)) 0.4 """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class a ( datasets.Metric ): def lowerCAmelCase_ ( self : Optional[int] ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ), """references""": datasets.Sequence( datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ) , id="""references""" ), } ) , ) def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : List[List[List[str]]] , __lowerCAmelCase : List[List[str]] , __lowerCAmelCase : int = 1 , __lowerCAmelCase : int = 4 , ): return { "google_bleu": gleu_score.corpus_gleu( list_of_references=__lowerCAmelCase , hypotheses=__lowerCAmelCase , min_len=__lowerCAmelCase , max_len=__lowerCAmelCase ) }
30
"""simple docstring""" from itertools import product def __UpperCAmelCase ( lowercase ,lowercase ): """simple docstring""" _UpperCAmelCase = sides_number _UpperCAmelCase = max_face_number * dice_number _UpperCAmelCase = [0] * (max_total + 1) _UpperCAmelCase = 1 _UpperCAmelCase = range(lowercase ,max_face_number + 1 ) for dice_numbers in product(lowercase ,repeat=lowercase ): _UpperCAmelCase = sum(lowercase ) totals_frequencies[total] += 1 return totals_frequencies def __UpperCAmelCase ( ): """simple docstring""" _UpperCAmelCase = total_frequency_distribution( sides_number=4 ,dice_number=9 ) _UpperCAmelCase = total_frequency_distribution( sides_number=6 ,dice_number=6 ) _UpperCAmelCase = 0 _UpperCAmelCase = 9 _UpperCAmelCase = 4 * 9 _UpperCAmelCase = 6 for peter_total in range(lowercase ,max_peter_total + 1 ): peter_wins_count += peter_totals_frequencies[peter_total] * sum( colin_totals_frequencies[min_colin_total:peter_total] ) _UpperCAmelCase = (4**9) * (6**6) _UpperCAmelCase = peter_wins_count / total_games_number _UpperCAmelCase = round(lowercase ,ndigits=7 ) return rounded_peter_win_probability if __name__ == "__main__": print(F'''{solution() = }''')
30
1
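A brute-force cross-check of the dice snippet above (Peter's nine 4-sided dice vs. Colin's six 6-sided dice); slower than the frequency-table approach but easy to verify:

from itertools import accumulate, product

peter = [sum(r) for r in product(range(1, 5), repeat=9)]  # 4**9 totals
colin = [sum(r) for r in product(range(1, 7), repeat=6)]  # 6**6 totals
colin_freq = [0] * 37                                     # Colin's totals lie in 6..36
for total in colin:
    colin_freq[total] += 1
cumulative = list(accumulate(colin_freq))                 # counts of totals <= index
wins = sum(cumulative[p - 1] for p in peter)              # Colin strictly below Peter
print(round(wins / (len(peter) * len(colin)), 7))         # 0.5731441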
"""simple docstring""" import torch import torch.nn as nn from transformers.modeling_utils import ModuleUtilsMixin from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin class a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): @register_to_config def __init__( self : int , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : float , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : str , __lowerCAmelCase : bool = False , ): super().__init__() _UpperCAmelCase = nn.Embedding(__lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase = nn.Embedding(__lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase = False _UpperCAmelCase = nn.Dropout(p=__lowerCAmelCase ) _UpperCAmelCase = TaConfig( vocab_size=__lowerCAmelCase , d_model=__lowerCAmelCase , num_heads=__lowerCAmelCase , d_kv=__lowerCAmelCase , d_ff=__lowerCAmelCase , dropout_rate=__lowerCAmelCase , feed_forward_proj=__lowerCAmelCase , is_decoder=__lowerCAmelCase , is_encoder_decoder=__lowerCAmelCase , ) _UpperCAmelCase = nn.ModuleList() for lyr_num in range(__lowerCAmelCase ): _UpperCAmelCase = TaBlock(__lowerCAmelCase ) self.encoders.append(__lowerCAmelCase ) _UpperCAmelCase = TaLayerNorm(__lowerCAmelCase ) _UpperCAmelCase = nn.Dropout(p=__lowerCAmelCase ) def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[str] ): _UpperCAmelCase = self.token_embedder(__lowerCAmelCase ) _UpperCAmelCase = encoder_input_tokens.shape[1] _UpperCAmelCase = torch.arange(__lowerCAmelCase , device=encoder_input_tokens.device ) x += self.position_encoding(__lowerCAmelCase ) _UpperCAmelCase = self.dropout_pre(__lowerCAmelCase ) # inverted the attention mask _UpperCAmelCase = encoder_input_tokens.size() _UpperCAmelCase = self.get_extended_attention_mask(__lowerCAmelCase , __lowerCAmelCase ) for lyr in self.encoders: _UpperCAmelCase = lyr(__lowerCAmelCase , __lowerCAmelCase )[0] _UpperCAmelCase = self.layer_norm(__lowerCAmelCase ) return self.dropout_post(__lowerCAmelCase ), encoder_inputs_mask
30
"""simple docstring""" import copy from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto.configuration_auto import AutoConfig if TYPE_CHECKING: from ... import PreTrainedTokenizerBase, TensorType UpperCAmelCase__ = logging.get_logger(__name__) class a ( lowerCAmelCase_ ): _snake_case : List[Any] = 'vision-encoder-decoder' _snake_case : Optional[int] = True def __init__( self : int , **__lowerCAmelCase : Any ): super().__init__(**__lowerCAmelCase ) if "encoder" not in kwargs or "decoder" not in kwargs: raise ValueError( f'''A configuraton of type {self.model_type} cannot be instantiated because ''' f'''not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}''' ) _UpperCAmelCase = kwargs.pop("""encoder""" ) _UpperCAmelCase = encoder_config.pop("""model_type""" ) _UpperCAmelCase = kwargs.pop("""decoder""" ) _UpperCAmelCase = decoder_config.pop("""model_type""" ) _UpperCAmelCase = AutoConfig.for_model(__lowerCAmelCase , **__lowerCAmelCase ) _UpperCAmelCase = AutoConfig.for_model(__lowerCAmelCase , **__lowerCAmelCase ) _UpperCAmelCase = True @classmethod def lowerCAmelCase_ ( cls : int , __lowerCAmelCase : PretrainedConfig , __lowerCAmelCase : PretrainedConfig , **__lowerCAmelCase : str ): logger.info("""Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config""" ) _UpperCAmelCase = True _UpperCAmelCase = True return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **__lowerCAmelCase ) def lowerCAmelCase_ ( self : int ): _UpperCAmelCase = copy.deepcopy(self.__dict__ ) _UpperCAmelCase = self.encoder.to_dict() _UpperCAmelCase = self.decoder.to_dict() _UpperCAmelCase = self.__class__.model_type return output class a ( lowerCAmelCase_ ): _snake_case : Union[str, Any] = version.parse('1.11' ) @property def lowerCAmelCase_ ( self : int ): return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) @property def lowerCAmelCase_ ( self : Tuple ): return 1e-4 @property def lowerCAmelCase_ ( self : Dict ): return OrderedDict({"""last_hidden_state""": {0: """batch""", 1: """encoder_sequence"""}} ) class a ( lowerCAmelCase_ ): @property def lowerCAmelCase_ ( self : Any ): _UpperCAmelCase = OrderedDict() _UpperCAmelCase = {0: """batch""", 1: """past_decoder_sequence + sequence"""} _UpperCAmelCase = {0: """batch""", 1: """past_decoder_sequence + sequence"""} _UpperCAmelCase = {0: """batch""", 1: """encoder_sequence"""} return common_inputs def lowerCAmelCase_ ( self : Union[str, Any] , __lowerCAmelCase : "PreTrainedTokenizerBase" , __lowerCAmelCase : int = -1 , __lowerCAmelCase : int = -1 , __lowerCAmelCase : bool = False , __lowerCAmelCase : Optional["TensorType"] = None , ): import torch _UpperCAmelCase = OrderedDict() _UpperCAmelCase = super().generate_dummy_inputs( __lowerCAmelCase , batch_size=__lowerCAmelCase , seq_length=__lowerCAmelCase , is_pair=__lowerCAmelCase , framework=__lowerCAmelCase ) _UpperCAmelCase , _UpperCAmelCase = dummy_input["""input_ids"""].shape _UpperCAmelCase = (batch, encoder_sequence, self._config.encoder_hidden_size) _UpperCAmelCase = dummy_input.pop("""input_ids""" ) _UpperCAmelCase = dummy_input.pop("""attention_mask""" ) _UpperCAmelCase = torch.zeros(__lowerCAmelCase ) return common_inputs class a ( lowerCAmelCase_ ): @property def lowerCAmelCase_ ( self : Tuple ): 
pass def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : PretrainedConfig ): return VisionEncoderDecoderEncoderOnnxConfig(__lowerCAmelCase ) def lowerCAmelCase_ ( self : int , __lowerCAmelCase : PretrainedConfig , __lowerCAmelCase : PretrainedConfig , __lowerCAmelCase : str = "default" ): _UpperCAmelCase = encoder_config.hidden_size return VisionEncoderDecoderDecoderOnnxConfig(__lowerCAmelCase , __lowerCAmelCase )
30
1
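The notes-encoder sample above leans on ModuleUtilsMixin.get_extended_attention_mask to turn a 0/1 padding mask into an additive attention bias; the sketch below re-states that transformation in isolation, as a simplified approximation of the mixin's behaviour rather than the library call itself.

import torch

def extended_attention_mask(mask: torch.Tensor, dtype: torch.dtype = torch.float32) -> torch.Tensor:
    # mask: (batch, seq_len), 1 for real tokens and 0 for padding.
    extended = mask[:, None, None, :].to(dtype)  # broadcastable to (batch, heads, seq, seq)
    # 0 where attention is allowed, a large negative bias where it is masked out.
    return (1.0 - extended) * torch.finfo(dtype).min

print(extended_attention_mask(torch.tensor([[1, 1, 1, 0]])))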
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ChineseCLIPImageProcessor class a ( unittest.TestCase ): def __init__( self : List[Any] , __lowerCAmelCase : str , __lowerCAmelCase : Tuple=7 , __lowerCAmelCase : Tuple=3 , __lowerCAmelCase : List[Any]=18 , __lowerCAmelCase : Optional[int]=30 , __lowerCAmelCase : Optional[int]=400 , __lowerCAmelCase : Optional[Any]=True , __lowerCAmelCase : Union[str, Any]=None , __lowerCAmelCase : int=True , __lowerCAmelCase : str=None , __lowerCAmelCase : Optional[int]=True , __lowerCAmelCase : int=[0.48_145_466, 0.4_578_275, 0.40_821_073] , __lowerCAmelCase : Dict=[0.26_862_954, 0.26_130_258, 0.27_577_711] , __lowerCAmelCase : Union[str, Any]=True , ): _UpperCAmelCase = size if size is not None else {"""height""": 224, """width""": 224} _UpperCAmelCase = crop_size if crop_size is not None else {"""height""": 18, """width""": 18} _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = num_channels _UpperCAmelCase = image_size _UpperCAmelCase = min_resolution _UpperCAmelCase = max_resolution _UpperCAmelCase = do_resize _UpperCAmelCase = size _UpperCAmelCase = do_center_crop _UpperCAmelCase = crop_size _UpperCAmelCase = do_normalize _UpperCAmelCase = image_mean _UpperCAmelCase = image_std _UpperCAmelCase = do_convert_rgb def lowerCAmelCase_ ( self : str ): return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_convert_rgb": self.do_convert_rgb, } def lowerCAmelCase_ ( self : Any , __lowerCAmelCase : Tuple=False , __lowerCAmelCase : Optional[int]=False , __lowerCAmelCase : str=False ): assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time" if equal_resolution: _UpperCAmelCase = [] for i in range(self.batch_size ): image_inputs.append( np.random.randint( 255 , size=(self.num_channels, self.max_resolution, self.max_resolution) , dtype=np.uinta ) ) else: _UpperCAmelCase = [] for i in range(self.batch_size ): _UpperCAmelCase , _UpperCAmelCase = np.random.choice(np.arange(self.min_resolution , self.max_resolution ) , 2 ) image_inputs.append(np.random.randint(255 , size=(self.num_channels, width, height) , dtype=np.uinta ) ) if not numpify and not torchify: # PIL expects the channel dimension as last dimension _UpperCAmelCase = [Image.fromarray(np.moveaxis(__lowerCAmelCase , 0 , -1 ) ) for x in image_inputs] if torchify: _UpperCAmelCase = [torch.from_numpy(__lowerCAmelCase ) for x in image_inputs] return image_inputs @require_torch @require_vision class a ( lowerCAmelCase_ , unittest.TestCase ): _snake_case : Any = ChineseCLIPImageProcessor if is_vision_available() else None def lowerCAmelCase_ ( self : int ): _UpperCAmelCase = ChineseCLIPImageProcessingTester(self , do_center_crop=__lowerCAmelCase ) @property def lowerCAmelCase_ ( self : Union[str, Any] ): return self.image_processor_tester.prepare_image_processor_dict() def lowerCAmelCase_ ( self : Union[str, Any] ): _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__lowerCAmelCase , """do_resize""" ) ) 
self.assertTrue(hasattr(__lowerCAmelCase , """size""" ) ) self.assertTrue(hasattr(__lowerCAmelCase , """do_center_crop""" ) ) self.assertTrue(hasattr(__lowerCAmelCase , """center_crop""" ) ) self.assertTrue(hasattr(__lowerCAmelCase , """do_normalize""" ) ) self.assertTrue(hasattr(__lowerCAmelCase , """image_mean""" ) ) self.assertTrue(hasattr(__lowerCAmelCase , """image_std""" ) ) self.assertTrue(hasattr(__lowerCAmelCase , """do_convert_rgb""" ) ) def lowerCAmelCase_ ( self : Optional[int] ): _UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"""height""": 224, """width""": 224} ) self.assertEqual(image_processor.crop_size , {"""height""": 18, """width""": 18} ) _UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 ) self.assertEqual(image_processor.size , {"""shortest_edge""": 42} ) self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} ) def lowerCAmelCase_ ( self : List[str] ): pass def lowerCAmelCase_ ( self : int ): # Initialize image_processing _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _UpperCAmelCase = self.image_processor_tester.prepare_inputs(equal_resolution=__lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(__lowerCAmelCase , Image.Image ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched _UpperCAmelCase = image_processing(__lowerCAmelCase , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) def lowerCAmelCase_ ( self : List[Any] ): # Initialize image_processing _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _UpperCAmelCase = self.image_processor_tester.prepare_inputs(equal_resolution=__lowerCAmelCase , numpify=__lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(__lowerCAmelCase , np.ndarray ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched _UpperCAmelCase = image_processing(__lowerCAmelCase , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) def lowerCAmelCase_ ( self : List[Any] ): # Initialize image_processing _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _UpperCAmelCase = self.image_processor_tester.prepare_inputs(equal_resolution=__lowerCAmelCase , torchify=__lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(__lowerCAmelCase , torch.Tensor ) # Test not batched input _UpperCAmelCase = 
image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched _UpperCAmelCase = image_processing(__lowerCAmelCase , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) @require_torch @require_vision class a ( lowerCAmelCase_ , unittest.TestCase ): _snake_case : Dict = ChineseCLIPImageProcessor if is_vision_available() else None def lowerCAmelCase_ ( self : Union[str, Any] ): _UpperCAmelCase = ChineseCLIPImageProcessingTester(self , num_channels=4 , do_center_crop=__lowerCAmelCase ) _UpperCAmelCase = 3 @property def lowerCAmelCase_ ( self : Dict ): return self.image_processor_tester.prepare_image_processor_dict() def lowerCAmelCase_ ( self : str ): _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__lowerCAmelCase , """do_resize""" ) ) self.assertTrue(hasattr(__lowerCAmelCase , """size""" ) ) self.assertTrue(hasattr(__lowerCAmelCase , """do_center_crop""" ) ) self.assertTrue(hasattr(__lowerCAmelCase , """center_crop""" ) ) self.assertTrue(hasattr(__lowerCAmelCase , """do_normalize""" ) ) self.assertTrue(hasattr(__lowerCAmelCase , """image_mean""" ) ) self.assertTrue(hasattr(__lowerCAmelCase , """image_std""" ) ) self.assertTrue(hasattr(__lowerCAmelCase , """do_convert_rgb""" ) ) def lowerCAmelCase_ ( self : Tuple ): pass def lowerCAmelCase_ ( self : Optional[Any] ): # Initialize image_processing _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _UpperCAmelCase = self.image_processor_tester.prepare_inputs(equal_resolution=__lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(__lowerCAmelCase , Image.Image ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.expected_encoded_image_num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched _UpperCAmelCase = image_processing(__lowerCAmelCase , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.expected_encoded_image_num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , )
30
"""simple docstring""" import argparse from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline if __name__ == "__main__": UpperCAmelCase__ = argparse.ArgumentParser() parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""") parser.add_argument( """--txt2img_unclip""", default="""kakaobrain/karlo-v1-alpha""", type=str, required=False, help="""The pretrained txt2img unclip.""", ) UpperCAmelCase__ = parser.parse_args() UpperCAmelCase__ = UnCLIPPipeline.from_pretrained(args.txtaimg_unclip) UpperCAmelCase__ = CLIPImageProcessor() UpperCAmelCase__ = CLIPVisionModelWithProjection.from_pretrained("""openai/clip-vit-large-patch14""") UpperCAmelCase__ = UnCLIPImageVariationPipeline( decoder=txtaimg.decoder, text_encoder=txtaimg.text_encoder, tokenizer=txtaimg.tokenizer, text_proj=txtaimg.text_proj, feature_extractor=feature_extractor, image_encoder=image_encoder, super_res_first=txtaimg.super_res_first, super_res_last=txtaimg.super_res_last, decoder_scheduler=txtaimg.decoder_scheduler, super_res_scheduler=txtaimg.super_res_scheduler, ) imgaimg.save_pretrained(args.dump_path)
30
1
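The image-processor test helper above builds channels-first numpy arrays and flips them to channels-last before wrapping them in PIL images; here is a self-contained sketch of that exact round trip (shapes and values are illustrative only).

import numpy as np
from PIL import Image

arr = np.random.randint(255, size=(3, 18, 18), dtype=np.uint8)  # (C, H, W), channels-first
img = Image.fromarray(np.moveaxis(arr, 0, -1))                  # PIL expects (H, W, C)
restored = np.moveaxis(np.asarray(img), -1, 0)                  # back to channels-first
assert restored.shape == arr.shape and np.array_equal(restored, arr)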
"""simple docstring""" import importlib.metadata import warnings from copy import deepcopy from packaging import version from ..utils import logging from .import_utils import is_accelerate_available, is_bitsandbytes_available if is_bitsandbytes_available(): import bitsandbytes as bnb import torch import torch.nn as nn from ..pytorch_utils import ConvaD if is_accelerate_available(): from accelerate import init_empty_weights from accelerate.utils import find_tied_parameters UpperCAmelCase__ = logging.get_logger(__name__) def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ,lowercase=None ,lowercase=None ): """simple docstring""" # Recurse if needed if "." in tensor_name: _UpperCAmelCase = tensor_name.split(""".""" ) for split in splits[:-1]: _UpperCAmelCase = getattr(lowercase ,lowercase ) if new_module is None: raise ValueError(f'''{module} has no attribute {split}.''' ) _UpperCAmelCase = new_module _UpperCAmelCase = splits[-1] if tensor_name not in module._parameters and tensor_name not in module._buffers: raise ValueError(f'''{module} does not have a parameter or a buffer named {tensor_name}.''' ) _UpperCAmelCase = tensor_name in module._buffers _UpperCAmelCase = getattr(lowercase ,lowercase ) if old_value.device == torch.device("""meta""" ) and device not in ["meta", torch.device("""meta""" )] and value is None: raise ValueError(f'''{tensor_name} is on the meta device, we need a `value` to put in on {device}.''' ) _UpperCAmelCase = False _UpperCAmelCase = False if is_buffer or not is_bitsandbytes_available(): _UpperCAmelCase = False _UpperCAmelCase = False else: _UpperCAmelCase = hasattr(bnb.nn ,"""Params4bit""" ) and isinstance(module._parameters[tensor_name] ,bnb.nn.Paramsabit ) _UpperCAmelCase = isinstance(module._parameters[tensor_name] ,bnb.nn.IntaParams ) if is_abit or is_abit: _UpperCAmelCase = module._parameters[tensor_name] if param.device.type != "cuda": if value is None: _UpperCAmelCase = old_value.to(lowercase ) elif isinstance(lowercase ,torch.Tensor ): _UpperCAmelCase = value.to("""cpu""" ) if value.dtype == torch.inta: _UpperCAmelCase = version.parse(importlib.metadata.version("""bitsandbytes""" ) ) > version.parse( """0.37.2""" ) if not is_abit_serializable: raise ValueError( """Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. """ """Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.""" ) else: _UpperCAmelCase = torch.tensor(lowercase ,device="""cpu""" ) # Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization. # Since weights are saved in the correct "orientation", we skip transposing when loading. 
if issubclass(module.source_cls ,lowercase ) and fpaa_statistics is None: _UpperCAmelCase = new_value.T _UpperCAmelCase = old_value.__dict__ if is_abit: _UpperCAmelCase = bnb.nn.IntaParams(lowercase ,requires_grad=lowercase ,**lowercase ).to(lowercase ) elif is_abit: _UpperCAmelCase = bnb.nn.Paramsabit(lowercase ,requires_grad=lowercase ,**lowercase ).to(lowercase ) _UpperCAmelCase = new_value if fpaa_statistics is not None: setattr(module.weight ,"""SCB""" ,fpaa_statistics.to(lowercase ) ) else: if value is None: _UpperCAmelCase = old_value.to(lowercase ) elif isinstance(lowercase ,torch.Tensor ): _UpperCAmelCase = value.to(lowercase ) else: _UpperCAmelCase = torch.tensor(lowercase ,device=lowercase ) if is_buffer: _UpperCAmelCase = new_value else: _UpperCAmelCase = nn.Parameter(lowercase ,requires_grad=old_value.requires_grad ) _UpperCAmelCase = new_value def __UpperCAmelCase ( lowercase ,lowercase=None ,lowercase=None ,lowercase=None ,lowercase=False ): """simple docstring""" for name, module in model.named_children(): if current_key_name is None: _UpperCAmelCase = [] current_key_name.append(lowercase ) if (isinstance(lowercase ,nn.Linear ) or isinstance(lowercase ,lowercase )) and name not in modules_to_not_convert: # Check if the current key is not in the `modules_to_not_convert` if not any(key in """.""".join(lowercase ) for key in modules_to_not_convert ): with init_empty_weights(): if isinstance(lowercase ,lowercase ): _UpperCAmelCase , _UpperCAmelCase = module.weight.shape else: _UpperCAmelCase = module.in_features _UpperCAmelCase = module.out_features if quantization_config.quantization_method() == "llm_int8": _UpperCAmelCase = bnb.nn.LinearabitLt( lowercase ,lowercase ,module.bias is not None ,has_fpaa_weights=quantization_config.llm_inta_has_fpaa_weight ,threshold=quantization_config.llm_inta_threshold ,) _UpperCAmelCase = True else: if ( quantization_config.llm_inta_skip_modules is not None and name in quantization_config.llm_inta_skip_modules ): pass else: _UpperCAmelCase = bnb.nn.Linearabit( lowercase ,lowercase ,module.bias is not None ,quantization_config.bnb_abit_compute_dtype ,compress_statistics=quantization_config.bnb_abit_use_double_quant ,quant_type=quantization_config.bnb_abit_quant_type ,) _UpperCAmelCase = True # Store the module class in case we need to transpose the weight later _UpperCAmelCase = type(lowercase ) # Force requires grad to False to avoid unexpected errors model._modules[name].requires_grad_(lowercase ) if len(list(module.children() ) ) > 0: _UpperCAmelCase , _UpperCAmelCase = _replace_with_bnb_linear( lowercase ,lowercase ,lowercase ,lowercase ,has_been_replaced=lowercase ,) # Remove the last key for recursion current_key_name.pop(-1 ) return model, has_been_replaced def __UpperCAmelCase ( lowercase ,lowercase=None ,lowercase=None ,lowercase=None ): """simple docstring""" _UpperCAmelCase = ["""lm_head"""] if modules_to_not_convert is None else modules_to_not_convert _UpperCAmelCase , _UpperCAmelCase = _replace_with_bnb_linear( lowercase ,lowercase ,lowercase ,lowercase ) if not has_been_replaced: logger.warning( """You are loading your model in 8bit or 4bit but no linear modules were found in your model.""" """ Please double check your model architecture, or submit an issue on github if you think this is""" """ a bug.""" ) return model def __UpperCAmelCase ( *lowercase ,**lowercase ): """simple docstring""" warnings.warn( """`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead""" ,lowercase ,) 
return replace_with_bnb_linear(*lowercase ,**lowercase ) def __UpperCAmelCase ( *lowercase ,**lowercase ): """simple docstring""" warnings.warn( """`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead""" ,lowercase ,) return set_module_quantized_tensor_to_device(*lowercase ,**lowercase ) def __UpperCAmelCase ( lowercase ): """simple docstring""" _UpperCAmelCase = deepcopy(lowercase ) # this has 0 cost since it is done inside `init_empty_weights` context manager tied_model.tie_weights() _UpperCAmelCase = find_tied_parameters(lowercase ) # For compatibility with Accelerate < 0.18 if isinstance(lowercase ,lowercase ): _UpperCAmelCase = sum(list(tied_params.values() ) ,[] ) + list(tied_params.keys() ) else: _UpperCAmelCase = sum(lowercase ,[] ) _UpperCAmelCase = len(lowercase ) > 0 # Check if it is a base model _UpperCAmelCase = not hasattr(lowercase ,model.base_model_prefix ) # Ignore this for base models (BertModel, GPT2Model, etc.) if (not has_tied_params) and is_base_model: return [] # otherwise they have an attached head _UpperCAmelCase = list(model.named_children() ) _UpperCAmelCase = [list_modules[-1][0]] # add last module together with tied weights _UpperCAmelCase = set(lowercase ) - set(lowercase ) _UpperCAmelCase = list(set(lowercase ) ) + list(lowercase ) # remove ".weight" from the keys _UpperCAmelCase = [""".weight""", """.bias"""] _UpperCAmelCase = [] for name in list_untouched: for name_to_remove in names_to_remove: if name_to_remove in name: _UpperCAmelCase = name.replace(lowercase ,"""""" ) filtered_module_names.append(lowercase ) return filtered_module_names
30
"""simple docstring""" import functools import gc import inspect import torch from .imports import is_npu_available, is_xpu_available def __UpperCAmelCase ( *lowercase ): """simple docstring""" if not isinstance(lowercase ,lowercase ): _UpperCAmelCase = list(lowercase ) for i in range(len(lowercase ) ): _UpperCAmelCase = None gc.collect() if is_xpu_available(): torch.xpu.empty_cache() elif is_npu_available(): torch.npu.empty_cache() else: torch.cuda.empty_cache() return objects def __UpperCAmelCase ( lowercase ): """simple docstring""" _UpperCAmelCase = [ """CUDA out of memory.""", # CUDA OOM """cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.""", # CUDNN SNAFU """DefaultCPUAllocator: can't allocate memory""", # CPU OOM ] if isinstance(lowercase ,lowercase ) and len(exception.args ) == 1: return any(err in exception.args[0] for err in _statements ) return False def __UpperCAmelCase ( lowercase = None ,lowercase = 1_28 ): """simple docstring""" if function is None: return functools.partial(lowercase ,starting_batch_size=lowercase ) _UpperCAmelCase = starting_batch_size def decorator(*lowercase ,**lowercase ): nonlocal batch_size gc.collect() if is_xpu_available(): torch.xpu.empty_cache() elif is_npu_available(): torch.npu.empty_cache() else: torch.cuda.empty_cache() _UpperCAmelCase = list(inspect.signature(lowercase ).parameters.keys() ) # Guard against user error if len(lowercase ) < (len(lowercase ) + 1): _UpperCAmelCase = """, """.join([f'''{arg}={value}''' for arg, value in zip(params[1:] ,args[1:] )] ) raise TypeError( f'''Batch size was passed into `{function.__name__}` as the first argument when called.''' f'''Remove this as the decorator already does so: `{function.__name__}({arg_str})`''' ) while True: if batch_size == 0: raise RuntimeError("""No executable batch size found, reached zero.""" ) try: return function(lowercase ,*lowercase ,**lowercase ) except Exception as e: if should_reduce_batch_size(lowercase ): gc.collect() if is_xpu_available(): torch.xpu.empty_cache() elif is_npu_available(): torch.npu.empty_cache() else: torch.cuda.empty_cache() batch_size //= 2 else: raise return decorator
30
1
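The batch-size decorator above retries the wrapped call with a halved batch size whenever an OOM-style failure is detected; below is a stripped-down standalone sketch of the same retry loop, with a made-up training function and failure condition purely for illustration.

import functools

def executable_batch_size(starting_batch_size: int = 128):
    def decorator(function):
        @functools.wraps(function)
        def wrapper(*args, **kwargs):
            batch_size = starting_batch_size
            while batch_size > 0:
                try:
                    return function(batch_size, *args, **kwargs)
                except RuntimeError as exc:  # stand-in for should_reduce_batch_size
                    if "out of memory" not in str(exc):
                        raise
                    batch_size //= 2
            raise RuntimeError("No executable batch size found, reached zero.")
        return wrapper
    return decorator

@executable_batch_size(starting_batch_size=64)
def train(batch_size):
    if batch_size > 16:  # pretend anything above 16 exhausts memory
        raise RuntimeError("CUDA out of memory.")
    return batch_size

print(train())  # 16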
"""simple docstring""" from math import pow, sqrt def __UpperCAmelCase ( *lowercase ): """simple docstring""" _UpperCAmelCase = len(lowercase ) > 0 and all(value > 0.0 for value in values ) return result def __UpperCAmelCase ( lowercase ,lowercase ): """simple docstring""" return ( round(sqrt(molar_mass_a / molar_mass_a ) ,6 ) if validate(lowercase ,lowercase ) else ValueError("""Input Error: Molar mass values must greater than 0.""" ) ) def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ): """simple docstring""" return ( round(effusion_rate * sqrt(molar_mass_a / molar_mass_a ) ,6 ) if validate(lowercase ,lowercase ,lowercase ) else ValueError( """Input Error: Molar mass and effusion rate values must greater than 0.""" ) ) def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ): """simple docstring""" return ( round(effusion_rate / sqrt(molar_mass_a / molar_mass_a ) ,6 ) if validate(lowercase ,lowercase ,lowercase ) else ValueError( """Input Error: Molar mass and effusion rate values must greater than 0.""" ) ) def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ): """simple docstring""" return ( round(molar_mass / pow(effusion_rate_a / effusion_rate_a ,2 ) ,6 ) if validate(lowercase ,lowercase ,lowercase ) else ValueError( """Input Error: Molar mass and effusion rate values must greater than 0.""" ) ) def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ): """simple docstring""" return ( round(pow(effusion_rate_a / effusion_rate_a ,2 ) / molar_mass ,6 ) if validate(lowercase ,lowercase ,lowercase ) else ValueError( """Input Error: Molar mass and effusion rate values must greater than 0.""" ) )
30
"""simple docstring""" from __future__ import annotations import unittest from transformers import MobileBertConfig, is_tf_available from transformers.models.auto import get_values from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_MODEL_FOR_PRETRAINING_MAPPING, TFMobileBertForMaskedLM, TFMobileBertForMultipleChoice, TFMobileBertForNextSentencePrediction, TFMobileBertForPreTraining, TFMobileBertForQuestionAnswering, TFMobileBertForSequenceClassification, TFMobileBertForTokenClassification, TFMobileBertModel, ) @require_tf class a ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): _snake_case : str = ( ( TFMobileBertModel, TFMobileBertForMaskedLM, TFMobileBertForNextSentencePrediction, TFMobileBertForPreTraining, TFMobileBertForQuestionAnswering, TFMobileBertForSequenceClassification, TFMobileBertForTokenClassification, TFMobileBertForMultipleChoice, ) if is_tf_available() else () ) _snake_case : Dict = ( { 'feature-extraction': TFMobileBertModel, 'fill-mask': TFMobileBertForMaskedLM, 'question-answering': TFMobileBertForQuestionAnswering, 'text-classification': TFMobileBertForSequenceClassification, 'token-classification': TFMobileBertForTokenClassification, 'zero-shot': TFMobileBertForSequenceClassification, } if is_tf_available() else {} ) _snake_case : Dict = False _snake_case : List[str] = False def lowerCAmelCase_ ( self : Dict , __lowerCAmelCase : Dict , __lowerCAmelCase : List[Any] , __lowerCAmelCase : int=False ): _UpperCAmelCase = super()._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase ) if return_labels: if model_class in get_values(__lowerCAmelCase ): _UpperCAmelCase = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) return inputs_dict class a ( lowerCAmelCase_ ): def __init__( self : Optional[int] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : str=13 , __lowerCAmelCase : Any=7 , __lowerCAmelCase : Optional[int]=True , __lowerCAmelCase : str=True , __lowerCAmelCase : List[str]=True , __lowerCAmelCase : Tuple=True , __lowerCAmelCase : List[str]=99 , __lowerCAmelCase : Optional[int]=32 , __lowerCAmelCase : str=32 , __lowerCAmelCase : Optional[Any]=2 , __lowerCAmelCase : int=4 , __lowerCAmelCase : Tuple=37 , __lowerCAmelCase : Union[str, Any]="gelu" , __lowerCAmelCase : int=0.1 , __lowerCAmelCase : Optional[Any]=0.1 , __lowerCAmelCase : int=512 , __lowerCAmelCase : List[Any]=16 , __lowerCAmelCase : Optional[Any]=2 , __lowerCAmelCase : Optional[int]=0.02 , __lowerCAmelCase : Optional[int]=3 , __lowerCAmelCase : Dict=4 , __lowerCAmelCase : str=None , ): _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = seq_length _UpperCAmelCase = is_training _UpperCAmelCase = use_input_mask _UpperCAmelCase = use_token_type_ids _UpperCAmelCase = use_labels _UpperCAmelCase = vocab_size _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_act _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = max_position_embeddings _UpperCAmelCase = type_vocab_size _UpperCAmelCase = type_sequence_label_size _UpperCAmelCase = initializer_range _UpperCAmelCase = num_labels _UpperCAmelCase = 
num_choices _UpperCAmelCase = scope _UpperCAmelCase = embedding_size def lowerCAmelCase_ ( self : int ): _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _UpperCAmelCase = None if self.use_input_mask: _UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] ) _UpperCAmelCase = None if self.use_token_type_ids: _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None if self.use_labels: _UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices ) _UpperCAmelCase = MobileBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCAmelCase_ ( self : str , __lowerCAmelCase : List[str] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any ): _UpperCAmelCase = TFMobileBertModel(config=__lowerCAmelCase ) _UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} _UpperCAmelCase = model(__lowerCAmelCase ) _UpperCAmelCase = [input_ids, input_mask] _UpperCAmelCase = model(__lowerCAmelCase ) _UpperCAmelCase = model(__lowerCAmelCase ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def lowerCAmelCase_ ( self : Any , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Any , __lowerCAmelCase : List[str] , __lowerCAmelCase : Any , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[str] ): _UpperCAmelCase = TFMobileBertForMaskedLM(config=__lowerCAmelCase ) _UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} _UpperCAmelCase = model(__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCAmelCase_ ( self : int , __lowerCAmelCase : List[str] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : int , __lowerCAmelCase : Dict , __lowerCAmelCase : Any , __lowerCAmelCase : Dict ): _UpperCAmelCase = TFMobileBertForNextSentencePrediction(config=__lowerCAmelCase ) _UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} _UpperCAmelCase = model(__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) ) def lowerCAmelCase_ ( self : Dict , __lowerCAmelCase : int , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[Any] , 
__lowerCAmelCase : List[str] , __lowerCAmelCase : Tuple ): _UpperCAmelCase = TFMobileBertForPreTraining(config=__lowerCAmelCase ) _UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} _UpperCAmelCase = model(__lowerCAmelCase ) self.parent.assertEqual( result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) ) def lowerCAmelCase_ ( self : int , __lowerCAmelCase : Tuple , __lowerCAmelCase : int , __lowerCAmelCase : Any , __lowerCAmelCase : str , __lowerCAmelCase : Any , __lowerCAmelCase : int , __lowerCAmelCase : Tuple ): _UpperCAmelCase = self.num_labels _UpperCAmelCase = TFMobileBertForSequenceClassification(config=__lowerCAmelCase ) _UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} _UpperCAmelCase = model(__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Dict , __lowerCAmelCase : Any , __lowerCAmelCase : Any , __lowerCAmelCase : int , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Union[str, Any] ): _UpperCAmelCase = self.num_choices _UpperCAmelCase = TFMobileBertForMultipleChoice(config=__lowerCAmelCase ) _UpperCAmelCase = tf.tile(tf.expand_dims(__lowerCAmelCase , 1 ) , (1, self.num_choices, 1) ) _UpperCAmelCase = tf.tile(tf.expand_dims(__lowerCAmelCase , 1 ) , (1, self.num_choices, 1) ) _UpperCAmelCase = tf.tile(tf.expand_dims(__lowerCAmelCase , 1 ) , (1, self.num_choices, 1) ) _UpperCAmelCase = { """input_ids""": multiple_choice_inputs_ids, """attention_mask""": multiple_choice_input_mask, """token_type_ids""": multiple_choice_token_type_ids, } _UpperCAmelCase = model(__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Any , __lowerCAmelCase : Dict , __lowerCAmelCase : List[str] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any ): _UpperCAmelCase = self.num_labels _UpperCAmelCase = TFMobileBertForTokenClassification(config=__lowerCAmelCase ) _UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} _UpperCAmelCase = model(__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCAmelCase_ ( self : Dict , __lowerCAmelCase : str , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : int , __lowerCAmelCase : Any , __lowerCAmelCase : str , __lowerCAmelCase : List[Any] ): _UpperCAmelCase = TFMobileBertForQuestionAnswering(config=__lowerCAmelCase ) _UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} _UpperCAmelCase = model(__lowerCAmelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCAmelCase_ ( self : Tuple ): _UpperCAmelCase = self.prepare_config_and_inputs() ( ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ) = config_and_inputs _UpperCAmelCase = 
{"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask} return config, inputs_dict def lowerCAmelCase_ ( self : str ): _UpperCAmelCase = TFMobileBertModelTest.TFMobileBertModelTester(self ) _UpperCAmelCase = ConfigTester(self , config_class=__lowerCAmelCase , hidden_size=37 ) def lowerCAmelCase_ ( self : Any ): self.config_tester.run_common_tests() def lowerCAmelCase_ ( self : Any ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_model(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : List[Any] ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_masked_lm(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : Union[str, Any] ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_multiple_choice(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : Any ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : Optional[int] ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_pretraining(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : str ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_question_answering(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : str ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_sequence_classification(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : str ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_token_classification(*__lowerCAmelCase ) @slow def lowerCAmelCase_ ( self : int ): # for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: for model_name in ["google/mobilebert-uncased"]: _UpperCAmelCase = TFMobileBertModel.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) @require_tf class a ( unittest.TestCase ): @slow def lowerCAmelCase_ ( self : List[Any] ): _UpperCAmelCase = TFMobileBertForPreTraining.from_pretrained("""google/mobilebert-uncased""" ) _UpperCAmelCase = tf.constant([[0, 1, 2, 3, 4, 5]] ) _UpperCAmelCase = model(__lowerCAmelCase )[0] _UpperCAmelCase = [1, 6, 3_0522] self.assertEqual(output.shape , __lowerCAmelCase ) _UpperCAmelCase = tf.constant( [ [ [-4.5_919_547, -9.248_295, -9.645_256], [-6.7_306_175, -6.440_284, -6.6_052_837], [-7.2_743_506, -6.7_847_915, -6.024_673], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , __lowerCAmelCase , atol=1e-4 )
30
1
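To make the effusion-rate relations above concrete: Graham's law says rates scale with the inverse square root of molar mass, so hydrogen (~2 g/mol) should effuse roughly four times faster than oxygen (~32 g/mol). A one-off numeric check, using rounded textbook molar masses:

from math import sqrt

molar_mass_h2, molar_mass_o2 = 2.016, 31.998  # g/mol
print(round(sqrt(molar_mass_o2 / molar_mass_h2), 6))  # ~3.984: H2 effuses about 4x faster than O2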
"""simple docstring""" def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ,lowercase ,lowercase ): """simple docstring""" if index == number_of_items: return 0 _UpperCAmelCase = 0 _UpperCAmelCase = 0 _UpperCAmelCase = knapsack(lowercase ,lowercase ,lowercase ,lowercase ,index + 1 ) if weights[index] <= max_weight: _UpperCAmelCase = values[index] + knapsack( lowercase ,lowercase ,lowercase ,max_weight - weights[index] ,index + 1 ) return max(lowercase ,lowercase ) if __name__ == "__main__": import doctest doctest.testmod()
30
"""simple docstring""" from ....configuration_utils import PretrainedConfig from ....utils import logging UpperCAmelCase__ = logging.get_logger(__name__) UpperCAmelCase__ = { """Visual-Attention-Network/van-base""": ( """https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json""" ), } class a ( lowerCAmelCase_ ): _snake_case : int = 'van' def __init__( self : Any , __lowerCAmelCase : Tuple=224 , __lowerCAmelCase : List[Any]=3 , __lowerCAmelCase : Tuple=[7, 3, 3, 3] , __lowerCAmelCase : Dict=[4, 2, 2, 2] , __lowerCAmelCase : Optional[Any]=[64, 128, 320, 512] , __lowerCAmelCase : Optional[int]=[3, 3, 12, 3] , __lowerCAmelCase : Dict=[8, 8, 4, 4] , __lowerCAmelCase : int="gelu" , __lowerCAmelCase : Optional[int]=0.02 , __lowerCAmelCase : List[str]=1e-6 , __lowerCAmelCase : Optional[int]=1e-2 , __lowerCAmelCase : Any=0.0 , __lowerCAmelCase : List[str]=0.0 , **__lowerCAmelCase : Any , ): super().__init__(**__lowerCAmelCase ) _UpperCAmelCase = image_size _UpperCAmelCase = num_channels _UpperCAmelCase = patch_sizes _UpperCAmelCase = strides _UpperCAmelCase = hidden_sizes _UpperCAmelCase = depths _UpperCAmelCase = mlp_ratios _UpperCAmelCase = hidden_act _UpperCAmelCase = initializer_range _UpperCAmelCase = layer_norm_eps _UpperCAmelCase = layer_scale_init_value _UpperCAmelCase = drop_path_rate _UpperCAmelCase = dropout_rate
30
1
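For readability, here is a de-obfuscated rendering of the recursive knapsack above with a worked call; the parameter names and their ordering are mine, but the skip-or-take logic mirrors the sample.

def knapsack(capacity, weights, values, number_of_items, index=0):
    if index == number_of_items:
        return 0
    # Option 1: skip this item entirely.
    skip = knapsack(capacity, weights, values, number_of_items, index + 1)
    # Option 2: take it, if it fits, and recurse on the remaining capacity.
    take = 0
    if weights[index] <= capacity:
        take = values[index] + knapsack(
            capacity - weights[index], weights, values, number_of_items, index + 1
        )
    return max(skip, take)

print(knapsack(50, [10, 20, 30], [60, 100, 120], 3))  # 220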
"""simple docstring""" from collections import OrderedDict from typing import Any, List, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast, PatchingSpec from ...utils import logging UpperCAmelCase__ = logging.get_logger(__name__) UpperCAmelCase__ = { """EleutherAI/gpt-j-6B""": """https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json""", # See all GPT-J models at https://huggingface.co/models?filter=gpt_j } class a ( lowerCAmelCase_ ): _snake_case : Union[str, Any] = 'gptj' _snake_case : int = { 'max_position_embeddings': 'n_positions', 'hidden_size': 'n_embd', 'num_attention_heads': 'n_head', 'num_hidden_layers': 'n_layer', } def __init__( self : Any , __lowerCAmelCase : int=5_0400 , __lowerCAmelCase : Optional[Any]=2048 , __lowerCAmelCase : Optional[Any]=4096 , __lowerCAmelCase : str=28 , __lowerCAmelCase : str=16 , __lowerCAmelCase : Tuple=64 , __lowerCAmelCase : Optional[Any]=None , __lowerCAmelCase : Dict="gelu_new" , __lowerCAmelCase : Dict=0.0 , __lowerCAmelCase : List[str]=0.0 , __lowerCAmelCase : Dict=0.0 , __lowerCAmelCase : Union[str, Any]=1e-5 , __lowerCAmelCase : Optional[Any]=0.02 , __lowerCAmelCase : List[Any]=True , __lowerCAmelCase : int=5_0256 , __lowerCAmelCase : str=5_0256 , __lowerCAmelCase : Union[str, Any]=False , **__lowerCAmelCase : Tuple , ): _UpperCAmelCase = vocab_size _UpperCAmelCase = n_positions _UpperCAmelCase = n_embd _UpperCAmelCase = n_layer _UpperCAmelCase = n_head _UpperCAmelCase = n_inner _UpperCAmelCase = rotary_dim _UpperCAmelCase = activation_function _UpperCAmelCase = resid_pdrop _UpperCAmelCase = embd_pdrop _UpperCAmelCase = attn_pdrop _UpperCAmelCase = layer_norm_epsilon _UpperCAmelCase = initializer_range _UpperCAmelCase = use_cache _UpperCAmelCase = bos_token_id _UpperCAmelCase = eos_token_id super().__init__( bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , tie_word_embeddings=__lowerCAmelCase , **__lowerCAmelCase ) class a ( lowerCAmelCase_ ): def __init__( self : Optional[int] , __lowerCAmelCase : PretrainedConfig , __lowerCAmelCase : str = "default" , __lowerCAmelCase : List[PatchingSpec] = None , __lowerCAmelCase : bool = False , ): super().__init__(__lowerCAmelCase , task=__lowerCAmelCase , patching_specs=__lowerCAmelCase , use_past=__lowerCAmelCase ) if not getattr(self._config , """pad_token_id""" , __lowerCAmelCase ): # TODO: how to do that better? 
_UpperCAmelCase = 0 @property def lowerCAmelCase_ ( self : Tuple ): _UpperCAmelCase = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} ) if self.use_past: self.fill_with_past_key_values_(__lowerCAmelCase , direction="""inputs""" ) _UpperCAmelCase = {0: """batch""", 1: """past_sequence + sequence"""} else: _UpperCAmelCase = {0: """batch""", 1: """sequence"""} return common_inputs @property def lowerCAmelCase_ ( self : List[str] ): return self._config.n_layer @property def lowerCAmelCase_ ( self : str ): return self._config.n_head def lowerCAmelCase_ ( self : int , __lowerCAmelCase : PreTrainedTokenizer , __lowerCAmelCase : int = -1 , __lowerCAmelCase : int = -1 , __lowerCAmelCase : bool = False , __lowerCAmelCase : Optional[TensorType] = None , ): _UpperCAmelCase = super(__lowerCAmelCase , self ).generate_dummy_inputs( __lowerCAmelCase , batch_size=__lowerCAmelCase , seq_length=__lowerCAmelCase , is_pair=__lowerCAmelCase , framework=__lowerCAmelCase ) # We need to order the inputs in the way they appear in the forward() _UpperCAmelCase = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" ) else: import torch _UpperCAmelCase , _UpperCAmelCase = common_inputs["""input_ids"""].shape # Not using the same length for past_key_values _UpperCAmelCase = seqlen + 2 _UpperCAmelCase = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) _UpperCAmelCase = [ (torch.zeros(__lowerCAmelCase ), torch.zeros(__lowerCAmelCase )) for _ in range(self.num_layers ) ] _UpperCAmelCase = common_inputs["""attention_mask"""] if self.use_past: _UpperCAmelCase = ordered_inputs["""attention_mask"""].dtype _UpperCAmelCase = torch.cat( [ordered_inputs["""attention_mask"""], torch.ones(__lowerCAmelCase , __lowerCAmelCase , dtype=__lowerCAmelCase )] , dim=1 ) return ordered_inputs @property def lowerCAmelCase_ ( self : Tuple ): return 13
30
"""simple docstring""" def __UpperCAmelCase ( lowercase = 10_00 ): """simple docstring""" _UpperCAmelCase = 2**power _UpperCAmelCase = 0 while n: _UpperCAmelCase , _UpperCAmelCase = r + n % 10, n // 10 return r if __name__ == "__main__": print(solution(int(str(input()).strip())))
30
1
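The digit-sum loop above, which sums the decimal digits of 2**power (1000 by default), can be cross-checked in one line with Python's arbitrary-precision integers:

print(sum(int(digit) for digit in str(2**1000)))  # 1366, matching the loop-based solution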
"""simple docstring""" def __UpperCAmelCase ( lowercase ): """simple docstring""" _UpperCAmelCase = [0] * len(lowercase ) _UpperCAmelCase = [] _UpperCAmelCase = [1] * len(lowercase ) for values in graph.values(): for i in values: indegree[i] += 1 for i in range(len(lowercase ) ): if indegree[i] == 0: queue.append(lowercase ) while queue: _UpperCAmelCase = queue.pop(0 ) for x in graph[vertex]: indegree[x] -= 1 if long_dist[vertex] + 1 > long_dist[x]: _UpperCAmelCase = long_dist[vertex] + 1 if indegree[x] == 0: queue.append(lowercase ) print(max(lowercase ) ) # Adjacency list of Graph UpperCAmelCase__ = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []} longest_distance(graph)
30
"""simple docstring""" from typing import Any, Dict, List, Optional, Tuple, Union import torch from torch import nn from torch.utils.data import DistributedSampler, RandomSampler from transformers import PreTrainedModel, Trainer, logging from transformers.integrations import is_fairscale_available from transformers.models.fsmt.configuration_fsmt import FSMTConfig from transformers.optimization import ( Adafactor, AdamW, get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) from transformers.trainer_pt_utils import get_tpu_sampler from transformers.training_args import ParallelMode from transformers.utils import is_torch_tpu_available if is_fairscale_available(): from fairscale.optim import OSS UpperCAmelCase__ = logging.get_logger(__name__) UpperCAmelCase__ = { """linear""": get_linear_schedule_with_warmup, """cosine""": get_cosine_schedule_with_warmup, """cosine_w_restarts""": get_cosine_with_hard_restarts_schedule_with_warmup, """polynomial""": get_polynomial_decay_schedule_with_warmup, """constant""": get_constant_schedule, """constant_w_warmup""": get_constant_schedule_with_warmup, } class a ( lowerCAmelCase_ ): def __init__( self : Optional[int] , __lowerCAmelCase : Any=None , __lowerCAmelCase : Any=None , *__lowerCAmelCase : Union[str, Any] , **__lowerCAmelCase : Optional[int] ): super().__init__(*__lowerCAmelCase , **__lowerCAmelCase ) if config is None: assert isinstance(self.model , __lowerCAmelCase ), ( "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is" f''' {self.model.__class__}''' ) _UpperCAmelCase = self.model.config else: _UpperCAmelCase = config _UpperCAmelCase = data_args _UpperCAmelCase = self.config.tgt_vocab_size if isinstance(self.config , __lowerCAmelCase ) else self.config.vocab_size if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss): assert self.config.pad_token_id is not None, ( "Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss" " calculation or doing label smoothing." ) if self.config.pad_token_id is None and self.config.eos_token_id is not None: logger.warning( f'''The `config.pad_token_id` is `None`. 
Using `config.eos_token_id` = {self.config.eos_token_id} for''' """ padding..""" ) if self.args.label_smoothing == 0: _UpperCAmelCase = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id ) else: # dynamically import label_smoothed_nll_loss from utils import label_smoothed_nll_loss _UpperCAmelCase = label_smoothed_nll_loss def lowerCAmelCase_ ( self : Dict , __lowerCAmelCase : int ): if self.optimizer is None: _UpperCAmelCase = ["""bias""", """LayerNorm.weight"""] _UpperCAmelCase = [ { """params""": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )], """weight_decay""": self.args.weight_decay, }, { """params""": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )], """weight_decay""": 0.0, }, ] _UpperCAmelCase = Adafactor if self.args.adafactor else AdamW if self.args.adafactor: _UpperCAmelCase = Adafactor _UpperCAmelCase = {"""scale_parameter""": False, """relative_step""": False} else: _UpperCAmelCase = AdamW _UpperCAmelCase = { """betas""": (self.args.adam_betaa, self.args.adam_betaa), """eps""": self.args.adam_epsilon, } _UpperCAmelCase = self.args.learning_rate if self.sharded_ddp: _UpperCAmelCase = OSS( params=__lowerCAmelCase , optim=__lowerCAmelCase , **__lowerCAmelCase , ) else: _UpperCAmelCase = optimizer_cls(__lowerCAmelCase , **__lowerCAmelCase ) if self.lr_scheduler is None: _UpperCAmelCase = self._get_lr_scheduler(__lowerCAmelCase ) else: # ignoring --lr_scheduler logger.warning("""scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.""" ) def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : List[str] ): _UpperCAmelCase = arg_to_scheduler[self.args.lr_scheduler] if self.args.lr_scheduler == "constant": _UpperCAmelCase = schedule_func(self.optimizer ) elif self.args.lr_scheduler == "constant_w_warmup": _UpperCAmelCase = schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps ) else: _UpperCAmelCase = schedule_func( self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=__lowerCAmelCase ) return scheduler def lowerCAmelCase_ ( self : Optional[int] ): if isinstance(self.train_dataset , torch.utils.data.IterableDataset ): return None elif is_torch_tpu_available(): return get_tpu_sampler(self.train_dataset ) else: if self.args.sortish_sampler: self.train_dataset.make_sortish_sampler( self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , ) return ( RandomSampler(self.train_dataset ) if self.args.local_rank == -1 else DistributedSampler(self.train_dataset ) ) def lowerCAmelCase_ ( self : Optional[int] , __lowerCAmelCase : str , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Tuple ): if self.args.label_smoothing == 0: if self.data_args is not None and self.data_args.ignore_pad_token_for_loss: # force training to ignore pad token _UpperCAmelCase = model(**__lowerCAmelCase , use_cache=__lowerCAmelCase )[0] _UpperCAmelCase = self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) ) else: # compute usual loss via models _UpperCAmelCase , _UpperCAmelCase = model(**__lowerCAmelCase , labels=__lowerCAmelCase , use_cache=__lowerCAmelCase )[:2] else: # compute label smoothed loss _UpperCAmelCase = model(**__lowerCAmelCase , use_cache=__lowerCAmelCase )[0] _UpperCAmelCase = torch.nn.functional.log_softmax(__lowerCAmelCase , dim=-1 ) _UpperCAmelCase , _UpperCAmelCase = self.loss_fn(__lowerCAmelCase , __lowerCAmelCase , self.args.label_smoothing , 
ignore_index=self.config.pad_token_id ) return loss, logits def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : int ): _UpperCAmelCase = inputs.pop("""labels""" ) _UpperCAmelCase , _UpperCAmelCase = self._compute_loss(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) return loss def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : nn.Module , __lowerCAmelCase : Dict[str, Union[torch.Tensor, Any]] , __lowerCAmelCase : bool , __lowerCAmelCase : Optional[List[str]] = None , ): _UpperCAmelCase = self._prepare_inputs(__lowerCAmelCase ) _UpperCAmelCase = { """max_length""": self.data_args.val_max_target_length if self.data_args is not None else self.config.max_length, """num_beams""": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams, } if self.args.predict_with_generate and not self.args.prediction_loss_only: _UpperCAmelCase = self.model.generate( inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , **__lowerCAmelCase , ) # in case the batch is shorter than max length, the output should be padded if generated_tokens.shape[-1] < gen_kwargs["max_length"]: _UpperCAmelCase = self._pad_tensors_to_max_len(__lowerCAmelCase , gen_kwargs["""max_length"""] ) _UpperCAmelCase = inputs.pop("""labels""" ) with torch.no_grad(): # compute loss on predict data _UpperCAmelCase , _UpperCAmelCase = self._compute_loss(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase = loss.mean().detach() if self.args.prediction_loss_only: return (loss, None, None) _UpperCAmelCase = generated_tokens if self.args.predict_with_generate else logits if labels.shape[-1] < gen_kwargs["max_length"]: _UpperCAmelCase = self._pad_tensors_to_max_len(__lowerCAmelCase , gen_kwargs["""max_length"""] ) return (loss, logits, labels) def lowerCAmelCase_ ( self : Union[str, Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[str] ): # If PAD token is not defined at least EOS token has to be defined _UpperCAmelCase = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id if pad_token_id is None: raise ValueError( """Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be""" f''' padded to `max_length`={max_length}''' ) _UpperCAmelCase = pad_token_id * torch.ones( (tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device ) _UpperCAmelCase = tensor return padded_tensor
30
1
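For the adjacency list hard-coded in the longest-distance sample above, the longest chain is 0 -> 2 -> 5 -> 6 -> 7 (five vertices), so the routine prints 5; the compact re-check below uses the same Kahn-style relaxation with readable identifiers (the names are mine).

graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
indegree = {v: 0 for v in graph}
for targets in graph.values():
    for t in targets:
        indegree[t] += 1
dist = {v: 1 for v in graph}  # every vertex counts itself as distance 1
queue = [v for v in graph if indegree[v] == 0]
while queue:
    v = queue.pop(0)
    for t in graph[v]:
        dist[t] = max(dist[t], dist[v] + 1)
        indegree[t] -= 1
        if indegree[t] == 0:
            queue.append(t)
print(max(dist.values()))  # 5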
"""simple docstring""" def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ): """simple docstring""" def count_of_possible_combinations(lowercase ) -> int: if target < 0: return 0 if target == 0: return 1 return sum(count_of_possible_combinations(target - item ) for item in array ) return count_of_possible_combinations(lowercase ) def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ): """simple docstring""" def count_of_possible_combinations_with_dp_array( lowercase ,lowercase ) -> int: if target < 0: return 0 if target == 0: return 1 if dp_array[target] != -1: return dp_array[target] _UpperCAmelCase = sum( count_of_possible_combinations_with_dp_array(target - item ,lowercase ) for item in array ) _UpperCAmelCase = answer return answer _UpperCAmelCase = [-1] * (target + 1) return count_of_possible_combinations_with_dp_array(lowercase ,lowercase ) def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ): """simple docstring""" _UpperCAmelCase = [0] * (target + 1) _UpperCAmelCase = 1 for i in range(1 ,target + 1 ): for j in range(lowercase ): if i - array[j] >= 0: dp_array[i] += dp_array[i - array[j]] return dp_array[target] if __name__ == "__main__": import doctest doctest.testmod() UpperCAmelCase__ = 3 UpperCAmelCase__ = 5 UpperCAmelCase__ = [1, 2, 5] print(combination_sum_iv(n, array, target))
30
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available UpperCAmelCase__ = { """configuration_git""": ["""GIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GitConfig""", """GitVisionConfig"""], """processing_git""": ["""GitProcessor"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ = [ """GIT_PRETRAINED_MODEL_ARCHIVE_LIST""", """GitForCausalLM""", """GitModel""", """GitPreTrainedModel""", """GitVisionModel""", ] if TYPE_CHECKING: from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig from .processing_git import GitProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_git import ( GIT_PRETRAINED_MODEL_ARCHIVE_LIST, GitForCausalLM, GitModel, GitPreTrainedModel, GitVisionModel, ) else: import sys UpperCAmelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
30
1
"""simple docstring""" from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline else: from .pipeline_unclip import UnCLIPPipeline from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline from .text_proj import UnCLIPTextProjModel
30
"""simple docstring""" import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from timm.data import resolve_data_config from timm.data.transforms_factory import create_transform from transformers import ( BitConfig, ViTHybridConfig, ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel, ) from transformers.image_utils import PILImageResampling from transformers.utils import logging logging.set_verbosity_info() UpperCAmelCase__ = logging.get_logger(__name__) def __UpperCAmelCase ( lowercase ,lowercase=False ): """simple docstring""" _UpperCAmelCase = [] # fmt: off # stem: rename_keys.append(("""cls_token""", """vit.embeddings.cls_token""") ) rename_keys.append(("""pos_embed""", """vit.embeddings.position_embeddings""") ) rename_keys.append(("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight""") ) rename_keys.append(("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias""") ) # backbone rename_keys.append(("""patch_embed.backbone.stem.conv.weight""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight""") ) rename_keys.append(("""patch_embed.backbone.stem.norm.weight""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight""") ) rename_keys.append(("""patch_embed.backbone.stem.norm.bias""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias""") ) for stage_idx in range(len(config.backbone_config.depths ) ): for layer_idx in range(config.backbone_config.depths[stage_idx] ): rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight''') ) rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight''') ) rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias''') ) rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight''') ) rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight''') ) rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias''') ) rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight''') ) rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight''') ) rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias''') ) 
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight''') ) rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight''') ) rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias''') ) # transformer encoder for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''vit.encoder.layer.{i}.layernorm_before.weight''') ) rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''vit.encoder.layer.{i}.layernorm_before.bias''') ) rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''vit.encoder.layer.{i}.attention.output.dense.weight''') ) rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''vit.encoder.layer.{i}.attention.output.dense.bias''') ) rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''vit.encoder.layer.{i}.layernorm_after.weight''') ) rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''vit.encoder.layer.{i}.layernorm_after.bias''') ) rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''vit.encoder.layer.{i}.intermediate.dense.weight''') ) rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''vit.encoder.layer.{i}.intermediate.dense.bias''') ) rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''vit.encoder.layer.{i}.output.dense.weight''') ) rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''vit.encoder.layer.{i}.output.dense.bias''') ) if base_model: # layernorm + pooler rename_keys.extend( [ ("""norm.weight""", """layernorm.weight"""), ("""norm.bias""", """layernorm.bias"""), ("""pre_logits.fc.weight""", """pooler.dense.weight"""), ("""pre_logits.fc.bias""", """pooler.dense.bias"""), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" _UpperCAmelCase = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ("""norm.weight""", """vit.layernorm.weight"""), ("""norm.bias""", """vit.layernorm.bias"""), ("""head.weight""", """classifier.weight"""), ("""head.bias""", """classifier.bias"""), ] ) # fmt: on return rename_keys def __UpperCAmelCase ( lowercase ,lowercase ,lowercase=False ): """simple docstring""" for i in range(config.num_hidden_layers ): if base_model: _UpperCAmelCase = """""" else: _UpperCAmelCase = """vit.""" # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) _UpperCAmelCase = state_dict.pop(f'''blocks.{i}.attn.qkv.weight''' ) _UpperCAmelCase = state_dict.pop(f'''blocks.{i}.attn.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict _UpperCAmelCase = in_proj_weight[ : config.hidden_size, : ] _UpperCAmelCase = in_proj_bias[: config.hidden_size] _UpperCAmelCase = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] _UpperCAmelCase = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] _UpperCAmelCase = in_proj_weight[ -config.hidden_size :, : ] _UpperCAmelCase = in_proj_bias[-config.hidden_size :] def __UpperCAmelCase ( lowercase ): """simple docstring""" 
_UpperCAmelCase = ["""head.weight""", """head.bias"""] for k in ignore_keys: state_dict.pop(lowercase ,lowercase ) def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ): """simple docstring""" _UpperCAmelCase = dct.pop(lowercase ) _UpperCAmelCase = val def __UpperCAmelCase ( ): """simple docstring""" _UpperCAmelCase = """http://images.cocodataset.org/val2017/000000039769.jpg""" _UpperCAmelCase = Image.open(requests.get(lowercase ,stream=lowercase ).raw ) return im @torch.no_grad() def __UpperCAmelCase ( lowercase ,lowercase ,lowercase=False ): """simple docstring""" _UpperCAmelCase = BitConfig( global_padding="""same""" ,layer_type="""bottleneck""" ,depths=(3, 4, 9) ,out_features=["""stage3"""] ,embedding_dynamic_padding=lowercase ,) _UpperCAmelCase = ViTHybridConfig(backbone_config=lowercase ,image_size=3_84 ,num_labels=10_00 ) _UpperCAmelCase = False # load original model from timm _UpperCAmelCase = timm.create_model(lowercase ,pretrained=lowercase ) timm_model.eval() # load state_dict of original model, remove and rename some keys _UpperCAmelCase = timm_model.state_dict() if base_model: remove_classification_head_(lowercase ) _UpperCAmelCase = create_rename_keys(lowercase ,lowercase ) for src, dest in rename_keys: rename_key(lowercase ,lowercase ,lowercase ) read_in_q_k_v(lowercase ,lowercase ,lowercase ) _UpperCAmelCase = """huggingface/label-files""" _UpperCAmelCase = """imagenet-1k-id2label.json""" _UpperCAmelCase = json.load(open(hf_hub_download(lowercase ,lowercase ,repo_type="""dataset""" ) ,"""r""" ) ) _UpperCAmelCase = {int(lowercase ): v for k, v in idalabel.items()} _UpperCAmelCase = idalabel _UpperCAmelCase = {v: k for k, v in idalabel.items()} # load HuggingFace model if vit_name[-5:] == "in21k": _UpperCAmelCase = ViTHybridModel(lowercase ).eval() else: _UpperCAmelCase = ViTHybridForImageClassification(lowercase ).eval() model.load_state_dict(lowercase ) # create image processor _UpperCAmelCase = create_transform(**resolve_data_config({} ,model=lowercase ) ) _UpperCAmelCase = transform.transforms _UpperCAmelCase = { """bilinear""": PILImageResampling.BILINEAR, """bicubic""": PILImageResampling.BICUBIC, """nearest""": PILImageResampling.NEAREST, } _UpperCAmelCase = ViTHybridImageProcessor( do_resize=lowercase ,size={"""shortest_edge""": timm_transforms[0].size} ,resample=pillow_resamplings[timm_transforms[0].interpolation.value] ,do_center_crop=lowercase ,crop_size={"""height""": timm_transforms[1].size[0], """width""": timm_transforms[1].size[1]} ,do_normalize=lowercase ,image_mean=timm_transforms[-1].mean.tolist() ,image_std=timm_transforms[-1].std.tolist() ,) _UpperCAmelCase = prepare_img() _UpperCAmelCase = transform(lowercase ).unsqueeze(0 ) _UpperCAmelCase = processor(lowercase ,return_tensors="""pt""" ).pixel_values # verify pixel values assert torch.allclose(lowercase ,lowercase ) # verify logits with torch.no_grad(): _UpperCAmelCase = model(lowercase ) _UpperCAmelCase = outputs.logits print("""Predicted class:""" ,logits.argmax(-1 ).item() ) if base_model: _UpperCAmelCase = timm_model.forward_features(lowercase ) assert timm_pooled_output.shape == outputs.pooler_output.shape assert torch.allclose(lowercase ,outputs.pooler_output ,atol=1E-3 ) else: _UpperCAmelCase = timm_model(lowercase ) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(lowercase ,outputs.logits ,atol=1E-3 ) print("""Looks ok!""" ) if pytorch_dump_folder_path is not None: Path(lowercase ).mkdir(exist_ok=lowercase ) print(f'''Saving model {vit_name} to 
{pytorch_dump_folder_path}''' ) model.save_pretrained(lowercase ) print(f'''Saving processor to {pytorch_dump_folder_path}''' ) processor.save_pretrained(lowercase ) if push_to_hub: print(f'''Pushing model and processor to the hub {vit_name}''' ) model.push_to_hub(f'''ybelkada/{vit_name}''' ) processor.push_to_hub(f'''ybelkada/{vit_name}''' ) if __name__ == "__main__": UpperCAmelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--vit_name""", default="""vit_base_r50_s16_384""", type=str, help="""Name of the hybrid ViT timm model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub.""" ) UpperCAmelCase__ = parser.parse_args() convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
30
1
"""simple docstring""" from queue import Queue from typing import TYPE_CHECKING, Optional if TYPE_CHECKING: from ..models.auto import AutoTokenizer class a : def lowerCAmelCase_ ( self : int , __lowerCAmelCase : Tuple ): raise NotImplementedError() def lowerCAmelCase_ ( self : List[Any] ): raise NotImplementedError() class a ( lowerCAmelCase_ ): def __init__( self : List[Any] , __lowerCAmelCase : "AutoTokenizer" , __lowerCAmelCase : bool = False , **__lowerCAmelCase : Optional[Any] ): _UpperCAmelCase = tokenizer _UpperCAmelCase = skip_prompt _UpperCAmelCase = decode_kwargs # variables used in the streaming process _UpperCAmelCase = [] _UpperCAmelCase = 0 _UpperCAmelCase = True def lowerCAmelCase_ ( self : Dict , __lowerCAmelCase : int ): if len(value.shape ) > 1 and value.shape[0] > 1: raise ValueError("""TextStreamer only supports batch size 1""" ) elif len(value.shape ) > 1: _UpperCAmelCase = value[0] if self.skip_prompt and self.next_tokens_are_prompt: _UpperCAmelCase = False return # Add the new token to the cache and decodes the entire thing. self.token_cache.extend(value.tolist() ) _UpperCAmelCase = self.tokenizer.decode(self.token_cache , **self.decode_kwargs ) # After the symbol for a new line, we flush the cache. if text.endswith("""\n""" ): _UpperCAmelCase = text[self.print_len :] _UpperCAmelCase = [] _UpperCAmelCase = 0 # If the last token is a CJK character, we print the characters. elif len(__lowerCAmelCase ) > 0 and self._is_chinese_char(ord(text[-1] ) ): _UpperCAmelCase = text[self.print_len :] self.print_len += len(__lowerCAmelCase ) # Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words, # which may change with the subsequent token -- there are probably smarter ways to do this!) else: _UpperCAmelCase = text[self.print_len : text.rfind(""" """ ) + 1] self.print_len += len(__lowerCAmelCase ) self.on_finalized_text(__lowerCAmelCase ) def lowerCAmelCase_ ( self : int ): # Flush the cache, if it exists if len(self.token_cache ) > 0: _UpperCAmelCase = self.tokenizer.decode(self.token_cache , **self.decode_kwargs ) _UpperCAmelCase = text[self.print_len :] _UpperCAmelCase = [] _UpperCAmelCase = 0 else: _UpperCAmelCase = """""" _UpperCAmelCase = True self.on_finalized_text(__lowerCAmelCase , stream_end=__lowerCAmelCase ) def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : str , __lowerCAmelCase : bool = False ): print(__lowerCAmelCase , flush=__lowerCAmelCase , end="""""" if not stream_end else None ) def lowerCAmelCase_ ( self : Any , __lowerCAmelCase : Union[str, Any] ): # This defines a "chinese character" as anything in the CJK Unicode block: # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) # # Note that the CJK Unicode block is NOT all Japanese and Korean characters, # despite its name. The modern Korean Hangul alphabet is a different block, # as is Japanese Hiragana and Katakana. Those alphabets are used to write # space-separated words, so they are not treated specially and handled # like the all of the other languages. 
if ( (cp >= 0x4e_00 and cp <= 0x9f_ff) or (cp >= 0x34_00 and cp <= 0x4d_bf) # or (cp >= 0x2_00_00 and cp <= 0x2_a6_df) # or (cp >= 0x2_a7_00 and cp <= 0x2_b7_3f) # or (cp >= 0x2_b7_40 and cp <= 0x2_b8_1f) # or (cp >= 0x2_b8_20 and cp <= 0x2_ce_af) # or (cp >= 0xf9_00 and cp <= 0xfa_ff) or (cp >= 0x2_f8_00 and cp <= 0x2_fa_1f) # ): # return True return False class a ( lowerCAmelCase_ ): def __init__( self : Tuple , __lowerCAmelCase : "AutoTokenizer" , __lowerCAmelCase : bool = False , __lowerCAmelCase : Optional[float] = None , **__lowerCAmelCase : List[str] ): super().__init__(__lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ) _UpperCAmelCase = Queue() _UpperCAmelCase = None _UpperCAmelCase = timeout def lowerCAmelCase_ ( self : Tuple , __lowerCAmelCase : str , __lowerCAmelCase : bool = False ): self.text_queue.put(__lowerCAmelCase , timeout=self.timeout ) if stream_end: self.text_queue.put(self.stop_signal , timeout=self.timeout ) def __iter__( self : Optional[Any] ): return self def lowerCAmelCase_ ( self : str ): _UpperCAmelCase = self.text_queue.get(timeout=self.timeout ) if value == self.stop_signal: raise StopIteration() else: return value
30
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_speech_available, is_torch_available, ) UpperCAmelCase__ = { """configuration_trocr""": ["""TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TrOCRConfig"""], """processing_trocr""": ["""TrOCRProcessor"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ = [ """TROCR_PRETRAINED_MODEL_ARCHIVE_LIST""", """TrOCRForCausalLM""", """TrOCRPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig from .processing_trocr import TrOCRProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel else: import sys UpperCAmelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
30
1
"""simple docstring""" from collections.abc import Callable def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ): """simple docstring""" _UpperCAmelCase = a _UpperCAmelCase = b if function(lowercase ) == 0: # one of the a or b is a root for the function return a elif function(lowercase ) == 0: return b elif ( function(lowercase ) * function(lowercase ) > 0 ): # if none of these are root and they are both positive or negative, # then this algorithm can't find the root raise ValueError("""could not find root in given interval.""" ) else: _UpperCAmelCase = start + (end - start) / 2.0 while abs(start - mid ) > 10**-7: # until precisely equals to 10^-7 if function(lowercase ) == 0: return mid elif function(lowercase ) * function(lowercase ) < 0: _UpperCAmelCase = mid else: _UpperCAmelCase = mid _UpperCAmelCase = start + (end - start) / 2.0 return mid def __UpperCAmelCase ( lowercase ): """simple docstring""" return x**3 - 2 * x - 5 if __name__ == "__main__": print(bisection(f, 1, 1_0_0_0)) import doctest doctest.testmod()
30
"""simple docstring""" # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from argparse import ArgumentParser from accelerate.commands.config import get_config_parser from accelerate.commands.env import env_command_parser from accelerate.commands.launch import launch_command_parser from accelerate.commands.test import test_command_parser from accelerate.commands.tpu import tpu_command_parser def __UpperCAmelCase ( ): """simple docstring""" _UpperCAmelCase = ArgumentParser("""Accelerate CLI tool""" ,usage="""accelerate <command> [<args>]""" ,allow_abbrev=lowercase ) _UpperCAmelCase = parser.add_subparsers(help="""accelerate command helpers""" ) # Register commands get_config_parser(subparsers=lowercase ) env_command_parser(subparsers=lowercase ) launch_command_parser(subparsers=lowercase ) tpu_command_parser(subparsers=lowercase ) test_command_parser(subparsers=lowercase ) # Let's go _UpperCAmelCase = parser.parse_args() if not hasattr(lowercase ,"""func""" ): parser.print_help() exit(1 ) # Run args.func(lowercase ) if __name__ == "__main__": main()
30
1
"""simple docstring""" from typing import Any, Dict, List, Union from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, ChunkPipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): import torch from transformers.modeling_outputs import BaseModelOutput from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING UpperCAmelCase__ = logging.get_logger(__name__) @add_end_docstrings(lowerCAmelCase_ ) class a ( lowerCAmelCase_ ): def __init__( self : str , **__lowerCAmelCase : Optional[Any] ): super().__init__(**__lowerCAmelCase ) if self.framework == "tf": raise ValueError(f'''The {self.__class__} is only available in PyTorch.''' ) requires_backends(self , """vision""" ) self.check_model_type(__lowerCAmelCase ) def __call__( self : int , __lowerCAmelCase : Union[str, "Image.Image", List[Dict[str, Any]]] , __lowerCAmelCase : Union[str, List[str]] = None , **__lowerCAmelCase : Dict , ): if "text_queries" in kwargs: _UpperCAmelCase = kwargs.pop("""text_queries""" ) if isinstance(__lowerCAmelCase , (str, Image.Image) ): _UpperCAmelCase = {"""image""": image, """candidate_labels""": candidate_labels} else: _UpperCAmelCase = image _UpperCAmelCase = super().__call__(__lowerCAmelCase , **__lowerCAmelCase ) return results def lowerCAmelCase_ ( self : Tuple , **__lowerCAmelCase : Union[str, Any] ): _UpperCAmelCase = {} if "threshold" in kwargs: _UpperCAmelCase = kwargs["""threshold"""] if "top_k" in kwargs: _UpperCAmelCase = kwargs["""top_k"""] return {}, {}, postprocess_params def lowerCAmelCase_ ( self : str , __lowerCAmelCase : int ): _UpperCAmelCase = load_image(inputs["""image"""] ) _UpperCAmelCase = inputs["""candidate_labels"""] if isinstance(__lowerCAmelCase , __lowerCAmelCase ): _UpperCAmelCase = candidate_labels.split(""",""" ) _UpperCAmelCase = torch.tensor([[image.height, image.width]] , dtype=torch.intaa ) for i, candidate_label in enumerate(__lowerCAmelCase ): _UpperCAmelCase = self.tokenizer(__lowerCAmelCase , return_tensors=self.framework ) _UpperCAmelCase = self.image_processor(__lowerCAmelCase , return_tensors=self.framework ) yield { "is_last": i == len(__lowerCAmelCase ) - 1, "target_size": target_size, "candidate_label": candidate_label, **text_inputs, **image_features, } def lowerCAmelCase_ ( self : Any , __lowerCAmelCase : Union[str, Any] ): _UpperCAmelCase = model_inputs.pop("""target_size""" ) _UpperCAmelCase = model_inputs.pop("""candidate_label""" ) _UpperCAmelCase = model_inputs.pop("""is_last""" ) _UpperCAmelCase = self.model(**__lowerCAmelCase ) _UpperCAmelCase = {"""target_size""": target_size, """candidate_label""": candidate_label, """is_last""": is_last, **outputs} return model_outputs def lowerCAmelCase_ ( self : Union[str, Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : List[str]=0.1 , __lowerCAmelCase : Dict=None ): _UpperCAmelCase = [] for model_output in model_outputs: _UpperCAmelCase = model_output["""candidate_label"""] _UpperCAmelCase = BaseModelOutput(__lowerCAmelCase ) _UpperCAmelCase = self.image_processor.post_process_object_detection( outputs=__lowerCAmelCase , threshold=__lowerCAmelCase , target_sizes=model_output["""target_size"""] )[0] for index in outputs["scores"].nonzero(): _UpperCAmelCase = outputs["""scores"""][index].item() _UpperCAmelCase = self._get_bounding_box(outputs["""boxes"""][index][0] ) _UpperCAmelCase = {"""score""": score, """label""": label, """box""": 
box} results.append(__lowerCAmelCase ) _UpperCAmelCase = sorted(__lowerCAmelCase , key=lambda __lowerCAmelCase : x["score"] , reverse=__lowerCAmelCase ) if top_k: _UpperCAmelCase = results[:top_k] return results def lowerCAmelCase_ ( self : List[str] , __lowerCAmelCase : "torch.Tensor" ): if self.framework != "pt": raise ValueError("""The ZeroShotObjectDetectionPipeline is only available in PyTorch.""" ) _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = box.int().tolist() _UpperCAmelCase = { """xmin""": xmin, """ymin""": ymin, """xmax""": xmax, """ymax""": ymax, } return bbox
30
"""simple docstring""" def __UpperCAmelCase ( lowercase ): """simple docstring""" return " ".join(input_str.split()[::-1] ) if __name__ == "__main__": import doctest doctest.testmod()
30
1
"""simple docstring""" from typing import Optional, Union import torch from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention from ...modeling_utils import PreTrainedModel from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_mobilenet_va import MobileNetVaConfig UpperCAmelCase__ = logging.get_logger(__name__) # General docstring UpperCAmelCase__ = """MobileNetV1Config""" # Base docstring UpperCAmelCase__ = """google/mobilenet_v1_1.0_224""" UpperCAmelCase__ = [1, 1_0_2_4, 7, 7] # Image classification docstring UpperCAmelCase__ = """google/mobilenet_v1_1.0_224""" UpperCAmelCase__ = """tabby, tabby cat""" UpperCAmelCase__ = [ """google/mobilenet_v1_1.0_224""", """google/mobilenet_v1_0.75_192""", # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1 ] def __UpperCAmelCase ( lowercase ,lowercase ,lowercase=None ): """simple docstring""" _UpperCAmelCase = {} if isinstance(lowercase ,lowercase ): _UpperCAmelCase = model.mobilenet_va else: _UpperCAmelCase = model _UpperCAmelCase = """MobilenetV1/Conv2d_0/""" _UpperCAmelCase = backbone.conv_stem.convolution.weight _UpperCAmelCase = backbone.conv_stem.normalization.bias _UpperCAmelCase = backbone.conv_stem.normalization.weight _UpperCAmelCase = backbone.conv_stem.normalization.running_mean _UpperCAmelCase = backbone.conv_stem.normalization.running_var for i in range(13 ): _UpperCAmelCase = i + 1 _UpperCAmelCase = i * 2 _UpperCAmelCase = backbone.layer[pt_index] _UpperCAmelCase = f'''MobilenetV1/Conv2d_{tf_index}_depthwise/''' _UpperCAmelCase = pointer.convolution.weight _UpperCAmelCase = pointer.normalization.bias _UpperCAmelCase = pointer.normalization.weight _UpperCAmelCase = pointer.normalization.running_mean _UpperCAmelCase = pointer.normalization.running_var _UpperCAmelCase = backbone.layer[pt_index + 1] _UpperCAmelCase = f'''MobilenetV1/Conv2d_{tf_index}_pointwise/''' _UpperCAmelCase = pointer.convolution.weight _UpperCAmelCase = pointer.normalization.bias _UpperCAmelCase = pointer.normalization.weight _UpperCAmelCase = pointer.normalization.running_mean _UpperCAmelCase = pointer.normalization.running_var if isinstance(lowercase ,lowercase ): _UpperCAmelCase = """MobilenetV1/Logits/Conv2d_1c_1x1/""" _UpperCAmelCase = model.classifier.weight _UpperCAmelCase = model.classifier.bias return tf_to_pt_map def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ): """simple docstring""" try: import numpy as np import tensorflow as tf except ImportError: logger.error( """Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. 
Please see """ """https://www.tensorflow.org/install/ for installation instructions.""" ) raise # Load weights from TF model _UpperCAmelCase = tf.train.list_variables(lowercase ) _UpperCAmelCase = {} for name, shape in init_vars: logger.info(f'''Loading TF weight {name} with shape {shape}''' ) _UpperCAmelCase = tf.train.load_variable(lowercase ,lowercase ) _UpperCAmelCase = array # Build TF to PyTorch weights loading map _UpperCAmelCase = _build_tf_to_pytorch_map(lowercase ,lowercase ,lowercase ) for name, pointer in tf_to_pt_map.items(): logger.info(f'''Importing {name}''' ) if name not in tf_weights: logger.info(f'''{name} not in tf pre-trained weights, skipping''' ) continue _UpperCAmelCase = tf_weights[name] if "depthwise_weights" in name: logger.info("""Transposing depthwise""" ) _UpperCAmelCase = np.transpose(lowercase ,(2, 3, 0, 1) ) elif "weights" in name: logger.info("""Transposing""" ) if len(pointer.shape ) == 2: # copying into linear layer _UpperCAmelCase = array.squeeze().transpose() else: _UpperCAmelCase = np.transpose(lowercase ,(3, 2, 0, 1) ) if pointer.shape != array.shape: raise ValueError(f'''Pointer shape {pointer.shape} and array shape {array.shape} mismatched''' ) logger.info(f'''Initialize PyTorch weight {name} {array.shape}''' ) _UpperCAmelCase = torch.from_numpy(lowercase ) tf_weights.pop(lowercase ,lowercase ) tf_weights.pop(name + """/RMSProp""" ,lowercase ) tf_weights.pop(name + """/RMSProp_1""" ,lowercase ) tf_weights.pop(name + """/ExponentialMovingAverage""" ,lowercase ) logger.info(f'''Weights not copied to PyTorch model: {", ".join(tf_weights.keys() )}''' ) return model def __UpperCAmelCase ( lowercase ,lowercase ): """simple docstring""" _UpperCAmelCase , _UpperCAmelCase = features.shape[-2:] _UpperCAmelCase , _UpperCAmelCase = conv_layer.stride _UpperCAmelCase , _UpperCAmelCase = conv_layer.kernel_size if in_height % stride_height == 0: _UpperCAmelCase = max(kernel_height - stride_height ,0 ) else: _UpperCAmelCase = max(kernel_height - (in_height % stride_height) ,0 ) if in_width % stride_width == 0: _UpperCAmelCase = max(kernel_width - stride_width ,0 ) else: _UpperCAmelCase = max(kernel_width - (in_width % stride_width) ,0 ) _UpperCAmelCase = pad_along_width // 2 _UpperCAmelCase = pad_along_width - pad_left _UpperCAmelCase = pad_along_height // 2 _UpperCAmelCase = pad_along_height - pad_top _UpperCAmelCase = (pad_left, pad_right, pad_top, pad_bottom) return nn.functional.pad(lowercase ,lowercase ,"""constant""" ,0.0 ) class a ( nn.Module ): def __init__( self : Union[str, Any] , __lowerCAmelCase : MobileNetVaConfig , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : Optional[int] = 1 , __lowerCAmelCase : Optional[int] = 1 , __lowerCAmelCase : bool = False , __lowerCAmelCase : Optional[bool] = True , __lowerCAmelCase : Optional[bool or str] = True , ): super().__init__() _UpperCAmelCase = config if in_channels % groups != 0: raise ValueError(f'''Input channels ({in_channels}) are not divisible by {groups} groups.''' ) if out_channels % groups != 0: raise ValueError(f'''Output channels ({out_channels}) are not divisible by {groups} groups.''' ) _UpperCAmelCase = 0 if config.tf_padding else int((kernel_size - 1) / 2 ) _UpperCAmelCase = nn.Convad( in_channels=__lowerCAmelCase , out_channels=__lowerCAmelCase , kernel_size=__lowerCAmelCase , stride=__lowerCAmelCase , padding=__lowerCAmelCase , groups=__lowerCAmelCase , bias=__lowerCAmelCase , padding_mode="""zeros""" , ) if use_normalization: _UpperCAmelCase = 
nn.BatchNormad( num_features=__lowerCAmelCase , eps=config.layer_norm_eps , momentum=0.9_997 , affine=__lowerCAmelCase , track_running_stats=__lowerCAmelCase , ) else: _UpperCAmelCase = None if use_activation: if isinstance(__lowerCAmelCase , __lowerCAmelCase ): _UpperCAmelCase = ACTaFN[use_activation] elif isinstance(config.hidden_act , __lowerCAmelCase ): _UpperCAmelCase = ACTaFN[config.hidden_act] else: _UpperCAmelCase = config.hidden_act else: _UpperCAmelCase = None def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : torch.Tensor ): if self.config.tf_padding: _UpperCAmelCase = apply_tf_padding(__lowerCAmelCase , self.convolution ) _UpperCAmelCase = self.convolution(__lowerCAmelCase ) if self.normalization is not None: _UpperCAmelCase = self.normalization(__lowerCAmelCase ) if self.activation is not None: _UpperCAmelCase = self.activation(__lowerCAmelCase ) return features class a ( lowerCAmelCase_ ): _snake_case : Optional[int] = MobileNetVaConfig _snake_case : Tuple = load_tf_weights_in_mobilenet_va _snake_case : List[Any] = 'mobilenet_v1' _snake_case : str = 'pixel_values' _snake_case : int = False def lowerCAmelCase_ ( self : int , __lowerCAmelCase : Union[nn.Linear, nn.Convad] ): if isinstance(__lowerCAmelCase , (nn.Linear, nn.Convad) ): module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range ) if module.bias is not None: module.bias.data.zero_() elif isinstance(__lowerCAmelCase , nn.BatchNormad ): module.bias.data.zero_() module.weight.data.fill_(1.0 ) UpperCAmelCase__ = r""" This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ UpperCAmelCase__ = r""" Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`MobileNetV1ImageProcessor.__call__`] for details. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ @add_start_docstrings( 'The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.' 
, lowerCAmelCase_ , ) class a ( lowerCAmelCase_ ): def __init__( self : str , __lowerCAmelCase : MobileNetVaConfig , __lowerCAmelCase : bool = True ): super().__init__(__lowerCAmelCase ) _UpperCAmelCase = config _UpperCAmelCase = 32 _UpperCAmelCase = max(int(depth * config.depth_multiplier ) , config.min_depth ) _UpperCAmelCase = MobileNetVaConvLayer( __lowerCAmelCase , in_channels=config.num_channels , out_channels=__lowerCAmelCase , kernel_size=3 , stride=2 , ) _UpperCAmelCase = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1] _UpperCAmelCase = nn.ModuleList() for i in range(13 ): _UpperCAmelCase = out_channels if strides[i] == 2 or i == 0: depth *= 2 _UpperCAmelCase = max(int(depth * config.depth_multiplier ) , config.min_depth ) self.layer.append( MobileNetVaConvLayer( __lowerCAmelCase , in_channels=__lowerCAmelCase , out_channels=__lowerCAmelCase , kernel_size=3 , stride=strides[i] , groups=__lowerCAmelCase , ) ) self.layer.append( MobileNetVaConvLayer( __lowerCAmelCase , in_channels=__lowerCAmelCase , out_channels=__lowerCAmelCase , kernel_size=1 , ) ) _UpperCAmelCase = nn.AdaptiveAvgPoolad((1, 1) ) if add_pooling_layer else None # Initialize weights and apply final processing self.post_init() def lowerCAmelCase_ ( self : Any , __lowerCAmelCase : Union[str, Any] ): raise NotImplementedError @add_start_docstrings_to_model_forward(__lowerCAmelCase ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=__lowerCAmelCase , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def lowerCAmelCase_ ( self : Dict , __lowerCAmelCase : Optional[torch.Tensor] = None , __lowerCAmelCase : Optional[bool] = None , __lowerCAmelCase : Optional[bool] = None , ): _UpperCAmelCase = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) _UpperCAmelCase = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError("""You have to specify pixel_values""" ) _UpperCAmelCase = self.conv_stem(__lowerCAmelCase ) _UpperCAmelCase = () if output_hidden_states else None for i, layer_module in enumerate(self.layer ): _UpperCAmelCase = layer_module(__lowerCAmelCase ) if output_hidden_states: _UpperCAmelCase = all_hidden_states + (hidden_states,) _UpperCAmelCase = hidden_states if self.pooler is not None: _UpperCAmelCase = torch.flatten(self.pooler(__lowerCAmelCase ) , start_dim=1 ) else: _UpperCAmelCase = None if not return_dict: return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None ) return BaseModelOutputWithPoolingAndNoAttention( last_hidden_state=__lowerCAmelCase , pooler_output=__lowerCAmelCase , hidden_states=__lowerCAmelCase , ) @add_start_docstrings( '\n MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. 
for\n ImageNet.\n ' , lowerCAmelCase_ , ) class a ( lowerCAmelCase_ ): def __init__( self : Optional[int] , __lowerCAmelCase : MobileNetVaConfig ): super().__init__(__lowerCAmelCase ) _UpperCAmelCase = config.num_labels _UpperCAmelCase = MobileNetVaModel(__lowerCAmelCase ) _UpperCAmelCase = self.mobilenet_va.layer[-1].convolution.out_channels # Classifier head _UpperCAmelCase = nn.Dropout(config.classifier_dropout_prob , inplace=__lowerCAmelCase ) _UpperCAmelCase = nn.Linear(__lowerCAmelCase , config.num_labels ) if config.num_labels > 0 else nn.Identity() # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(__lowerCAmelCase ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=__lowerCAmelCase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : Optional[torch.Tensor] = None , __lowerCAmelCase : Optional[bool] = None , __lowerCAmelCase : Optional[torch.Tensor] = None , __lowerCAmelCase : Optional[bool] = None , ): _UpperCAmelCase = return_dict if return_dict is not None else self.config.use_return_dict _UpperCAmelCase = self.mobilenet_va(__lowerCAmelCase , output_hidden_states=__lowerCAmelCase , return_dict=__lowerCAmelCase ) _UpperCAmelCase = outputs.pooler_output if return_dict else outputs[1] _UpperCAmelCase = self.classifier(self.dropout(__lowerCAmelCase ) ) _UpperCAmelCase = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: _UpperCAmelCase = """regression""" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): _UpperCAmelCase = """single_label_classification""" else: _UpperCAmelCase = """multi_label_classification""" if self.config.problem_type == "regression": _UpperCAmelCase = MSELoss() if self.num_labels == 1: _UpperCAmelCase = loss_fct(logits.squeeze() , labels.squeeze() ) else: _UpperCAmelCase = loss_fct(__lowerCAmelCase , __lowerCAmelCase ) elif self.config.problem_type == "single_label_classification": _UpperCAmelCase = CrossEntropyLoss() _UpperCAmelCase = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) elif self.config.problem_type == "multi_label_classification": _UpperCAmelCase = BCEWithLogitsLoss() _UpperCAmelCase = loss_fct(__lowerCAmelCase , __lowerCAmelCase ) if not return_dict: _UpperCAmelCase = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return ImageClassifierOutputWithNoAttention( loss=__lowerCAmelCase , logits=__lowerCAmelCase , hidden_states=outputs.hidden_states , )
30
"""simple docstring""" import csv import tweepy # Twitter API credentials UpperCAmelCase__ = """""" UpperCAmelCase__ = """""" UpperCAmelCase__ = """""" UpperCAmelCase__ = """""" def __UpperCAmelCase ( lowercase ): """simple docstring""" # authorize twitter, initialize tweepy _UpperCAmelCase = tweepy.OAuthHandler(lowercase ,lowercase ) auth.set_access_token(lowercase ,lowercase ) _UpperCAmelCase = tweepy.API(lowercase ) # initialize a list to hold all the tweepy Tweets _UpperCAmelCase = [] # make initial request for most recent tweets (200 is the maximum allowed count) _UpperCAmelCase = api.user_timeline(screen_name=lowercase ,count=2_00 ) # save most recent tweets alltweets.extend(lowercase ) # save the id of the oldest tweet less one _UpperCAmelCase = alltweets[-1].id - 1 # keep grabbing tweets until there are no tweets left to grab while len(lowercase ) > 0: print(f'''getting tweets before {oldest}''' ) # all subsequent requests use the max_id param to prevent duplicates _UpperCAmelCase = api.user_timeline( screen_name=lowercase ,count=2_00 ,max_id=lowercase ) # save most recent tweets alltweets.extend(lowercase ) # update the id of the oldest tweet less one _UpperCAmelCase = alltweets[-1].id - 1 print(f'''...{len(lowercase )} tweets downloaded so far''' ) # transform the tweepy tweets into a 2D array that will populate the csv _UpperCAmelCase = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets] # write the csv with open(f'''new_{screen_name}_tweets.csv''' ,"""w""" ) as f: _UpperCAmelCase = csv.writer(lowercase ) writer.writerow(["""id""", """created_at""", """text"""] ) writer.writerows(lowercase ) if __name__ == "__main__": # pass in the username of the account you want to download get_all_tweets("""FirePing32""")
30
1
"""simple docstring""" import flax.linen as nn import jax import jax.numpy as jnp class a ( nn.Module ): _snake_case : int _snake_case : jnp.dtype = jnp.floataa def lowerCAmelCase_ ( self : Dict ): _UpperCAmelCase = nn.Conv( self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) def __call__( self : Any , __lowerCAmelCase : Optional[Any] ): _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = hidden_states.shape _UpperCAmelCase = jax.image.resize( __lowerCAmelCase , shape=(batch, height * 2, width * 2, channels) , method="""nearest""" , ) _UpperCAmelCase = self.conv(__lowerCAmelCase ) return hidden_states class a ( nn.Module ): _snake_case : int _snake_case : jnp.dtype = jnp.floataa def lowerCAmelCase_ ( self : Dict ): _UpperCAmelCase = nn.Conv( self.out_channels , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) def __call__( self : int , __lowerCAmelCase : List[Any] ): # pad = ((0, 0), (0, 1), (0, 1), (0, 0)) # pad height and width dim # hidden_states = jnp.pad(hidden_states, pad_width=pad) _UpperCAmelCase = self.conv(__lowerCAmelCase ) return hidden_states class a ( nn.Module ): _snake_case : int _snake_case : int = None _snake_case : float = 0.0 _snake_case : bool = None _snake_case : jnp.dtype = jnp.floataa def lowerCAmelCase_ ( self : Union[str, Any] ): _UpperCAmelCase = self.in_channels if self.out_channels is None else self.out_channels _UpperCAmelCase = nn.GroupNorm(num_groups=32 , epsilon=1e-5 ) _UpperCAmelCase = nn.Conv( __lowerCAmelCase , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) _UpperCAmelCase = nn.Dense(__lowerCAmelCase , dtype=self.dtype ) _UpperCAmelCase = nn.GroupNorm(num_groups=32 , epsilon=1e-5 ) _UpperCAmelCase = nn.Dropout(self.dropout_prob ) _UpperCAmelCase = nn.Conv( __lowerCAmelCase , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) _UpperCAmelCase = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut _UpperCAmelCase = None if use_nin_shortcut: _UpperCAmelCase = nn.Conv( __lowerCAmelCase , kernel_size=(1, 1) , strides=(1, 1) , padding="""VALID""" , dtype=self.dtype , ) def __call__( self : Optional[int] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Union[str, Any]=True ): _UpperCAmelCase = hidden_states _UpperCAmelCase = self.norma(__lowerCAmelCase ) _UpperCAmelCase = nn.swish(__lowerCAmelCase ) _UpperCAmelCase = self.conva(__lowerCAmelCase ) _UpperCAmelCase = self.time_emb_proj(nn.swish(__lowerCAmelCase ) ) _UpperCAmelCase = jnp.expand_dims(jnp.expand_dims(__lowerCAmelCase , 1 ) , 1 ) _UpperCAmelCase = hidden_states + temb _UpperCAmelCase = self.norma(__lowerCAmelCase ) _UpperCAmelCase = nn.swish(__lowerCAmelCase ) _UpperCAmelCase = self.dropout(__lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase = self.conva(__lowerCAmelCase ) if self.conv_shortcut is not None: _UpperCAmelCase = self.conv_shortcut(__lowerCAmelCase ) return hidden_states + residual
30
"""simple docstring""" import multiprocessing import time from arguments import PretokenizationArguments from datasets import load_dataset from transformers import AutoTokenizer, HfArgumentParser def __UpperCAmelCase ( lowercase ): """simple docstring""" _UpperCAmelCase = {} _UpperCAmelCase = tokenizer(example["""content"""] ,truncation=lowercase )["""input_ids"""] _UpperCAmelCase = len(example["""content"""] ) / len(output["""input_ids"""] ) return output UpperCAmelCase__ = HfArgumentParser(PretokenizationArguments) UpperCAmelCase__ = parser.parse_args() if args.num_workers is None: UpperCAmelCase__ = multiprocessing.cpu_count() UpperCAmelCase__ = AutoTokenizer.from_pretrained(args.tokenizer_dir) UpperCAmelCase__ = time.time() UpperCAmelCase__ = load_dataset(args.dataset_name, split="""train""") print(F'''Dataset loaded in {time.time()-t_start:.2f}s''') UpperCAmelCase__ = time.time() UpperCAmelCase__ = ds.map( tokenize, num_proc=args.num_workers, remove_columns=[ """repo_name""", """path""", """copies""", """size""", """content""", """license""", """hash""", """line_mean""", """line_max""", """alpha_frac""", """autogenerated""", ], ) print(F'''Dataset tokenized in {time.time()-t_start:.2f}s''') UpperCAmelCase__ = time.time() ds.push_to_hub(args.tokenized_data_repo) print(F'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
30
1
"""simple docstring""" import argparse import os import re # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_dummies.py UpperCAmelCase__ = """src/diffusers""" # Matches is_xxx_available() UpperCAmelCase__ = re.compile(r"""is\_([a-z_]*)_available\(\)""") # Matches from xxx import bla UpperCAmelCase__ = re.compile(r"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""") UpperCAmelCase__ = """ {0} = None """ UpperCAmelCase__ = """ class {0}(metaclass=DummyObject): _backends = {1} def __init__(self, *args, **kwargs): requires_backends(self, {1}) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, {1}) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, {1}) """ UpperCAmelCase__ = """ def {0}(*args, **kwargs): requires_backends({0}, {1}) """ def __UpperCAmelCase ( lowercase ): """simple docstring""" _UpperCAmelCase = _re_backend.findall(lowercase ) if len(lowercase ) == 0: return None return "_and_".join(lowercase ) def __UpperCAmelCase ( ): """simple docstring""" with open(os.path.join(lowercase ,"""__init__.py""" ) ,"""r""" ,encoding="""utf-8""" ,newline="""\n""" ) as f: _UpperCAmelCase = f.readlines() # Get to the point we do the actual imports for type checking _UpperCAmelCase = 0 _UpperCAmelCase = {} # Go through the end of the file while line_index < len(lowercase ): # If the line contains is_backend_available, we grab all objects associated with the `else` block _UpperCAmelCase = find_backend(lines[line_index] ) if backend is not None: while not lines[line_index].startswith("""else:""" ): line_index += 1 line_index += 1 _UpperCAmelCase = [] # Until we unindent, add backend objects to the list while line_index < len(lowercase ) and len(lines[line_index] ) > 1: _UpperCAmelCase = lines[line_index] _UpperCAmelCase = _re_single_line_import.search(lowercase ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(""", """ ) ) elif line.startswith(""" """ * 8 ): objects.append(line[8:-2] ) line_index += 1 if len(lowercase ) > 0: _UpperCAmelCase = objects else: line_index += 1 return backend_specific_objects def __UpperCAmelCase ( lowercase ,lowercase ): """simple docstring""" if name.isupper(): return DUMMY_CONSTANT.format(lowercase ) elif name.islower(): return DUMMY_FUNCTION.format(lowercase ,lowercase ) else: return DUMMY_CLASS.format(lowercase ,lowercase ) def __UpperCAmelCase ( lowercase=None ): """simple docstring""" if backend_specific_objects is None: _UpperCAmelCase = read_init() # For special correspondence backend to module name as used in the function requires_modulename _UpperCAmelCase = {} for backend, objects in backend_specific_objects.items(): _UpperCAmelCase = """[""" + """, """.join(f'''"{b}"''' for b in backend.split("""_and_""" ) ) + """]""" _UpperCAmelCase = """# This file is autogenerated by the command `make fix-copies`, do not edit.\n""" dummy_file += "from ..utils import DummyObject, requires_backends\n\n" dummy_file += "\n".join([create_dummy_object(lowercase ,lowercase ) for o in objects] ) _UpperCAmelCase = dummy_file return dummy_files def __UpperCAmelCase ( lowercase=False ): """simple docstring""" _UpperCAmelCase = create_dummy_files() # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py _UpperCAmelCase = {"""torch""": """pt"""} # Locate actual dummy modules and read their content. 
_UpperCAmelCase = os.path.join(lowercase ,"""utils""" ) _UpperCAmelCase = { backend: os.path.join(lowercase ,f'''dummy_{short_names.get(lowercase ,lowercase )}_objects.py''' ) for backend in dummy_files.keys() } _UpperCAmelCase = {} for backend, file_path in dummy_file_paths.items(): if os.path.isfile(lowercase ): with open(lowercase ,"""r""" ,encoding="""utf-8""" ,newline="""\n""" ) as f: _UpperCAmelCase = f.read() else: _UpperCAmelCase = """""" for backend in dummy_files.keys(): if dummy_files[backend] != actual_dummies[backend]: if overwrite: print( f'''Updating diffusers.utils.dummy_{short_names.get(lowercase ,lowercase )}_objects.py as the main ''' """__init__ has new objects.""" ) with open(dummy_file_paths[backend] ,"""w""" ,encoding="""utf-8""" ,newline="""\n""" ) as f: f.write(dummy_files[backend] ) else: raise ValueError( """The main __init__ has objects that are not present in """ f'''diffusers.utils.dummy_{short_names.get(lowercase ,lowercase )}_objects.py. Run `make fix-copies` ''' """to fix this.""" ) if __name__ == "__main__": UpperCAmelCase__ = argparse.ArgumentParser() parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""") UpperCAmelCase__ = parser.parse_args() check_dummies(args.fix_and_overwrite)
30
"""simple docstring""" from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...onnx.utils import compute_effective_axis_dimension from ...utils import logging if TYPE_CHECKING: from ...processing_utils import ProcessorMixin from ...utils import TensorType UpperCAmelCase__ = logging.get_logger(__name__) UpperCAmelCase__ = { """microsoft/layoutlmv3-base""": """https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json""", } class a ( lowerCAmelCase_ ): _snake_case : Any = 'layoutlmv3' def __init__( self : Optional[Any] , __lowerCAmelCase : Tuple=5_0265 , __lowerCAmelCase : Union[str, Any]=768 , __lowerCAmelCase : str=12 , __lowerCAmelCase : int=12 , __lowerCAmelCase : Any=3072 , __lowerCAmelCase : Any="gelu" , __lowerCAmelCase : Any=0.1 , __lowerCAmelCase : Tuple=0.1 , __lowerCAmelCase : Any=512 , __lowerCAmelCase : List[str]=2 , __lowerCAmelCase : Optional[Any]=0.02 , __lowerCAmelCase : Optional[int]=1e-5 , __lowerCAmelCase : int=1 , __lowerCAmelCase : Optional[Any]=0 , __lowerCAmelCase : Optional[Any]=2 , __lowerCAmelCase : List[str]=1024 , __lowerCAmelCase : Any=128 , __lowerCAmelCase : int=128 , __lowerCAmelCase : Tuple=True , __lowerCAmelCase : List[Any]=32 , __lowerCAmelCase : Any=128 , __lowerCAmelCase : int=64 , __lowerCAmelCase : List[str]=256 , __lowerCAmelCase : Union[str, Any]=True , __lowerCAmelCase : Any=True , __lowerCAmelCase : List[str]=True , __lowerCAmelCase : Optional[Any]=224 , __lowerCAmelCase : List[Any]=3 , __lowerCAmelCase : int=16 , __lowerCAmelCase : Optional[Any]=None , **__lowerCAmelCase : Union[str, Any] , ): super().__init__( vocab_size=__lowerCAmelCase , hidden_size=__lowerCAmelCase , num_hidden_layers=__lowerCAmelCase , num_attention_heads=__lowerCAmelCase , intermediate_size=__lowerCAmelCase , hidden_act=__lowerCAmelCase , hidden_dropout_prob=__lowerCAmelCase , attention_probs_dropout_prob=__lowerCAmelCase , max_position_embeddings=__lowerCAmelCase , type_vocab_size=__lowerCAmelCase , initializer_range=__lowerCAmelCase , layer_norm_eps=__lowerCAmelCase , pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , **__lowerCAmelCase , ) _UpperCAmelCase = max_ad_position_embeddings _UpperCAmelCase = coordinate_size _UpperCAmelCase = shape_size _UpperCAmelCase = has_relative_attention_bias _UpperCAmelCase = rel_pos_bins _UpperCAmelCase = max_rel_pos _UpperCAmelCase = has_spatial_attention_bias _UpperCAmelCase = rel_ad_pos_bins _UpperCAmelCase = max_rel_ad_pos _UpperCAmelCase = text_embed _UpperCAmelCase = visual_embed _UpperCAmelCase = input_size _UpperCAmelCase = num_channels _UpperCAmelCase = patch_size _UpperCAmelCase = classifier_dropout class a ( lowerCAmelCase_ ): _snake_case : str = version.parse('1.12' ) @property def lowerCAmelCase_ ( self : Dict ): # The order of inputs is different for question answering and sequence classification if self.task in ["question-answering", "sequence-classification"]: return OrderedDict( [ ("""input_ids""", {0: """batch""", 1: """sequence"""}), ("""attention_mask""", {0: """batch""", 1: """sequence"""}), ("""bbox""", {0: """batch""", 1: """sequence"""}), ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) else: return OrderedDict( [ ("""input_ids""", {0: """batch""", 1: """sequence"""}), ("""bbox""", {0: """batch""", 1: """sequence"""}), ("""attention_mask""", {0: 
"""batch""", 1: """sequence"""}), ("""pixel_values""", {0: """batch""", 1: """num_channels"""}), ] ) @property def lowerCAmelCase_ ( self : List[Any] ): return 1e-5 @property def lowerCAmelCase_ ( self : List[str] ): return 12 def lowerCAmelCase_ ( self : str , __lowerCAmelCase : "ProcessorMixin" , __lowerCAmelCase : int = -1 , __lowerCAmelCase : int = -1 , __lowerCAmelCase : bool = False , __lowerCAmelCase : Optional["TensorType"] = None , __lowerCAmelCase : int = 3 , __lowerCAmelCase : int = 40 , __lowerCAmelCase : int = 40 , ): setattr(processor.image_processor , """apply_ocr""" , __lowerCAmelCase ) # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX _UpperCAmelCase = compute_effective_axis_dimension( __lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX _UpperCAmelCase = processor.tokenizer.num_special_tokens_to_add(__lowerCAmelCase ) _UpperCAmelCase = compute_effective_axis_dimension( __lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__lowerCAmelCase ) # Generate dummy inputs according to compute batch and sequence _UpperCAmelCase = [[""" """.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size # Generate dummy bounding boxes _UpperCAmelCase = [[[48, 84, 73, 128]]] * batch_size # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX # batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch) _UpperCAmelCase = self._generate_dummy_images(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase = dict( processor( __lowerCAmelCase , text=__lowerCAmelCase , boxes=__lowerCAmelCase , return_tensors=__lowerCAmelCase , ) ) return inputs
30
1
"""simple docstring""" from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...onnx.utils import compute_effective_axis_dimension from ...utils import logging if TYPE_CHECKING: from ...processing_utils import ProcessorMixin from ...utils import TensorType UpperCAmelCase__ = logging.get_logger(__name__) UpperCAmelCase__ = { """microsoft/layoutlmv3-base""": """https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json""", } class a ( lowerCAmelCase_ ): _snake_case : Any = 'layoutlmv3' def __init__( self : Optional[Any] , __lowerCAmelCase : Tuple=5_0265 , __lowerCAmelCase : Union[str, Any]=768 , __lowerCAmelCase : str=12 , __lowerCAmelCase : int=12 , __lowerCAmelCase : Any=3072 , __lowerCAmelCase : Any="gelu" , __lowerCAmelCase : Any=0.1 , __lowerCAmelCase : Tuple=0.1 , __lowerCAmelCase : Any=512 , __lowerCAmelCase : List[str]=2 , __lowerCAmelCase : Optional[Any]=0.02 , __lowerCAmelCase : Optional[int]=1e-5 , __lowerCAmelCase : int=1 , __lowerCAmelCase : Optional[Any]=0 , __lowerCAmelCase : Optional[Any]=2 , __lowerCAmelCase : List[str]=1024 , __lowerCAmelCase : Any=128 , __lowerCAmelCase : int=128 , __lowerCAmelCase : Tuple=True , __lowerCAmelCase : List[Any]=32 , __lowerCAmelCase : Any=128 , __lowerCAmelCase : int=64 , __lowerCAmelCase : List[str]=256 , __lowerCAmelCase : Union[str, Any]=True , __lowerCAmelCase : Any=True , __lowerCAmelCase : List[str]=True , __lowerCAmelCase : Optional[Any]=224 , __lowerCAmelCase : List[Any]=3 , __lowerCAmelCase : int=16 , __lowerCAmelCase : Optional[Any]=None , **__lowerCAmelCase : Union[str, Any] , ): super().__init__( vocab_size=__lowerCAmelCase , hidden_size=__lowerCAmelCase , num_hidden_layers=__lowerCAmelCase , num_attention_heads=__lowerCAmelCase , intermediate_size=__lowerCAmelCase , hidden_act=__lowerCAmelCase , hidden_dropout_prob=__lowerCAmelCase , attention_probs_dropout_prob=__lowerCAmelCase , max_position_embeddings=__lowerCAmelCase , type_vocab_size=__lowerCAmelCase , initializer_range=__lowerCAmelCase , layer_norm_eps=__lowerCAmelCase , pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , **__lowerCAmelCase , ) _UpperCAmelCase = max_ad_position_embeddings _UpperCAmelCase = coordinate_size _UpperCAmelCase = shape_size _UpperCAmelCase = has_relative_attention_bias _UpperCAmelCase = rel_pos_bins _UpperCAmelCase = max_rel_pos _UpperCAmelCase = has_spatial_attention_bias _UpperCAmelCase = rel_ad_pos_bins _UpperCAmelCase = max_rel_ad_pos _UpperCAmelCase = text_embed _UpperCAmelCase = visual_embed _UpperCAmelCase = input_size _UpperCAmelCase = num_channels _UpperCAmelCase = patch_size _UpperCAmelCase = classifier_dropout class a ( lowerCAmelCase_ ): _snake_case : str = version.parse('1.12' ) @property def lowerCAmelCase_ ( self : Dict ): # The order of inputs is different for question answering and sequence classification if self.task in ["question-answering", "sequence-classification"]: return OrderedDict( [ ("""input_ids""", {0: """batch""", 1: """sequence"""}), ("""attention_mask""", {0: """batch""", 1: """sequence"""}), ("""bbox""", {0: """batch""", 1: """sequence"""}), ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) else: return OrderedDict( [ ("""input_ids""", {0: """batch""", 1: """sequence"""}), ("""bbox""", {0: """batch""", 1: """sequence"""}), ("""attention_mask""", {0: 
"""batch""", 1: """sequence"""}), ("""pixel_values""", {0: """batch""", 1: """num_channels"""}), ] ) @property def lowerCAmelCase_ ( self : List[Any] ): return 1e-5 @property def lowerCAmelCase_ ( self : List[str] ): return 12 def lowerCAmelCase_ ( self : str , __lowerCAmelCase : "ProcessorMixin" , __lowerCAmelCase : int = -1 , __lowerCAmelCase : int = -1 , __lowerCAmelCase : bool = False , __lowerCAmelCase : Optional["TensorType"] = None , __lowerCAmelCase : int = 3 , __lowerCAmelCase : int = 40 , __lowerCAmelCase : int = 40 , ): setattr(processor.image_processor , """apply_ocr""" , __lowerCAmelCase ) # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX _UpperCAmelCase = compute_effective_axis_dimension( __lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX _UpperCAmelCase = processor.tokenizer.num_special_tokens_to_add(__lowerCAmelCase ) _UpperCAmelCase = compute_effective_axis_dimension( __lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__lowerCAmelCase ) # Generate dummy inputs according to compute batch and sequence _UpperCAmelCase = [[""" """.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size # Generate dummy bounding boxes _UpperCAmelCase = [[[48, 84, 73, 128]]] * batch_size # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX # batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch) _UpperCAmelCase = self._generate_dummy_images(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase = dict( processor( __lowerCAmelCase , text=__lowerCAmelCase , boxes=__lowerCAmelCase , return_tensors=__lowerCAmelCase , ) ) return inputs
"""simple docstring""" import csv from collections import defaultdict from dataclasses import dataclass, field from typing import List, Optional import matplotlib.pyplot as plt import numpy as np from matplotlib.ticker import ScalarFormatter from transformers import HfArgumentParser def __UpperCAmelCase ( lowercase=None ,lowercase=None ): """simple docstring""" return field(default_factory=lambda: default ,metadata=lowercase ) @dataclass class a : _snake_case : str = field( metadata={'help': 'The csv file to plot.'} , ) _snake_case : bool = field( default=lowerCAmelCase_ , metadata={'help': 'Whether to plot along batch size or sequence length. Defaults to sequence length.'} , ) _snake_case : bool = field( default=lowerCAmelCase_ , metadata={'help': 'Whether the csv file has time results or memory results. Defaults to memory results.'} , ) _snake_case : bool = field( default=lowerCAmelCase_ , metadata={'help': 'Disable logarithmic scale when plotting'} , ) _snake_case : bool = field( default=lowerCAmelCase_ , metadata={ 'help': 'Whether the csv file has training results or inference results. Defaults to inference results.' } , ) _snake_case : Optional[str] = field( default=lowerCAmelCase_ , metadata={'help': 'Filename under which the plot will be saved. If unused no plot is saved.'} , ) _snake_case : Optional[List[str]] = list_field( default=lowerCAmelCase_ , metadata={'help': 'List of model names that are used instead of the ones in the csv file.'} ) def __UpperCAmelCase ( lowercase ): """simple docstring""" try: int(lowercase ) return True except ValueError: return False def __UpperCAmelCase ( lowercase ): """simple docstring""" try: float(lowercase ) return True except ValueError: return False class a : def __init__( self : int , __lowerCAmelCase : Union[str, Any] ): _UpperCAmelCase = args _UpperCAmelCase = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}} ) with open(self.args.csv_file , newline="""""" ) as csv_file: _UpperCAmelCase = csv.DictReader(__lowerCAmelCase ) for row in reader: _UpperCAmelCase = row["""model"""] self.result_dict[model_name]["bsz"].append(int(row["""batch_size"""] ) ) self.result_dict[model_name]["seq_len"].append(int(row["""sequence_length"""] ) ) if can_convert_to_int(row["""result"""] ): # value is not None _UpperCAmelCase = int(row["""result"""] ) elif can_convert_to_float(row["""result"""] ): # value is not None _UpperCAmelCase = float(row["""result"""] ) def lowerCAmelCase_ ( self : Optional[Any] ): _UpperCAmelCase , _UpperCAmelCase = plt.subplots() _UpperCAmelCase = """Time usage""" if self.args.is_time else """Memory usage""" _UpperCAmelCase = title_str + """ for training""" if self.args.is_train else title_str + """ for inference""" if not self.args.no_log_scale: # set logarithm scales ax.set_xscale("""log""" ) ax.set_yscale("""log""" ) for axis in [ax.xaxis, ax.yaxis]: axis.set_major_formatter(ScalarFormatter() ) for model_name_idx, model_name in enumerate(self.result_dict.keys() ): _UpperCAmelCase = sorted(set(self.result_dict[model_name]["""bsz"""] ) ) _UpperCAmelCase = sorted(set(self.result_dict[model_name]["""seq_len"""] ) ) _UpperCAmelCase = self.result_dict[model_name]["""result"""] ((_UpperCAmelCase) , (_UpperCAmelCase)) = ( (batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes) ) _UpperCAmelCase = ( model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx] ) for inner_loop_value in inner_loop_array: if self.args.plot_along_batch: _UpperCAmelCase = 
np.asarray( [results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results] , dtype=__lowerCAmelCase , ) else: _UpperCAmelCase = np.asarray( [results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results] , dtype=np.floataa , ) ((_UpperCAmelCase) , (_UpperCAmelCase)) = ( ("""batch_size""", """len""") if self.args.plot_along_batch else ("""in #tokens""", """bsz""") ) _UpperCAmelCase = np.asarray(__lowerCAmelCase , __lowerCAmelCase )[: len(__lowerCAmelCase )] plt.scatter( __lowerCAmelCase , __lowerCAmelCase , label=f'''{label_model_name} - {inner_loop_label}: {inner_loop_value}''' ) plt.plot(__lowerCAmelCase , __lowerCAmelCase , """--""" ) title_str += f''' {label_model_name} vs.''' _UpperCAmelCase = title_str[:-4] _UpperCAmelCase = """Time in s""" if self.args.is_time else """Memory in MB""" # plot plt.title(__lowerCAmelCase ) plt.xlabel(__lowerCAmelCase ) plt.ylabel(__lowerCAmelCase ) plt.legend() if self.args.figure_png_file is not None: plt.savefig(self.args.figure_png_file ) else: plt.show() def __UpperCAmelCase ( ): """simple docstring""" _UpperCAmelCase = HfArgumentParser(lowercase ) _UpperCAmelCase = parser.parse_args_into_dataclasses()[0] _UpperCAmelCase = Plot(args=lowercase ) plot.plot() if __name__ == "__main__": main()
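# The Plot class above reads a CSV with `model`, `batch_size`, `sequence_length`
# and `result` columns; a small illustrative benchmark file (values made up):
import csv

rows = [
    {"model": "bert-base-uncased", "batch_size": 8, "sequence_length": 128, "result": 1532},
    {"model": "bert-base-uncased", "batch_size": 8, "sequence_length": 512, "result": 2930},
]
with open("benchmark.csv", "w", newline="") as f:
    writer = csv.DictWriter(f, fieldnames=["model", "batch_size", "sequence_length", "result"])
    writer.writeheader()
    writer.writerows(rows)
# Then (assuming this script is saved as plot_csv_file.py):
#   python plot_csv_file.py --csv_file benchmark.csv --figure_png_file plot.png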
"""simple docstring""" def __UpperCAmelCase ( lowercase ): """simple docstring""" return number & 1 == 0 if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" import os import pytest from attr import dataclass UpperCAmelCase__ = """us-east-1""" # defaults region @dataclass class a : _snake_case : str _snake_case : Tuple = 'arn:aws:iam::558105141721:role/sagemaker_execution_role' _snake_case : List[Any] = { 'task_name': 'mnli', 'per_device_train_batch_size': 16, 'per_device_eval_batch_size': 16, 'do_train': True, 'do_eval': True, 'do_predict': True, 'output_dir': '/opt/ml/model', 'overwrite_output_dir': True, 'max_steps': 5_00, 'save_steps': 55_00, } _snake_case : Optional[Any] = {**hyperparameters, 'max_steps': 10_00} @property def lowerCAmelCase_ ( self : Optional[Any] ): if self.framework == "pytorch": return [ {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"}, {"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"}, {"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"}, ] else: return [ {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"}, {"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"}, {"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"}, ] @property def lowerCAmelCase_ ( self : Dict ): return f'''{self.framework}-transfromers-test''' @property def lowerCAmelCase_ ( self : Union[str, Any] ): return f'''./tests/sagemaker/scripts/{self.framework}''' @property def lowerCAmelCase_ ( self : Dict ): if self.framework == "pytorch": return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04" else: return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04" @pytest.fixture(scope="""class""" ) def __UpperCAmelCase ( lowercase ): """simple docstring""" _UpperCAmelCase = SageMakerTestEnvironment(framework=request.cls.framework )
"""simple docstring""" def __UpperCAmelCase ( lowercase ): """simple docstring""" def merge(lowercase ,lowercase ) -> list: def _merge(): while left and right: yield (left if left[0] <= right[0] else right).pop(0 ) yield from left yield from right return list(_merge() ) if len(lowercase ) <= 1: return collection _UpperCAmelCase = len(lowercase ) // 2 return merge(merge_sort(collection[:mid] ) ,merge_sort(collection[mid:] ) ) if __name__ == "__main__": import doctest doctest.testmod() UpperCAmelCase__ = input("""Enter numbers separated by a comma:\n""").strip() UpperCAmelCase__ = [int(item) for item in user_input.split(""",""")] print(*merge_sort(unsorted), sep=""",""")
"""simple docstring""" import string from math import logaa def __UpperCAmelCase ( lowercase ,lowercase ): """simple docstring""" _UpperCAmelCase = document.translate( str.maketrans("""""" ,"""""" ,string.punctuation ) ).replace("""\n""" ,"""""" ) _UpperCAmelCase = document_without_punctuation.split(""" """ ) # word tokenization return len([word for word in tokenize_document if word.lower() == term.lower()] ) def __UpperCAmelCase ( lowercase ,lowercase ): """simple docstring""" _UpperCAmelCase = corpus.lower().translate( str.maketrans("""""" ,"""""" ,string.punctuation ) ) # strip all punctuation and replace it with '' _UpperCAmelCase = corpus_without_punctuation.split("""\n""" ) _UpperCAmelCase = term.lower() return (len([doc for doc in docs if term in doc] ), len(lowercase )) def __UpperCAmelCase ( lowercase ,lowercase ,lowercase=False ): """simple docstring""" if smoothing: if n == 0: raise ValueError("""log10(0) is undefined.""" ) return round(1 + logaa(n / (1 + df) ) ,3 ) if df == 0: raise ZeroDivisionError("""df must be > 0""" ) elif n == 0: raise ValueError("""log10(0) is undefined.""" ) return round(logaa(n / df ) ,3 ) def __UpperCAmelCase ( lowercase ,lowercase ): """simple docstring""" return round(tf * idf ,3 )
"""simple docstring""" from __future__ import annotations import inspect import unittest import numpy as np from transformers import DeiTConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, TFDeiTModel, ) from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DeiTImageProcessor class a : def __init__( self : Optional[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Tuple=13 , __lowerCAmelCase : List[Any]=30 , __lowerCAmelCase : Optional[int]=2 , __lowerCAmelCase : Optional[int]=3 , __lowerCAmelCase : Any=True , __lowerCAmelCase : str=True , __lowerCAmelCase : Optional[Any]=32 , __lowerCAmelCase : Tuple=2 , __lowerCAmelCase : Any=4 , __lowerCAmelCase : Tuple=37 , __lowerCAmelCase : str="gelu" , __lowerCAmelCase : Any=0.1 , __lowerCAmelCase : str=0.1 , __lowerCAmelCase : Union[str, Any]=10 , __lowerCAmelCase : Optional[Any]=0.02 , __lowerCAmelCase : Tuple=3 , __lowerCAmelCase : List[str]=None , __lowerCAmelCase : Any=2 , ): _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = image_size _UpperCAmelCase = patch_size _UpperCAmelCase = num_channels _UpperCAmelCase = is_training _UpperCAmelCase = use_labels _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_act _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = type_sequence_label_size _UpperCAmelCase = initializer_range _UpperCAmelCase = scope _UpperCAmelCase = encoder_stride # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens) _UpperCAmelCase = (image_size // patch_size) ** 2 _UpperCAmelCase = num_patches + 2 def lowerCAmelCase_ ( self : int ): _UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _UpperCAmelCase = None if self.use_labels: _UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _UpperCAmelCase = self.get_config() return config, pixel_values, labels def lowerCAmelCase_ ( self : Optional[Any] ): return DeiTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__lowerCAmelCase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def lowerCAmelCase_ ( self : Optional[int] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[Any] ): _UpperCAmelCase = TFDeiTModel(config=__lowerCAmelCase ) _UpperCAmelCase = model(__lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, 
self.hidden_size) ) def lowerCAmelCase_ ( self : Union[str, Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Dict , __lowerCAmelCase : str ): _UpperCAmelCase = TFDeiTForMaskedImageModeling(config=__lowerCAmelCase ) _UpperCAmelCase = model(__lowerCAmelCase ) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images _UpperCAmelCase = 1 _UpperCAmelCase = TFDeiTForMaskedImageModeling(__lowerCAmelCase ) _UpperCAmelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) _UpperCAmelCase = model(__lowerCAmelCase ) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def lowerCAmelCase_ ( self : Union[str, Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : List[str] , __lowerCAmelCase : str ): _UpperCAmelCase = self.type_sequence_label_size _UpperCAmelCase = TFDeiTForImageClassification(__lowerCAmelCase ) _UpperCAmelCase = model(__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images _UpperCAmelCase = 1 _UpperCAmelCase = TFDeiTForImageClassification(__lowerCAmelCase ) _UpperCAmelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) _UpperCAmelCase = model(__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def lowerCAmelCase_ ( self : List[Any] ): _UpperCAmelCase = self.prepare_config_and_inputs() _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = config_and_inputs _UpperCAmelCase = {"""pixel_values""": pixel_values} return config, inputs_dict @require_tf class a ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): _snake_case : Optional[Any] = ( ( TFDeiTModel, TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, ) if is_tf_available() else () ) _snake_case : Tuple = ( { 'feature-extraction': TFDeiTModel, 'image-classification': (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher), } if is_tf_available() else {} ) _snake_case : List[Any] = False _snake_case : str = False _snake_case : Any = False _snake_case : Union[str, Any] = False def lowerCAmelCase_ ( self : Dict ): _UpperCAmelCase = TFDeiTModelTester(self ) _UpperCAmelCase = ConfigTester(self , config_class=__lowerCAmelCase , has_text_modality=__lowerCAmelCase , hidden_size=37 ) def lowerCAmelCase_ ( self : Union[str, Any] ): self.config_tester.run_common_tests() @unittest.skip(reason="""DeiT does not use inputs_embeds""" ) def lowerCAmelCase_ ( self : Any ): pass def lowerCAmelCase_ ( self : Optional[Any] ): _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCAmelCase = model_class(__lowerCAmelCase ) self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) ) _UpperCAmelCase = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__lowerCAmelCase , tf.keras.layers.Dense ) ) def lowerCAmelCase_ ( self : Union[str, Any] ): _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCAmelCase = model_class(__lowerCAmelCase ) _UpperCAmelCase = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _UpperCAmelCase = 
[*signature.parameters.keys()] _UpperCAmelCase = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , __lowerCAmelCase ) def lowerCAmelCase_ ( self : Optional[Any] ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : List[Any] ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : Dict ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : str , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : List[str]=False ): _UpperCAmelCase = super()._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase ) if return_labels: if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters: del inputs_dict["labels"] return inputs_dict @slow def lowerCAmelCase_ ( self : List[Any] ): for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _UpperCAmelCase = TFDeiTModel.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) def __UpperCAmelCase ( ): """simple docstring""" _UpperCAmelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_tf @require_vision class a ( unittest.TestCase ): @cached_property def lowerCAmelCase_ ( self : Dict ): return ( DeiTImageProcessor.from_pretrained("""facebook/deit-base-distilled-patch16-224""" ) if is_vision_available() else None ) @slow def lowerCAmelCase_ ( self : List[Any] ): _UpperCAmelCase = TFDeiTForImageClassificationWithTeacher.from_pretrained("""facebook/deit-base-distilled-patch16-224""" ) _UpperCAmelCase = self.default_image_processor _UpperCAmelCase = prepare_img() _UpperCAmelCase = image_processor(images=__lowerCAmelCase , return_tensors="""tf""" ) # forward pass _UpperCAmelCase = model(**__lowerCAmelCase ) # verify the logits _UpperCAmelCase = tf.TensorShape((1, 1000) ) self.assertEqual(outputs.logits.shape , __lowerCAmelCase ) _UpperCAmelCase = tf.constant([-1.0_266, 0.1_912, -1.2_861] ) self.assertTrue(np.allclose(outputs.logits[0, :3] , __lowerCAmelCase , atol=1e-4 ) )
"""simple docstring""" import argparse from torch import nn # transformers_old should correspond to branch `save_old_prophetnet_model_structure` here # original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively from transformers_old.modeling_prophetnet import ( ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld, ) from transformers_old.modeling_xlm_prophetnet import ( XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld, ) from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging UpperCAmelCase__ = logging.get_logger(__name__) logging.set_verbosity_info() def __UpperCAmelCase ( lowercase ,lowercase ): """simple docstring""" if "xprophetnet" in prophetnet_checkpoint_path: _UpperCAmelCase = XLMProphetNetForConditionalGenerationOld.from_pretrained(lowercase ) _UpperCAmelCase , _UpperCAmelCase = XLMProphetNetForConditionalGeneration.from_pretrained( lowercase ,output_loading_info=lowercase ) else: _UpperCAmelCase = ProphetNetForConditionalGenerationOld.from_pretrained(lowercase ) _UpperCAmelCase , _UpperCAmelCase = ProphetNetForConditionalGeneration.from_pretrained( lowercase ,output_loading_info=lowercase ) _UpperCAmelCase = ["""key_proj""", """value_proj""", """query_proj"""] _UpperCAmelCase = { """self_attn""": """ngram_self_attn""", """cross_attn""": """encoder_attn""", """cross_attn_layer_norm""": """encoder_attn_layer_norm""", """feed_forward_layer_norm""": """final_layer_norm""", """feed_forward""": """""", """intermediate""": """fc1""", """output""": """fc2""", """key_proj""": """k_proj""", """query_proj""": """q_proj""", """value_proj""": """v_proj""", """word_embeddings""": """embed_tokens""", """embeddings_layer_norm""": """emb_layer_norm""", """relative_pos_embeddings""": """relative_linear""", """ngram_embeddings""": """ngram_input_embed""", """position_embeddings""": """embed_positions""", } for key in loading_info["missing_keys"]: _UpperCAmelCase = key.split(""".""" ) if attributes[0] == "lm_head": _UpperCAmelCase = prophet _UpperCAmelCase = prophet_old else: _UpperCAmelCase = prophet.prophetnet _UpperCAmelCase = prophet_old.model _UpperCAmelCase = False for attribute in attributes: if attribute in mapping: _UpperCAmelCase = mapping[attribute] if not hasattr(lowercase ,lowercase ) and len(lowercase ) > 0: _UpperCAmelCase = attribute elif hasattr(lowercase ,lowercase ): _UpperCAmelCase = attribute if attribute == "weight": assert old_model.weight.shape == model.weight.shape, "Shapes have to match!" _UpperCAmelCase = old_model.weight logger.info(f'''{attribute} is initialized.''' ) _UpperCAmelCase = True break elif attribute == "bias": assert old_model.bias.shape == model.bias.shape, "Shapes have to match!" 
_UpperCAmelCase = old_model.bias logger.info(f'''{attribute} is initialized''' ) _UpperCAmelCase = True break elif attribute in special_keys and hasattr(lowercase ,"""in_proj_weight""" ): _UpperCAmelCase = old_model.in_proj_weight.shape[0] // 3 _UpperCAmelCase = getattr(lowercase ,lowercase ) param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match" param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match" if attribute == "query_proj": _UpperCAmelCase = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] ) _UpperCAmelCase = nn.Parameter(old_model.in_proj_bias[:embed_dim] ) elif attribute == "key_proj": _UpperCAmelCase = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] ) _UpperCAmelCase = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] ) elif attribute == "value_proj": _UpperCAmelCase = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] ) _UpperCAmelCase = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] ) _UpperCAmelCase = True break elif attribute == "position_embeddings": assert ( model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1] ), "Hidden size has to match" assert model.position_embeddings.weight.shape[0] == 5_12, "We want 512 position_embeddings." _UpperCAmelCase = nn.Parameter(old_model.embed_positions.weight[:5_12, :] ) _UpperCAmelCase = True break if attribute.isdigit(): _UpperCAmelCase = model[int(lowercase )] _UpperCAmelCase = old_model[int(lowercase )] else: _UpperCAmelCase = getattr(lowercase ,lowercase ) if old_attribute == "": _UpperCAmelCase = old_model else: if not hasattr(lowercase ,lowercase ): raise ValueError(f'''{old_model} does not have {old_attribute}''' ) _UpperCAmelCase = getattr(lowercase ,lowercase ) if not is_key_init: raise ValueError(f'''{key} was not correctly initialized!''' ) print(f'''Saving model to {pytorch_dump_folder_path}''' ) prophet.save_pretrained(lowercase ) if __name__ == "__main__": UpperCAmelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--prophetnet_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump.""" ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) UpperCAmelCase__ = parser.parse_args() convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
"""simple docstring""" from __future__ import annotations import inspect import unittest from typing import List, Tuple from transformers import RegNetConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class a : def __init__( self : Dict , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[int]=3 , __lowerCAmelCase : Tuple=32 , __lowerCAmelCase : List[str]=3 , __lowerCAmelCase : Union[str, Any]=10 , __lowerCAmelCase : Tuple=[10, 20, 30, 40] , __lowerCAmelCase : Optional[Any]=[1, 1, 2, 1] , __lowerCAmelCase : str=True , __lowerCAmelCase : Optional[Any]=True , __lowerCAmelCase : int="relu" , __lowerCAmelCase : Union[str, Any]=3 , __lowerCAmelCase : Optional[Any]=None , ): _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = image_size _UpperCAmelCase = num_channels _UpperCAmelCase = embeddings_size _UpperCAmelCase = hidden_sizes _UpperCAmelCase = depths _UpperCAmelCase = is_training _UpperCAmelCase = use_labels _UpperCAmelCase = hidden_act _UpperCAmelCase = num_labels _UpperCAmelCase = scope _UpperCAmelCase = len(__lowerCAmelCase ) def lowerCAmelCase_ ( self : List[str] ): _UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _UpperCAmelCase = None if self.use_labels: _UpperCAmelCase = ids_tensor([self.batch_size] , self.num_labels ) _UpperCAmelCase = self.get_config() return config, pixel_values, labels def lowerCAmelCase_ ( self : Dict ): return RegNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , ) def lowerCAmelCase_ ( self : Dict , __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : str ): _UpperCAmelCase = TFRegNetModel(config=__lowerCAmelCase ) _UpperCAmelCase = model(__lowerCAmelCase , training=__lowerCAmelCase ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : int , __lowerCAmelCase : Union[str, Any] ): _UpperCAmelCase = self.num_labels _UpperCAmelCase = TFRegNetForImageClassification(__lowerCAmelCase ) _UpperCAmelCase = model(__lowerCAmelCase , labels=__lowerCAmelCase , training=__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCAmelCase_ ( self : Union[str, Any] ): _UpperCAmelCase = self.prepare_config_and_inputs() _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = config_and_inputs _UpperCAmelCase = {"""pixel_values""": pixel_values} return config, inputs_dict @require_tf class a ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): _snake_case : int = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else () _snake_case : Any = ( 
{'feature-extraction': TFRegNetModel, 'image-classification': TFRegNetForImageClassification} if is_tf_available() else {} ) _snake_case : Any = False _snake_case : Optional[int] = False _snake_case : List[Any] = False _snake_case : str = False _snake_case : Dict = False def lowerCAmelCase_ ( self : str ): _UpperCAmelCase = TFRegNetModelTester(self ) _UpperCAmelCase = ConfigTester(self , config_class=__lowerCAmelCase , has_text_modality=__lowerCAmelCase ) def lowerCAmelCase_ ( self : int ): return @unittest.skip(reason="""RegNet does not use inputs_embeds""" ) def lowerCAmelCase_ ( self : List[str] ): pass @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices("""GPU""" ) ) == 0 , reason="""TF does not support backprop for grouped convolutions on CPU.""" , ) @slow def lowerCAmelCase_ ( self : List[str] ): super().test_keras_fit() @unittest.skip(reason="""RegNet does not support input and output embeddings""" ) def lowerCAmelCase_ ( self : Optional[int] ): pass def lowerCAmelCase_ ( self : Tuple ): _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCAmelCase = model_class(__lowerCAmelCase ) _UpperCAmelCase = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _UpperCAmelCase = [*signature.parameters.keys()] _UpperCAmelCase = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , __lowerCAmelCase ) def lowerCAmelCase_ ( self : Union[str, Any] ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : str ): def check_hidden_states_output(__lowerCAmelCase : int , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[int] ): _UpperCAmelCase = model_class(__lowerCAmelCase ) _UpperCAmelCase = model(**self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase ) , training=__lowerCAmelCase ) _UpperCAmelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states _UpperCAmelCase = self.model_tester.num_stages self.assertEqual(len(__lowerCAmelCase ) , expected_num_stages + 1 ) # RegNet's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , ) _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() _UpperCAmelCase = ["""basic""", """bottleneck"""] for model_class in self.all_model_classes: for layer_type in layers_type: _UpperCAmelCase = layer_type _UpperCAmelCase = True check_hidden_states_output(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _UpperCAmelCase = True check_hidden_states_output(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) def lowerCAmelCase_ ( self : List[str] ): _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() def check_equivalence(__lowerCAmelCase : int , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Tuple={} ): _UpperCAmelCase = model(__lowerCAmelCase , return_dict=__lowerCAmelCase , **__lowerCAmelCase ) _UpperCAmelCase = model(__lowerCAmelCase , return_dict=__lowerCAmelCase , **__lowerCAmelCase ).to_tuple() def recursive_check(__lowerCAmelCase : Optional[int] , __lowerCAmelCase : str 
): if isinstance(__lowerCAmelCase , (List, Tuple) ): for tuple_iterable_value, dict_iterable_value in zip(__lowerCAmelCase , __lowerCAmelCase ): recursive_check(__lowerCAmelCase , __lowerCAmelCase ) elif tuple_object is None: return else: self.assertTrue( all(tf.equal(__lowerCAmelCase , __lowerCAmelCase ) ) , msg=( """Tuple and dict output are not equal. Difference:""" f''' {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}''' ) , ) recursive_check(__lowerCAmelCase , __lowerCAmelCase ) for model_class in self.all_model_classes: _UpperCAmelCase = model_class(__lowerCAmelCase ) _UpperCAmelCase = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase ) check_equivalence(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase ) _UpperCAmelCase = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase ) check_equivalence(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase ) check_equivalence(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , {"""output_hidden_states""": True} ) _UpperCAmelCase = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase ) _UpperCAmelCase = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase ) check_equivalence(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , {"""output_hidden_states""": True} ) def lowerCAmelCase_ ( self : List[str] ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__lowerCAmelCase ) @slow def lowerCAmelCase_ ( self : Any ): for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _UpperCAmelCase = TFRegNetModel.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) def __UpperCAmelCase ( ): """simple docstring""" _UpperCAmelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_tf @require_vision class a ( unittest.TestCase ): @cached_property def lowerCAmelCase_ ( self : Optional[Any] ): return ( AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def lowerCAmelCase_ ( self : Tuple ): _UpperCAmelCase = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) _UpperCAmelCase = self.default_image_processor _UpperCAmelCase = prepare_img() _UpperCAmelCase = image_processor(images=__lowerCAmelCase , return_tensors="""tf""" ) # forward pass _UpperCAmelCase = model(**__lowerCAmelCase , training=__lowerCAmelCase ) # verify the logits _UpperCAmelCase = tf.TensorShape((1, 1000) ) self.assertEqual(outputs.logits.shape , __lowerCAmelCase ) _UpperCAmelCase = tf.constant([-0.4_180, -1.5_051, -3.4_836] ) tf.debugging.assert_near(outputs.logits[0, :3] , __lowerCAmelCase , atol=1e-4 )
"""simple docstring""" import mpmath # for roots of unity import numpy as np class a : def __init__( self : Tuple , __lowerCAmelCase : Dict=None , __lowerCAmelCase : Union[str, Any]=None ): # Input as list _UpperCAmelCase = list(poly_a or [0] )[:] _UpperCAmelCase = list(poly_b or [0] )[:] # Remove leading zero coefficients while self.polyA[-1] == 0: self.polyA.pop() _UpperCAmelCase = len(self.polyA ) while self.polyB[-1] == 0: self.polyB.pop() _UpperCAmelCase = len(self.polyB ) # Add 0 to make lengths equal a power of 2 _UpperCAmelCase = int( 2 ** np.ceil(np.loga(len(self.polyA ) + len(self.polyB ) - 1 ) ) ) while len(self.polyA ) < self.c_max_length: self.polyA.append(0 ) while len(self.polyB ) < self.c_max_length: self.polyB.append(0 ) # A complex root used for the fourier transform _UpperCAmelCase = complex(mpmath.root(x=1 , n=self.c_max_length , k=1 ) ) # The product _UpperCAmelCase = self.__multiply() def lowerCAmelCase_ ( self : Optional[int] , __lowerCAmelCase : str ): _UpperCAmelCase = [[x] for x in self.polyA] if which == """A""" else [[x] for x in self.polyB] # Corner case if len(__lowerCAmelCase ) <= 1: return dft[0] # _UpperCAmelCase = self.c_max_length // 2 while next_ncol > 0: _UpperCAmelCase = [[] for i in range(__lowerCAmelCase )] _UpperCAmelCase = self.root**next_ncol # First half of next step _UpperCAmelCase = 1 for j in range(self.c_max_length // (next_ncol * 2) ): for i in range(__lowerCAmelCase ): new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j] ) current_root *= root # Second half of next step _UpperCAmelCase = 1 for j in range(self.c_max_length // (next_ncol * 2) ): for i in range(__lowerCAmelCase ): new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j] ) current_root *= root # Update _UpperCAmelCase = new_dft _UpperCAmelCase = next_ncol // 2 return dft[0] def lowerCAmelCase_ ( self : Tuple ): _UpperCAmelCase = self.__dft("""A""" ) _UpperCAmelCase = self.__dft("""B""" ) _UpperCAmelCase = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length )]] del dft_a del dft_b # Corner Case if len(inverce_c[0] ) <= 1: return inverce_c[0] # Inverse DFT _UpperCAmelCase = 2 while next_ncol <= self.c_max_length: _UpperCAmelCase = [[] for i in range(__lowerCAmelCase )] _UpperCAmelCase = self.root ** (next_ncol // 2) _UpperCAmelCase = 1 # First half of next step for j in range(self.c_max_length // next_ncol ): for i in range(next_ncol // 2 ): # Even positions new_inverse_c[i].append( ( inverce_c[i][j] + inverce_c[i][j + self.c_max_length // next_ncol] ) / 2 ) # Odd positions new_inverse_c[i + next_ncol // 2].append( ( inverce_c[i][j] - inverce_c[i][j + self.c_max_length // next_ncol] ) / (2 * current_root) ) current_root *= root # Update _UpperCAmelCase = new_inverse_c next_ncol *= 2 # Unpack _UpperCAmelCase = [round(x[0].real , 8 ) + round(x[0].imag , 8 ) * 1j for x in inverce_c] # Remove leading 0's while inverce_c[-1] == 0: inverce_c.pop() return inverce_c def __str__( self : Dict ): _UpperCAmelCase = """A = """ + """ + """.join( f'''{coef}*x^{i}''' for coef, i in enumerate(self.polyA[: self.len_A] ) ) _UpperCAmelCase = """B = """ + """ + """.join( f'''{coef}*x^{i}''' for coef, i in enumerate(self.polyB[: self.len_B] ) ) _UpperCAmelCase = """A*B = """ + """ + """.join( f'''{coef}*x^{i}''' for coef, i in enumerate(self.product ) ) return f'''{a}\n{b}\n{c}''' # Unit tests if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" from argparse import ArgumentParser from .add_new_model import AddNewModelCommand from .add_new_model_like import AddNewModelLikeCommand from .convert import ConvertCommand from .download import DownloadCommand from .env import EnvironmentCommand from .lfs import LfsCommands from .pt_to_tf import PTtoTFCommand from .run import RunCommand from .serving import ServeCommand from .user import UserCommands def __UpperCAmelCase ( ): """simple docstring""" _UpperCAmelCase = ArgumentParser("""Transformers CLI tool""" ,usage="""transformers-cli <command> [<args>]""" ) _UpperCAmelCase = parser.add_subparsers(help="""transformers-cli command helpers""" ) # Register commands ConvertCommand.register_subcommand(lowercase ) DownloadCommand.register_subcommand(lowercase ) EnvironmentCommand.register_subcommand(lowercase ) RunCommand.register_subcommand(lowercase ) ServeCommand.register_subcommand(lowercase ) UserCommands.register_subcommand(lowercase ) AddNewModelCommand.register_subcommand(lowercase ) AddNewModelLikeCommand.register_subcommand(lowercase ) LfsCommands.register_subcommand(lowercase ) PTtoTFCommand.register_subcommand(lowercase ) # Let's go _UpperCAmelCase = parser.parse_args() if not hasattr(lowercase ,"""func""" ): parser.print_help() exit(1 ) # Run _UpperCAmelCase = args.func(lowercase ) service.run() if __name__ == "__main__": main()
"""simple docstring""" import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto.configuration_auto import CONFIG_MAPPING UpperCAmelCase__ = logging.get_logger(__name__) class a ( lowerCAmelCase_ ): _snake_case : List[str] = 'upernet' def __init__( self : Tuple , __lowerCAmelCase : int=None , __lowerCAmelCase : Tuple=512 , __lowerCAmelCase : Union[str, Any]=0.02 , __lowerCAmelCase : Tuple=[1, 2, 3, 6] , __lowerCAmelCase : Any=True , __lowerCAmelCase : Any=0.4 , __lowerCAmelCase : Union[str, Any]=384 , __lowerCAmelCase : Optional[int]=256 , __lowerCAmelCase : List[str]=1 , __lowerCAmelCase : Optional[int]=False , __lowerCAmelCase : Optional[int]=255 , **__lowerCAmelCase : Union[str, Any] , ): super().__init__(**__lowerCAmelCase ) if backbone_config is None: logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" ) _UpperCAmelCase = CONFIG_MAPPING["""resnet"""](out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] ) elif isinstance(__lowerCAmelCase , __lowerCAmelCase ): _UpperCAmelCase = backbone_config.get("""model_type""" ) _UpperCAmelCase = CONFIG_MAPPING[backbone_model_type] _UpperCAmelCase = config_class.from_dict(__lowerCAmelCase ) _UpperCAmelCase = backbone_config _UpperCAmelCase = hidden_size _UpperCAmelCase = initializer_range _UpperCAmelCase = pool_scales _UpperCAmelCase = use_auxiliary_head _UpperCAmelCase = auxiliary_loss_weight _UpperCAmelCase = auxiliary_in_channels _UpperCAmelCase = auxiliary_channels _UpperCAmelCase = auxiliary_num_convs _UpperCAmelCase = auxiliary_concat_input _UpperCAmelCase = loss_ignore_index def lowerCAmelCase_ ( self : List[Any] ): _UpperCAmelCase = copy.deepcopy(self.__dict__ ) _UpperCAmelCase = self.backbone_config.to_dict() _UpperCAmelCase = self.__class__.model_type return output
"""simple docstring""" import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized, parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv('TEST_SAGEMAKER' , 'False' ) ) is not True , reason='Skipping test because should only be run when releasing minor transformers version' , ) @pytest.mark.usefixtures('sm_env' ) @parameterized_class( [ { 'framework': 'pytorch', 'script': 'run_glue_model_parallelism.py', 'model_name_or_path': 'roberta-large', 'instance_type': 'ml.p3dn.24xlarge', 'results': {'train_runtime': 16_00, 'eval_accuracy': 0.3, 'eval_loss': 1.2}, }, { 'framework': 'pytorch', 'script': 'run_glue.py', 'model_name_or_path': 'roberta-large', 'instance_type': 'ml.p3dn.24xlarge', 'results': {'train_runtime': 16_00, 'eval_accuracy': 0.3, 'eval_loss': 1.2}, }, ] ) class a ( unittest.TestCase ): def lowerCAmelCase_ ( self : Dict ): if self.framework == "pytorch": subprocess.run( f'''cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'''.split() , encoding="""utf-8""" , check=__lowerCAmelCase , ) assert hasattr(self , """env""" ) def lowerCAmelCase_ ( self : Union[str, Any] , __lowerCAmelCase : Optional[int] ): # configuration for running training on smdistributed Model Parallel _UpperCAmelCase = { """enabled""": True, """processes_per_host""": 8, } _UpperCAmelCase = { """enabled""": True, """parameters""": { """microbatches""": 4, """placement_strategy""": """spread""", """pipeline""": """interleaved""", """optimize""": """speed""", """partitions""": 4, """ddp""": True, }, } _UpperCAmelCase = {"""smdistributed""": {"""modelparallel""": smp_options}, """mpi""": mpi_options} _UpperCAmelCase = """trainer""" if self.script == """run_glue.py""" else """smtrainer""" # creates estimator return HuggingFace( entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f'''{self.env.base_job_name}-{instance_count}-smp-{name_extension}''' , instance_count=__lowerCAmelCase , instance_type=self.instance_type , debugger_hook_config=__lowerCAmelCase , hyperparameters={ **self.env.hyperparameters, """model_name_or_path""": self.model_name_or_path, """max_steps""": 500, } , metric_definitions=self.env.metric_definitions , distribution=__lowerCAmelCase , py_version="""py36""" , ) def lowerCAmelCase_ ( self : str , __lowerCAmelCase : int ): TrainingJobAnalytics(__lowerCAmelCase ).export_csv(f'''{self.env.test_path}/{job_name}_metrics.csv''' ) @parameterized.expand([(1,)] ) def lowerCAmelCase_ ( self : str , __lowerCAmelCase : List[str] ): # create estimator _UpperCAmelCase = self.create_estimator(__lowerCAmelCase ) # run training estimator.fit() # result dataframe _UpperCAmelCase = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe() # extract kpis _UpperCAmelCase = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""] ) _UpperCAmelCase = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""] ) # get train time from SageMaker job, this includes starting, preprocessing, stopping _UpperCAmelCase = ( Session().describe_training_job(estimator.latest_training_job.name ).get("""TrainingTimeInSeconds""" , 99_9999 ) ) # assert kpis assert train_runtime <= self.results["train_runtime"] 
assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy ) assert all(t <= self.results["""eval_loss"""] for t in eval_loss ) # dump tests result into json file to share in PR with open(f'''{estimator.latest_training_job.name}.json''' , """w""" ) as outfile: json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} , __lowerCAmelCase )
"""simple docstring""" from itertools import product def __UpperCAmelCase ( lowercase ,lowercase ): """simple docstring""" _UpperCAmelCase = sides_number _UpperCAmelCase = max_face_number * dice_number _UpperCAmelCase = [0] * (max_total + 1) _UpperCAmelCase = 1 _UpperCAmelCase = range(lowercase ,max_face_number + 1 ) for dice_numbers in product(lowercase ,repeat=lowercase ): _UpperCAmelCase = sum(lowercase ) totals_frequencies[total] += 1 return totals_frequencies def __UpperCAmelCase ( ): """simple docstring""" _UpperCAmelCase = total_frequency_distribution( sides_number=4 ,dice_number=9 ) _UpperCAmelCase = total_frequency_distribution( sides_number=6 ,dice_number=6 ) _UpperCAmelCase = 0 _UpperCAmelCase = 9 _UpperCAmelCase = 4 * 9 _UpperCAmelCase = 6 for peter_total in range(lowercase ,max_peter_total + 1 ): peter_wins_count += peter_totals_frequencies[peter_total] * sum( colin_totals_frequencies[min_colin_total:peter_total] ) _UpperCAmelCase = (4**9) * (6**6) _UpperCAmelCase = peter_wins_count / total_games_number _UpperCAmelCase = round(lowercase ,ndigits=7 ) return rounded_peter_win_probability if __name__ == "__main__": print(F'''{solution() = }''')
"""simple docstring""" def __UpperCAmelCase ( lowercase = 10_00 ): """simple docstring""" _UpperCAmelCase = 2**power _UpperCAmelCase = 0 while n: _UpperCAmelCase , _UpperCAmelCase = r + n % 10, n // 10 return r if __name__ == "__main__": print(solution(int(str(input()).strip())))
"""simple docstring""" import copy from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto.configuration_auto import AutoConfig if TYPE_CHECKING: from ... import PreTrainedTokenizerBase, TensorType UpperCAmelCase__ = logging.get_logger(__name__) class a ( lowerCAmelCase_ ): _snake_case : List[Any] = 'vision-encoder-decoder' _snake_case : Optional[int] = True def __init__( self : int , **__lowerCAmelCase : Any ): super().__init__(**__lowerCAmelCase ) if "encoder" not in kwargs or "decoder" not in kwargs: raise ValueError( f'''A configuraton of type {self.model_type} cannot be instantiated because ''' f'''not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}''' ) _UpperCAmelCase = kwargs.pop("""encoder""" ) _UpperCAmelCase = encoder_config.pop("""model_type""" ) _UpperCAmelCase = kwargs.pop("""decoder""" ) _UpperCAmelCase = decoder_config.pop("""model_type""" ) _UpperCAmelCase = AutoConfig.for_model(__lowerCAmelCase , **__lowerCAmelCase ) _UpperCAmelCase = AutoConfig.for_model(__lowerCAmelCase , **__lowerCAmelCase ) _UpperCAmelCase = True @classmethod def lowerCAmelCase_ ( cls : int , __lowerCAmelCase : PretrainedConfig , __lowerCAmelCase : PretrainedConfig , **__lowerCAmelCase : str ): logger.info("""Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config""" ) _UpperCAmelCase = True _UpperCAmelCase = True return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **__lowerCAmelCase ) def lowerCAmelCase_ ( self : int ): _UpperCAmelCase = copy.deepcopy(self.__dict__ ) _UpperCAmelCase = self.encoder.to_dict() _UpperCAmelCase = self.decoder.to_dict() _UpperCAmelCase = self.__class__.model_type return output class a ( lowerCAmelCase_ ): _snake_case : Union[str, Any] = version.parse('1.11' ) @property def lowerCAmelCase_ ( self : int ): return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) @property def lowerCAmelCase_ ( self : Tuple ): return 1e-4 @property def lowerCAmelCase_ ( self : Dict ): return OrderedDict({"""last_hidden_state""": {0: """batch""", 1: """encoder_sequence"""}} ) class a ( lowerCAmelCase_ ): @property def lowerCAmelCase_ ( self : Any ): _UpperCAmelCase = OrderedDict() _UpperCAmelCase = {0: """batch""", 1: """past_decoder_sequence + sequence"""} _UpperCAmelCase = {0: """batch""", 1: """past_decoder_sequence + sequence"""} _UpperCAmelCase = {0: """batch""", 1: """encoder_sequence"""} return common_inputs def lowerCAmelCase_ ( self : Union[str, Any] , __lowerCAmelCase : "PreTrainedTokenizerBase" , __lowerCAmelCase : int = -1 , __lowerCAmelCase : int = -1 , __lowerCAmelCase : bool = False , __lowerCAmelCase : Optional["TensorType"] = None , ): import torch _UpperCAmelCase = OrderedDict() _UpperCAmelCase = super().generate_dummy_inputs( __lowerCAmelCase , batch_size=__lowerCAmelCase , seq_length=__lowerCAmelCase , is_pair=__lowerCAmelCase , framework=__lowerCAmelCase ) _UpperCAmelCase , _UpperCAmelCase = dummy_input["""input_ids"""].shape _UpperCAmelCase = (batch, encoder_sequence, self._config.encoder_hidden_size) _UpperCAmelCase = dummy_input.pop("""input_ids""" ) _UpperCAmelCase = dummy_input.pop("""attention_mask""" ) _UpperCAmelCase = torch.zeros(__lowerCAmelCase ) return common_inputs class a ( lowerCAmelCase_ ): @property def lowerCAmelCase_ ( self : Tuple ): 
pass def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : PretrainedConfig ): return VisionEncoderDecoderEncoderOnnxConfig(__lowerCAmelCase ) def lowerCAmelCase_ ( self : int , __lowerCAmelCase : PretrainedConfig , __lowerCAmelCase : PretrainedConfig , __lowerCAmelCase : str = "default" ): _UpperCAmelCase = encoder_config.hidden_size return VisionEncoderDecoderDecoderOnnxConfig(__lowerCAmelCase , __lowerCAmelCase )
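# A minimal sketch of the composite config's intended construction path; the
# ViT encoder / GPT-2 decoder pairing is an illustrative assumption.
from transformers import GPT2Config, ViTConfig, VisionEncoderDecoderConfig

config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(ViTConfig(), GPT2Config())
print(config.encoder.model_type, config.decoder.model_type)  # vit gpt2
print(config.decoder.is_decoder, config.decoder.add_cross_attention)  # True True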
"""simple docstring""" def __UpperCAmelCase ( lowercase ,lowercase ): """simple docstring""" _UpperCAmelCase = int(lowercase ) # Initialize Result _UpperCAmelCase = [] # Traverse through all denomination for denomination in reversed(lowercase ): # Find denominations while int(lowercase ) >= int(lowercase ): total_value -= int(lowercase ) answer.append(lowercase ) # Append the "answers" array return answer # Driver Code if __name__ == "__main__": UpperCAmelCase__ = [] UpperCAmelCase__ = """0""" if ( input("""Do you want to enter your denominations ? (yY/n): """).strip().lower() == "y" ): UpperCAmelCase__ = int(input("""Enter the number of denominations you want to add: """).strip()) for i in range(0, n): denominations.append(int(input(F'''Denomination {i}: ''').strip())) UpperCAmelCase__ = input("""Enter the change you want to make in Indian Currency: """).strip() else: # All denominations of Indian Currency if user does not enter UpperCAmelCase__ = [1, 2, 5, 1_0, 2_0, 5_0, 1_0_0, 5_0_0, 2_0_0_0] UpperCAmelCase__ = input("""Enter the change you want to make: """).strip() if int(value) == 0 or int(value) < 0: print("""The total value cannot be zero or negative.""") else: print(F'''Following is minimal change for {value}: ''') UpperCAmelCase__ = find_minimum_change(denominations, value) # Print result for i in range(len(answer)): print(answer[i], end=""" """)
"""simple docstring""" import argparse from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline if __name__ == "__main__": UpperCAmelCase__ = argparse.ArgumentParser() parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""") parser.add_argument( """--txt2img_unclip""", default="""kakaobrain/karlo-v1-alpha""", type=str, required=False, help="""The pretrained txt2img unclip.""", ) UpperCAmelCase__ = parser.parse_args() UpperCAmelCase__ = UnCLIPPipeline.from_pretrained(args.txtaimg_unclip) UpperCAmelCase__ = CLIPImageProcessor() UpperCAmelCase__ = CLIPVisionModelWithProjection.from_pretrained("""openai/clip-vit-large-patch14""") UpperCAmelCase__ = UnCLIPImageVariationPipeline( decoder=txtaimg.decoder, text_encoder=txtaimg.text_encoder, tokenizer=txtaimg.tokenizer, text_proj=txtaimg.text_proj, feature_extractor=feature_extractor, image_encoder=image_encoder, super_res_first=txtaimg.super_res_first, super_res_last=txtaimg.super_res_last, decoder_scheduler=txtaimg.decoder_scheduler, super_res_scheduler=txtaimg.super_res_scheduler, ) imgaimg.save_pretrained(args.dump_path)
"""simple docstring""" def __UpperCAmelCase ( lowercase ): """simple docstring""" if len(lowercase ) <= 1: return [tuple(lowercase )] _UpperCAmelCase = [] def generate(lowercase ,lowercase ): if k == 1: res.append(tuple(arr[:] ) ) return generate(k - 1 ,lowercase ) for i in range(k - 1 ): if k % 2 == 0: # k is even _UpperCAmelCase , _UpperCAmelCase = arr[k - 1], arr[i] else: # k is odd _UpperCAmelCase , _UpperCAmelCase = arr[k - 1], arr[0] generate(k - 1 ,lowercase ) generate(len(lowercase ) ,lowercase ) return res if __name__ == "__main__": UpperCAmelCase__ = input("""Enter numbers separated by a comma:\n""").strip() UpperCAmelCase__ = [int(item) for item in user_input.split(""",""")] print(heaps(arr))
"""simple docstring""" import functools import gc import inspect import torch from .imports import is_npu_available, is_xpu_available def __UpperCAmelCase ( *lowercase ): """simple docstring""" if not isinstance(lowercase ,lowercase ): _UpperCAmelCase = list(lowercase ) for i in range(len(lowercase ) ): _UpperCAmelCase = None gc.collect() if is_xpu_available(): torch.xpu.empty_cache() elif is_npu_available(): torch.npu.empty_cache() else: torch.cuda.empty_cache() return objects def __UpperCAmelCase ( lowercase ): """simple docstring""" _UpperCAmelCase = [ """CUDA out of memory.""", # CUDA OOM """cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.""", # CUDNN SNAFU """DefaultCPUAllocator: can't allocate memory""", # CPU OOM ] if isinstance(lowercase ,lowercase ) and len(exception.args ) == 1: return any(err in exception.args[0] for err in _statements ) return False def __UpperCAmelCase ( lowercase = None ,lowercase = 1_28 ): """simple docstring""" if function is None: return functools.partial(lowercase ,starting_batch_size=lowercase ) _UpperCAmelCase = starting_batch_size def decorator(*lowercase ,**lowercase ): nonlocal batch_size gc.collect() if is_xpu_available(): torch.xpu.empty_cache() elif is_npu_available(): torch.npu.empty_cache() else: torch.cuda.empty_cache() _UpperCAmelCase = list(inspect.signature(lowercase ).parameters.keys() ) # Guard against user error if len(lowercase ) < (len(lowercase ) + 1): _UpperCAmelCase = """, """.join([f'''{arg}={value}''' for arg, value in zip(params[1:] ,args[1:] )] ) raise TypeError( f'''Batch size was passed into `{function.__name__}` as the first argument when called.''' f'''Remove this as the decorator already does so: `{function.__name__}({arg_str})`''' ) while True: if batch_size == 0: raise RuntimeError("""No executable batch size found, reached zero.""" ) try: return function(lowercase ,*lowercase ,**lowercase ) except Exception as e: if should_reduce_batch_size(lowercase ): gc.collect() if is_xpu_available(): torch.xpu.empty_cache() elif is_npu_available(): torch.npu.empty_cache() else: torch.cuda.empty_cache() batch_size //= 2 else: raise return decorator
"""simple docstring""" import unittest import numpy as np from transformers import is_flax_available from transformers.testing_utils import require_flax from ..test_modeling_flax_common import ids_tensor if is_flax_available(): import jax import jax.numpy as jnp from transformers.generation import ( FlaxForcedBOSTokenLogitsProcessor, FlaxForcedEOSTokenLogitsProcessor, FlaxLogitsProcessorList, FlaxMinLengthLogitsProcessor, FlaxTemperatureLogitsWarper, FlaxTopKLogitsWarper, FlaxTopPLogitsWarper, ) @require_flax class a ( unittest.TestCase ): def lowerCAmelCase_ ( self : List[str] , __lowerCAmelCase : int , __lowerCAmelCase : int ): _UpperCAmelCase = jnp.ones((batch_size, length) ) / length return scores def lowerCAmelCase_ ( self : Tuple ): _UpperCAmelCase = None _UpperCAmelCase = 20 _UpperCAmelCase = self._get_uniform_logits(batch_size=2 , length=__lowerCAmelCase ) # tweak scores to not be uniform anymore _UpperCAmelCase = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch _UpperCAmelCase = scores.at[1, 10].set((1 / length) - 0.4 ) # valley, 1st batch # compute softmax _UpperCAmelCase = jax.nn.softmax(__lowerCAmelCase , axis=-1 ) _UpperCAmelCase = FlaxTemperatureLogitsWarper(temperature=0.5 ) _UpperCAmelCase = FlaxTemperatureLogitsWarper(temperature=1.3 ) _UpperCAmelCase = jax.nn.softmax(temp_dist_warper_sharper(__lowerCAmelCase , scores.copy() , cur_len=__lowerCAmelCase ) , axis=-1 ) _UpperCAmelCase = jax.nn.softmax(temp_dist_warper_smoother(__lowerCAmelCase , scores.copy() , cur_len=__lowerCAmelCase ) , axis=-1 ) # uniform distribution stays uniform self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1e-3 ) ) self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1e-3 ) ) # sharp peaks get higher, valleys get lower self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() ) self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() ) # smooth peaks get lower, valleys get higher self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() ) self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() ) def lowerCAmelCase_ ( self : Optional[Any] ): _UpperCAmelCase = None _UpperCAmelCase = 10 _UpperCAmelCase = 2 # create ramp distribution _UpperCAmelCase = np.broadcast_to(np.arange(__lowerCAmelCase )[None, :] , (batch_size, vocab_size) ).copy() _UpperCAmelCase = ramp_logits[1:, : vocab_size // 2] + vocab_size _UpperCAmelCase = FlaxTopKLogitsWarper(3 ) _UpperCAmelCase = top_k_warp(__lowerCAmelCase , __lowerCAmelCase , cur_len=__lowerCAmelCase ) # check that correct tokens are filtered self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] ) self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] ) # check special case _UpperCAmelCase = 5 _UpperCAmelCase = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 ) _UpperCAmelCase = np.broadcast_to(np.arange(__lowerCAmelCase )[None, :] , (batch_size, length) ).copy() _UpperCAmelCase = top_k_warp_safety_check(__lowerCAmelCase , __lowerCAmelCase , cur_len=__lowerCAmelCase ) # min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] ) def lowerCAmelCase_ ( self : Tuple ): _UpperCAmelCase = None _UpperCAmelCase = 10 _UpperCAmelCase = 2 # create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper) _UpperCAmelCase = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]] ) ) 
_UpperCAmelCase = FlaxTopPLogitsWarper(0.8 ) _UpperCAmelCase = np.exp(top_p_warp(__lowerCAmelCase , __lowerCAmelCase , cur_len=__lowerCAmelCase ) ) # dist should be filtered to keep min num values so that sum is >= top_p # exp (-inf) => 0 _UpperCAmelCase = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] ) self.assertTrue(np.allclose(__lowerCAmelCase , __lowerCAmelCase , atol=1e-3 ) ) # check edge cases with negative and extreme logits _UpperCAmelCase = np.broadcast_to(np.arange(__lowerCAmelCase )[None, :] , (batch_size, vocab_size) ).copy() - ( vocab_size // 2 ) # make ramp_logits more extreme _UpperCAmelCase = ramp_logits[1] * 100.0 # make sure at least 2 tokens are kept _UpperCAmelCase = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 ) _UpperCAmelCase = top_p_warp(__lowerCAmelCase , __lowerCAmelCase , cur_len=__lowerCAmelCase ) # first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2. self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] ) def lowerCAmelCase_ ( self : str ): _UpperCAmelCase = 20 _UpperCAmelCase = 4 _UpperCAmelCase = 0 _UpperCAmelCase = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=__lowerCAmelCase ) # check that min length is applied at length 5 _UpperCAmelCase = ids_tensor((batch_size, 20) , vocab_size=20 ) _UpperCAmelCase = 5 _UpperCAmelCase = self._get_uniform_logits(__lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase = min_dist_processor(__lowerCAmelCase , __lowerCAmelCase , cur_len=__lowerCAmelCase ) self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float("""inf""" )] ) # check that min length is not applied anymore at length 15 _UpperCAmelCase = self._get_uniform_logits(__lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase = 15 _UpperCAmelCase = min_dist_processor(__lowerCAmelCase , __lowerCAmelCase , cur_len=__lowerCAmelCase ) self.assertFalse(jnp.isinf(__lowerCAmelCase ).any() ) def lowerCAmelCase_ ( self : Optional[Any] ): _UpperCAmelCase = 20 _UpperCAmelCase = 4 _UpperCAmelCase = 0 _UpperCAmelCase = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=__lowerCAmelCase ) # check that all scores are -inf except the bos_token_id score _UpperCAmelCase = ids_tensor((batch_size, 1) , vocab_size=20 ) _UpperCAmelCase = 1 _UpperCAmelCase = self._get_uniform_logits(__lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase = logits_processor(__lowerCAmelCase , __lowerCAmelCase , cur_len=__lowerCAmelCase ) self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() ) self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] ) # score for bos_token_id shold be zero # check that bos_token_id is not forced if current length is greater than 1 _UpperCAmelCase = 3 _UpperCAmelCase = self._get_uniform_logits(__lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase = logits_processor(__lowerCAmelCase , __lowerCAmelCase , cur_len=__lowerCAmelCase ) self.assertFalse(jnp.isinf(__lowerCAmelCase ).any() ) def lowerCAmelCase_ ( self : Tuple ): _UpperCAmelCase = 20 _UpperCAmelCase = 4 _UpperCAmelCase = 0 _UpperCAmelCase = 5 _UpperCAmelCase = FlaxForcedEOSTokenLogitsProcessor(max_length=__lowerCAmelCase , eos_token_id=__lowerCAmelCase ) # check that all scores are -inf except the eos_token_id when max_length is reached _UpperCAmelCase = ids_tensor((batch_size, 4) , vocab_size=20 ) _UpperCAmelCase = 4 _UpperCAmelCase = self._get_uniform_logits(__lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase = logits_processor(__lowerCAmelCase , 
__lowerCAmelCase , cur_len=__lowerCAmelCase ) self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() ) self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] ) # score for eos_token_id should be zero # check that eos_token_id is not forced if max_length is not reached _UpperCAmelCase = 3 _UpperCAmelCase = self._get_uniform_logits(__lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase = logits_processor(__lowerCAmelCase , __lowerCAmelCase , cur_len=__lowerCAmelCase ) self.assertFalse(jnp.isinf(__lowerCAmelCase ).any() ) def lowerCAmelCase_ ( self : Dict ): _UpperCAmelCase = 4 _UpperCAmelCase = 10 _UpperCAmelCase = 15 _UpperCAmelCase = 2 _UpperCAmelCase = 1 _UpperCAmelCase = 15 # dummy input_ids and scores _UpperCAmelCase = ids_tensor((batch_size, sequence_length) , __lowerCAmelCase ) _UpperCAmelCase = input_ids.copy() _UpperCAmelCase = self._get_uniform_logits(__lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase = scores.copy() # instantiate all dist processors _UpperCAmelCase = FlaxTemperatureLogitsWarper(temperature=0.5 ) _UpperCAmelCase = FlaxTopKLogitsWarper(3 ) _UpperCAmelCase = FlaxTopPLogitsWarper(0.8 ) # instantiate all logits processors _UpperCAmelCase = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=__lowerCAmelCase ) _UpperCAmelCase = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=__lowerCAmelCase ) _UpperCAmelCase = FlaxForcedEOSTokenLogitsProcessor(max_length=__lowerCAmelCase , eos_token_id=__lowerCAmelCase ) _UpperCAmelCase = 10 # no processor list _UpperCAmelCase = temp_dist_warp(__lowerCAmelCase , __lowerCAmelCase , cur_len=__lowerCAmelCase ) _UpperCAmelCase = top_k_warp(__lowerCAmelCase , __lowerCAmelCase , cur_len=__lowerCAmelCase ) _UpperCAmelCase = top_p_warp(__lowerCAmelCase , __lowerCAmelCase , cur_len=__lowerCAmelCase ) _UpperCAmelCase = min_dist_proc(__lowerCAmelCase , __lowerCAmelCase , cur_len=__lowerCAmelCase ) _UpperCAmelCase = bos_dist_proc(__lowerCAmelCase , __lowerCAmelCase , cur_len=__lowerCAmelCase ) _UpperCAmelCase = eos_dist_proc(__lowerCAmelCase , __lowerCAmelCase , cur_len=__lowerCAmelCase ) # with processor list _UpperCAmelCase = FlaxLogitsProcessorList( [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] ) _UpperCAmelCase = processor(__lowerCAmelCase , __lowerCAmelCase , cur_len=__lowerCAmelCase ) # scores should be equal self.assertTrue(jnp.allclose(__lowerCAmelCase , __lowerCAmelCase , atol=1e-3 ) ) # input_ids should never be changed self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() ) def lowerCAmelCase_ ( self : int ): _UpperCAmelCase = 4 _UpperCAmelCase = 10 _UpperCAmelCase = 15 _UpperCAmelCase = 2 _UpperCAmelCase = 1 _UpperCAmelCase = 15 # dummy input_ids and scores _UpperCAmelCase = ids_tensor((batch_size, sequence_length) , __lowerCAmelCase ) _UpperCAmelCase = input_ids.copy() _UpperCAmelCase = self._get_uniform_logits(__lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase = scores.copy() # instantiate all dist processors _UpperCAmelCase = FlaxTemperatureLogitsWarper(temperature=0.5 ) _UpperCAmelCase = FlaxTopKLogitsWarper(3 ) _UpperCAmelCase = FlaxTopPLogitsWarper(0.8 ) # instantiate all logits processors _UpperCAmelCase = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=__lowerCAmelCase ) _UpperCAmelCase = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=__lowerCAmelCase ) _UpperCAmelCase = FlaxForcedEOSTokenLogitsProcessor(max_length=__lowerCAmelCase , eos_token_id=__lowerCAmelCase ) _UpperCAmelCase = 10 # no processor list def 
run_no_processor_list(__lowerCAmelCase : Dict , __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[str] ): _UpperCAmelCase = temp_dist_warp(__lowerCAmelCase , __lowerCAmelCase , cur_len=__lowerCAmelCase ) _UpperCAmelCase = top_k_warp(__lowerCAmelCase , __lowerCAmelCase , cur_len=__lowerCAmelCase ) _UpperCAmelCase = top_p_warp(__lowerCAmelCase , __lowerCAmelCase , cur_len=__lowerCAmelCase ) _UpperCAmelCase = min_dist_proc(__lowerCAmelCase , __lowerCAmelCase , cur_len=__lowerCAmelCase ) _UpperCAmelCase = bos_dist_proc(__lowerCAmelCase , __lowerCAmelCase , cur_len=__lowerCAmelCase ) _UpperCAmelCase = eos_dist_proc(__lowerCAmelCase , __lowerCAmelCase , cur_len=__lowerCAmelCase ) return scores # with processor list def run_processor_list(__lowerCAmelCase : Any , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[str] ): _UpperCAmelCase = FlaxLogitsProcessorList( [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] ) _UpperCAmelCase = processor(__lowerCAmelCase , __lowerCAmelCase , cur_len=__lowerCAmelCase ) return scores _UpperCAmelCase = jax.jit(__lowerCAmelCase ) _UpperCAmelCase = jax.jit(__lowerCAmelCase ) _UpperCAmelCase = jitted_run_no_processor_list(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase = jitted_run_processor_list(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) # scores should be equal self.assertTrue(jnp.allclose(__lowerCAmelCase , __lowerCAmelCase , atol=1e-3 ) ) # input_ids should never be changed self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
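Outside the test harness, the warpers and processors exercised above compose the same way; a minimal sketch with dummy shapes:

import jax.numpy as jnp

from transformers.generation import (
    FlaxLogitsProcessorList,
    FlaxTemperatureLogitsWarper,
    FlaxTopKLogitsWarper,
)

processors = FlaxLogitsProcessorList([FlaxTemperatureLogitsWarper(0.7), FlaxTopKLogitsWarper(top_k=50)])
input_ids = jnp.zeros((1, 5), dtype=jnp.int32)  # (batch, cur_len) dummy prompt
scores = jnp.zeros((1, 100))                    # (batch, vocab_size) dummy logits
warped = processors(input_ids, scores, cur_len=5)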
"""simple docstring""" from __future__ import annotations import unittest from transformers import MobileBertConfig, is_tf_available from transformers.models.auto import get_values from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_MODEL_FOR_PRETRAINING_MAPPING, TFMobileBertForMaskedLM, TFMobileBertForMultipleChoice, TFMobileBertForNextSentencePrediction, TFMobileBertForPreTraining, TFMobileBertForQuestionAnswering, TFMobileBertForSequenceClassification, TFMobileBertForTokenClassification, TFMobileBertModel, ) @require_tf class a ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): _snake_case : str = ( ( TFMobileBertModel, TFMobileBertForMaskedLM, TFMobileBertForNextSentencePrediction, TFMobileBertForPreTraining, TFMobileBertForQuestionAnswering, TFMobileBertForSequenceClassification, TFMobileBertForTokenClassification, TFMobileBertForMultipleChoice, ) if is_tf_available() else () ) _snake_case : Dict = ( { 'feature-extraction': TFMobileBertModel, 'fill-mask': TFMobileBertForMaskedLM, 'question-answering': TFMobileBertForQuestionAnswering, 'text-classification': TFMobileBertForSequenceClassification, 'token-classification': TFMobileBertForTokenClassification, 'zero-shot': TFMobileBertForSequenceClassification, } if is_tf_available() else {} ) _snake_case : Dict = False _snake_case : List[str] = False def lowerCAmelCase_ ( self : Dict , __lowerCAmelCase : Dict , __lowerCAmelCase : List[Any] , __lowerCAmelCase : int=False ): _UpperCAmelCase = super()._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase ) if return_labels: if model_class in get_values(__lowerCAmelCase ): _UpperCAmelCase = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) return inputs_dict class a ( lowerCAmelCase_ ): def __init__( self : Optional[int] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : str=13 , __lowerCAmelCase : Any=7 , __lowerCAmelCase : Optional[int]=True , __lowerCAmelCase : str=True , __lowerCAmelCase : List[str]=True , __lowerCAmelCase : Tuple=True , __lowerCAmelCase : List[str]=99 , __lowerCAmelCase : Optional[int]=32 , __lowerCAmelCase : str=32 , __lowerCAmelCase : Optional[Any]=2 , __lowerCAmelCase : int=4 , __lowerCAmelCase : Tuple=37 , __lowerCAmelCase : Union[str, Any]="gelu" , __lowerCAmelCase : int=0.1 , __lowerCAmelCase : Optional[Any]=0.1 , __lowerCAmelCase : int=512 , __lowerCAmelCase : List[Any]=16 , __lowerCAmelCase : Optional[Any]=2 , __lowerCAmelCase : Optional[int]=0.02 , __lowerCAmelCase : Optional[int]=3 , __lowerCAmelCase : Dict=4 , __lowerCAmelCase : str=None , ): _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = seq_length _UpperCAmelCase = is_training _UpperCAmelCase = use_input_mask _UpperCAmelCase = use_token_type_ids _UpperCAmelCase = use_labels _UpperCAmelCase = vocab_size _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_act _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = max_position_embeddings _UpperCAmelCase = type_vocab_size _UpperCAmelCase = type_sequence_label_size _UpperCAmelCase = initializer_range _UpperCAmelCase = num_labels _UpperCAmelCase = 
num_choices _UpperCAmelCase = scope _UpperCAmelCase = embedding_size def lowerCAmelCase_ ( self : int ): _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _UpperCAmelCase = None if self.use_input_mask: _UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] ) _UpperCAmelCase = None if self.use_token_type_ids: _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None if self.use_labels: _UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices ) _UpperCAmelCase = MobileBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCAmelCase_ ( self : str , __lowerCAmelCase : List[str] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any ): _UpperCAmelCase = TFMobileBertModel(config=__lowerCAmelCase ) _UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} _UpperCAmelCase = model(__lowerCAmelCase ) _UpperCAmelCase = [input_ids, input_mask] _UpperCAmelCase = model(__lowerCAmelCase ) _UpperCAmelCase = model(__lowerCAmelCase ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def lowerCAmelCase_ ( self : Any , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Any , __lowerCAmelCase : List[str] , __lowerCAmelCase : Any , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[str] ): _UpperCAmelCase = TFMobileBertForMaskedLM(config=__lowerCAmelCase ) _UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} _UpperCAmelCase = model(__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCAmelCase_ ( self : int , __lowerCAmelCase : List[str] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : int , __lowerCAmelCase : Dict , __lowerCAmelCase : Any , __lowerCAmelCase : Dict ): _UpperCAmelCase = TFMobileBertForNextSentencePrediction(config=__lowerCAmelCase ) _UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} _UpperCAmelCase = model(__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) ) def lowerCAmelCase_ ( self : Dict , __lowerCAmelCase : int , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[Any] , 
__lowerCAmelCase : List[str] , __lowerCAmelCase : Tuple ): _UpperCAmelCase = TFMobileBertForPreTraining(config=__lowerCAmelCase ) _UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} _UpperCAmelCase = model(__lowerCAmelCase ) self.parent.assertEqual( result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) ) def lowerCAmelCase_ ( self : int , __lowerCAmelCase : Tuple , __lowerCAmelCase : int , __lowerCAmelCase : Any , __lowerCAmelCase : str , __lowerCAmelCase : Any , __lowerCAmelCase : int , __lowerCAmelCase : Tuple ): _UpperCAmelCase = self.num_labels _UpperCAmelCase = TFMobileBertForSequenceClassification(config=__lowerCAmelCase ) _UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} _UpperCAmelCase = model(__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Dict , __lowerCAmelCase : Any , __lowerCAmelCase : Any , __lowerCAmelCase : int , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Union[str, Any] ): _UpperCAmelCase = self.num_choices _UpperCAmelCase = TFMobileBertForMultipleChoice(config=__lowerCAmelCase ) _UpperCAmelCase = tf.tile(tf.expand_dims(__lowerCAmelCase , 1 ) , (1, self.num_choices, 1) ) _UpperCAmelCase = tf.tile(tf.expand_dims(__lowerCAmelCase , 1 ) , (1, self.num_choices, 1) ) _UpperCAmelCase = tf.tile(tf.expand_dims(__lowerCAmelCase , 1 ) , (1, self.num_choices, 1) ) _UpperCAmelCase = { """input_ids""": multiple_choice_inputs_ids, """attention_mask""": multiple_choice_input_mask, """token_type_ids""": multiple_choice_token_type_ids, } _UpperCAmelCase = model(__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Any , __lowerCAmelCase : Dict , __lowerCAmelCase : List[str] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any ): _UpperCAmelCase = self.num_labels _UpperCAmelCase = TFMobileBertForTokenClassification(config=__lowerCAmelCase ) _UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} _UpperCAmelCase = model(__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCAmelCase_ ( self : Dict , __lowerCAmelCase : str , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : int , __lowerCAmelCase : Any , __lowerCAmelCase : str , __lowerCAmelCase : List[Any] ): _UpperCAmelCase = TFMobileBertForQuestionAnswering(config=__lowerCAmelCase ) _UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} _UpperCAmelCase = model(__lowerCAmelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCAmelCase_ ( self : Tuple ): _UpperCAmelCase = self.prepare_config_and_inputs() ( ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ) = config_and_inputs _UpperCAmelCase = 
{"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask} return config, inputs_dict def lowerCAmelCase_ ( self : str ): _UpperCAmelCase = TFMobileBertModelTest.TFMobileBertModelTester(self ) _UpperCAmelCase = ConfigTester(self , config_class=__lowerCAmelCase , hidden_size=37 ) def lowerCAmelCase_ ( self : Any ): self.config_tester.run_common_tests() def lowerCAmelCase_ ( self : Any ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_model(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : List[Any] ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_masked_lm(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : Union[str, Any] ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_multiple_choice(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : Any ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : Optional[int] ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_pretraining(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : str ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_question_answering(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : str ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_sequence_classification(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : str ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_token_classification(*__lowerCAmelCase ) @slow def lowerCAmelCase_ ( self : int ): # for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: for model_name in ["google/mobilebert-uncased"]: _UpperCAmelCase = TFMobileBertModel.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) @require_tf class a ( unittest.TestCase ): @slow def lowerCAmelCase_ ( self : List[Any] ): _UpperCAmelCase = TFMobileBertForPreTraining.from_pretrained("""google/mobilebert-uncased""" ) _UpperCAmelCase = tf.constant([[0, 1, 2, 3, 4, 5]] ) _UpperCAmelCase = model(__lowerCAmelCase )[0] _UpperCAmelCase = [1, 6, 3_0522] self.assertEqual(output.shape , __lowerCAmelCase ) _UpperCAmelCase = tf.constant( [ [ [-4.5_919_547, -9.248_295, -9.645_256], [-6.7_306_175, -6.440_284, -6.6_052_837], [-7.2_743_506, -6.7_847_915, -6.024_673], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , __lowerCAmelCase , atol=1e-4 )
"""simple docstring""" import unittest import numpy as np import torch from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad class a ( unittest.TestCase ): def lowerCAmelCase_ ( self : List[Any] ): _UpperCAmelCase = 10 def lowerCAmelCase_ ( self : Tuple ): _UpperCAmelCase = [1, 2, 3, 4] _UpperCAmelCase = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0] self.assertEqual(truncate_or_pad(__lowerCAmelCase , self.block_size , 0 ) , __lowerCAmelCase ) def lowerCAmelCase_ ( self : int ): _UpperCAmelCase = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] _UpperCAmelCase = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] self.assertEqual(truncate_or_pad(__lowerCAmelCase , self.block_size , 0 ) , __lowerCAmelCase ) def lowerCAmelCase_ ( self : int ): _UpperCAmelCase = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13] _UpperCAmelCase = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] self.assertEqual(truncate_or_pad(__lowerCAmelCase , self.block_size , 0 ) , __lowerCAmelCase ) def lowerCAmelCase_ ( self : Union[str, Any] ): _UpperCAmelCase = """It was the year of Our Lord one thousand seven hundred and seventy-five.\n\nSpiritual revelations were conceded to England at that favoured period, as at this.""" _UpperCAmelCase , _UpperCAmelCase = process_story(__lowerCAmelCase ) self.assertEqual(__lowerCAmelCase , [] ) def lowerCAmelCase_ ( self : Optional[Any] ): _UpperCAmelCase = """""" _UpperCAmelCase , _UpperCAmelCase = process_story(__lowerCAmelCase ) self.assertEqual(__lowerCAmelCase , [] ) self.assertEqual(__lowerCAmelCase , [] ) def lowerCAmelCase_ ( self : Optional[Any] ): _UpperCAmelCase = ( """It was the year of Our Lord one thousand seven hundred and """ """seventy-five\n\nSpiritual revelations were conceded to England """ """at that favoured period, as at this.\n@highlight\n\nIt was the best of times""" ) _UpperCAmelCase , _UpperCAmelCase = process_story(__lowerCAmelCase ) _UpperCAmelCase = [ """It was the year of Our Lord one thousand seven hundred and seventy-five.""", """Spiritual revelations were conceded to England at that favoured period, as at this.""", ] self.assertEqual(__lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase = ["""It was the best of times."""] self.assertEqual(__lowerCAmelCase , __lowerCAmelCase ) def lowerCAmelCase_ ( self : Any ): _UpperCAmelCase = torch.tensor([1, 2, 3, 4] ) _UpperCAmelCase = torch.tensor([1, 1, 1, 1] ) np.testing.assert_array_equal(build_mask(__lowerCAmelCase , 0 ).numpy() , expected.numpy() ) def lowerCAmelCase_ ( self : List[Any] ): _UpperCAmelCase = torch.tensor([1, 2, 3, 4, 23, 23, 23] ) _UpperCAmelCase = torch.tensor([1, 1, 1, 1, 0, 0, 0] ) np.testing.assert_array_equal(build_mask(__lowerCAmelCase , 23 ).numpy() , expected.numpy() ) def lowerCAmelCase_ ( self : Any ): _UpperCAmelCase = torch.tensor([8, 2, 3, 4, 1, 1, 1] ) _UpperCAmelCase = torch.tensor([1, 1, 1, 1, 0, 0, 0] ) np.testing.assert_array_equal(build_mask(__lowerCAmelCase , 1 ).numpy() , expected.numpy() ) def lowerCAmelCase_ ( self : Optional[Any] ): _UpperCAmelCase = 101 _UpperCAmelCase = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]] ) _UpperCAmelCase = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] ) _UpperCAmelCase = compute_token_type_ids(__lowerCAmelCase , __lowerCAmelCase ) np.testing.assert_array_equal(__lowerCAmelCase , __lowerCAmelCase )
"""simple docstring""" from ....configuration_utils import PretrainedConfig from ....utils import logging UpperCAmelCase__ = logging.get_logger(__name__) UpperCAmelCase__ = { """Visual-Attention-Network/van-base""": ( """https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json""" ), } class a ( lowerCAmelCase_ ): _snake_case : int = 'van' def __init__( self : Any , __lowerCAmelCase : Tuple=224 , __lowerCAmelCase : List[Any]=3 , __lowerCAmelCase : Tuple=[7, 3, 3, 3] , __lowerCAmelCase : Dict=[4, 2, 2, 2] , __lowerCAmelCase : Optional[Any]=[64, 128, 320, 512] , __lowerCAmelCase : Optional[int]=[3, 3, 12, 3] , __lowerCAmelCase : Dict=[8, 8, 4, 4] , __lowerCAmelCase : int="gelu" , __lowerCAmelCase : Optional[int]=0.02 , __lowerCAmelCase : List[str]=1e-6 , __lowerCAmelCase : Optional[int]=1e-2 , __lowerCAmelCase : Any=0.0 , __lowerCAmelCase : List[str]=0.0 , **__lowerCAmelCase : Any , ): super().__init__(**__lowerCAmelCase ) _UpperCAmelCase = image_size _UpperCAmelCase = num_channels _UpperCAmelCase = patch_sizes _UpperCAmelCase = strides _UpperCAmelCase = hidden_sizes _UpperCAmelCase = depths _UpperCAmelCase = mlp_ratios _UpperCAmelCase = hidden_act _UpperCAmelCase = initializer_range _UpperCAmelCase = layer_norm_eps _UpperCAmelCase = layer_scale_init_value _UpperCAmelCase = drop_path_rate _UpperCAmelCase = dropout_rate
"""simple docstring""" import sys UpperCAmelCase__ = ( """73167176531330624919225119674426574742355349194934""" """96983520312774506326239578318016984801869478851843""" """85861560789112949495459501737958331952853208805511""" """12540698747158523863050715693290963295227443043557""" """66896648950445244523161731856403098711121722383113""" """62229893423380308135336276614282806444486645238749""" """30358907296290491560440772390713810515859307960866""" """70172427121883998797908792274921901699720888093776""" """65727333001053367881220235421809751254540594752243""" """52584907711670556013604839586446706324415722155397""" """53697817977846174064955149290862569321978468622482""" """83972241375657056057490261407972968652414535100474""" """82166370484403199890008895243450658541227588666881""" """16427171479924442928230863465674813919123162824586""" """17866458359124566529476545682848912883142607690042""" """24219022671055626321111109370544217506941658960408""" """07198403850962455444362981230987879927244284909188""" """84580156166097919133875499200524063689912560717606""" """05886116467109405077541002256983155200055935729725""" """71636269561882670428252483600823257530420752963450""" ) def __UpperCAmelCase ( lowercase = N ): """simple docstring""" _UpperCAmelCase = -sys.maxsize - 1 for i in range(len(lowercase ) - 12 ): _UpperCAmelCase = 1 for j in range(13 ): product *= int(n[i + j] ) if product > largest_product: _UpperCAmelCase = product return largest_product if __name__ == "__main__": print(F'''{solution() = }''')
"""simple docstring""" def __UpperCAmelCase ( lowercase = 10_00 ): """simple docstring""" _UpperCAmelCase = 2**power _UpperCAmelCase = 0 while n: _UpperCAmelCase , _UpperCAmelCase = r + n % 10, n // 10 return r if __name__ == "__main__": print(solution(int(str(input()).strip())))
"""simple docstring""" import mpmath # for roots of unity import numpy as np class a : def __init__( self : Tuple , __lowerCAmelCase : Dict=None , __lowerCAmelCase : Union[str, Any]=None ): # Input as list _UpperCAmelCase = list(poly_a or [0] )[:] _UpperCAmelCase = list(poly_b or [0] )[:] # Remove leading zero coefficients while self.polyA[-1] == 0: self.polyA.pop() _UpperCAmelCase = len(self.polyA ) while self.polyB[-1] == 0: self.polyB.pop() _UpperCAmelCase = len(self.polyB ) # Add 0 to make lengths equal a power of 2 _UpperCAmelCase = int( 2 ** np.ceil(np.loga(len(self.polyA ) + len(self.polyB ) - 1 ) ) ) while len(self.polyA ) < self.c_max_length: self.polyA.append(0 ) while len(self.polyB ) < self.c_max_length: self.polyB.append(0 ) # A complex root used for the fourier transform _UpperCAmelCase = complex(mpmath.root(x=1 , n=self.c_max_length , k=1 ) ) # The product _UpperCAmelCase = self.__multiply() def lowerCAmelCase_ ( self : Optional[int] , __lowerCAmelCase : str ): _UpperCAmelCase = [[x] for x in self.polyA] if which == """A""" else [[x] for x in self.polyB] # Corner case if len(__lowerCAmelCase ) <= 1: return dft[0] # _UpperCAmelCase = self.c_max_length // 2 while next_ncol > 0: _UpperCAmelCase = [[] for i in range(__lowerCAmelCase )] _UpperCAmelCase = self.root**next_ncol # First half of next step _UpperCAmelCase = 1 for j in range(self.c_max_length // (next_ncol * 2) ): for i in range(__lowerCAmelCase ): new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j] ) current_root *= root # Second half of next step _UpperCAmelCase = 1 for j in range(self.c_max_length // (next_ncol * 2) ): for i in range(__lowerCAmelCase ): new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j] ) current_root *= root # Update _UpperCAmelCase = new_dft _UpperCAmelCase = next_ncol // 2 return dft[0] def lowerCAmelCase_ ( self : Tuple ): _UpperCAmelCase = self.__dft("""A""" ) _UpperCAmelCase = self.__dft("""B""" ) _UpperCAmelCase = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length )]] del dft_a del dft_b # Corner Case if len(inverce_c[0] ) <= 1: return inverce_c[0] # Inverse DFT _UpperCAmelCase = 2 while next_ncol <= self.c_max_length: _UpperCAmelCase = [[] for i in range(__lowerCAmelCase )] _UpperCAmelCase = self.root ** (next_ncol // 2) _UpperCAmelCase = 1 # First half of next step for j in range(self.c_max_length // next_ncol ): for i in range(next_ncol // 2 ): # Even positions new_inverse_c[i].append( ( inverce_c[i][j] + inverce_c[i][j + self.c_max_length // next_ncol] ) / 2 ) # Odd positions new_inverse_c[i + next_ncol // 2].append( ( inverce_c[i][j] - inverce_c[i][j + self.c_max_length // next_ncol] ) / (2 * current_root) ) current_root *= root # Update _UpperCAmelCase = new_inverse_c next_ncol *= 2 # Unpack _UpperCAmelCase = [round(x[0].real , 8 ) + round(x[0].imag , 8 ) * 1j for x in inverce_c] # Remove leading 0's while inverce_c[-1] == 0: inverce_c.pop() return inverce_c def __str__( self : Dict ): _UpperCAmelCase = """A = """ + """ + """.join( f'''{coef}*x^{i}''' for coef, i in enumerate(self.polyA[: self.len_A] ) ) _UpperCAmelCase = """B = """ + """ + """.join( f'''{coef}*x^{i}''' for coef, i in enumerate(self.polyB[: self.len_B] ) ) _UpperCAmelCase = """A*B = """ + """ + """.join( f'''{coef}*x^{i}''' for coef, i in enumerate(self.product ) ) return f'''{a}\n{b}\n{c}''' # Unit tests if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" from typing import Any, Dict, List, Optional, Tuple, Union import torch from torch import nn from torch.utils.data import DistributedSampler, RandomSampler from transformers import PreTrainedModel, Trainer, logging from transformers.integrations import is_fairscale_available from transformers.models.fsmt.configuration_fsmt import FSMTConfig from transformers.optimization import ( Adafactor, AdamW, get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) from transformers.trainer_pt_utils import get_tpu_sampler from transformers.training_args import ParallelMode from transformers.utils import is_torch_tpu_available if is_fairscale_available(): from fairscale.optim import OSS UpperCAmelCase__ = logging.get_logger(__name__) UpperCAmelCase__ = { """linear""": get_linear_schedule_with_warmup, """cosine""": get_cosine_schedule_with_warmup, """cosine_w_restarts""": get_cosine_with_hard_restarts_schedule_with_warmup, """polynomial""": get_polynomial_decay_schedule_with_warmup, """constant""": get_constant_schedule, """constant_w_warmup""": get_constant_schedule_with_warmup, } class a ( lowerCAmelCase_ ): def __init__( self : Optional[int] , __lowerCAmelCase : Any=None , __lowerCAmelCase : Any=None , *__lowerCAmelCase : Union[str, Any] , **__lowerCAmelCase : Optional[int] ): super().__init__(*__lowerCAmelCase , **__lowerCAmelCase ) if config is None: assert isinstance(self.model , __lowerCAmelCase ), ( "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is" f''' {self.model.__class__}''' ) _UpperCAmelCase = self.model.config else: _UpperCAmelCase = config _UpperCAmelCase = data_args _UpperCAmelCase = self.config.tgt_vocab_size if isinstance(self.config , __lowerCAmelCase ) else self.config.vocab_size if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss): assert self.config.pad_token_id is not None, ( "Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss" " calculation or doing label smoothing." ) if self.config.pad_token_id is None and self.config.eos_token_id is not None: logger.warning( f'''The `config.pad_token_id` is `None`. 
Using `config.eos_token_id` = {self.config.eos_token_id} for''' """ padding..""" ) if self.args.label_smoothing == 0: _UpperCAmelCase = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id ) else: # dynamically import label_smoothed_nll_loss from utils import label_smoothed_nll_loss _UpperCAmelCase = label_smoothed_nll_loss def lowerCAmelCase_ ( self : Dict , __lowerCAmelCase : int ): if self.optimizer is None: _UpperCAmelCase = ["""bias""", """LayerNorm.weight"""] _UpperCAmelCase = [ { """params""": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )], """weight_decay""": self.args.weight_decay, }, { """params""": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )], """weight_decay""": 0.0, }, ] _UpperCAmelCase = Adafactor if self.args.adafactor else AdamW if self.args.adafactor: _UpperCAmelCase = Adafactor _UpperCAmelCase = {"""scale_parameter""": False, """relative_step""": False} else: _UpperCAmelCase = AdamW _UpperCAmelCase = { """betas""": (self.args.adam_betaa, self.args.adam_betaa), """eps""": self.args.adam_epsilon, } _UpperCAmelCase = self.args.learning_rate if self.sharded_ddp: _UpperCAmelCase = OSS( params=__lowerCAmelCase , optim=__lowerCAmelCase , **__lowerCAmelCase , ) else: _UpperCAmelCase = optimizer_cls(__lowerCAmelCase , **__lowerCAmelCase ) if self.lr_scheduler is None: _UpperCAmelCase = self._get_lr_scheduler(__lowerCAmelCase ) else: # ignoring --lr_scheduler logger.warning("""scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.""" ) def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : List[str] ): _UpperCAmelCase = arg_to_scheduler[self.args.lr_scheduler] if self.args.lr_scheduler == "constant": _UpperCAmelCase = schedule_func(self.optimizer ) elif self.args.lr_scheduler == "constant_w_warmup": _UpperCAmelCase = schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps ) else: _UpperCAmelCase = schedule_func( self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=__lowerCAmelCase ) return scheduler def lowerCAmelCase_ ( self : Optional[int] ): if isinstance(self.train_dataset , torch.utils.data.IterableDataset ): return None elif is_torch_tpu_available(): return get_tpu_sampler(self.train_dataset ) else: if self.args.sortish_sampler: self.train_dataset.make_sortish_sampler( self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , ) return ( RandomSampler(self.train_dataset ) if self.args.local_rank == -1 else DistributedSampler(self.train_dataset ) ) def lowerCAmelCase_ ( self : Optional[int] , __lowerCAmelCase : str , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Tuple ): if self.args.label_smoothing == 0: if self.data_args is not None and self.data_args.ignore_pad_token_for_loss: # force training to ignore pad token _UpperCAmelCase = model(**__lowerCAmelCase , use_cache=__lowerCAmelCase )[0] _UpperCAmelCase = self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) ) else: # compute usual loss via models _UpperCAmelCase , _UpperCAmelCase = model(**__lowerCAmelCase , labels=__lowerCAmelCase , use_cache=__lowerCAmelCase )[:2] else: # compute label smoothed loss _UpperCAmelCase = model(**__lowerCAmelCase , use_cache=__lowerCAmelCase )[0] _UpperCAmelCase = torch.nn.functional.log_softmax(__lowerCAmelCase , dim=-1 ) _UpperCAmelCase , _UpperCAmelCase = self.loss_fn(__lowerCAmelCase , __lowerCAmelCase , self.args.label_smoothing , 
ignore_index=self.config.pad_token_id ) return loss, logits def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : int ): _UpperCAmelCase = inputs.pop("""labels""" ) _UpperCAmelCase , _UpperCAmelCase = self._compute_loss(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) return loss def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : nn.Module , __lowerCAmelCase : Dict[str, Union[torch.Tensor, Any]] , __lowerCAmelCase : bool , __lowerCAmelCase : Optional[List[str]] = None , ): _UpperCAmelCase = self._prepare_inputs(__lowerCAmelCase ) _UpperCAmelCase = { """max_length""": self.data_args.val_max_target_length if self.data_args is not None else self.config.max_length, """num_beams""": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams, } if self.args.predict_with_generate and not self.args.prediction_loss_only: _UpperCAmelCase = self.model.generate( inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , **__lowerCAmelCase , ) # in case the batch is shorter than max length, the output should be padded if generated_tokens.shape[-1] < gen_kwargs["max_length"]: _UpperCAmelCase = self._pad_tensors_to_max_len(__lowerCAmelCase , gen_kwargs["""max_length"""] ) _UpperCAmelCase = inputs.pop("""labels""" ) with torch.no_grad(): # compute loss on predict data _UpperCAmelCase , _UpperCAmelCase = self._compute_loss(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase = loss.mean().detach() if self.args.prediction_loss_only: return (loss, None, None) _UpperCAmelCase = generated_tokens if self.args.predict_with_generate else logits if labels.shape[-1] < gen_kwargs["max_length"]: _UpperCAmelCase = self._pad_tensors_to_max_len(__lowerCAmelCase , gen_kwargs["""max_length"""] ) return (loss, logits, labels) def lowerCAmelCase_ ( self : Union[str, Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[str] ): # If PAD token is not defined at least EOS token has to be defined _UpperCAmelCase = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id if pad_token_id is None: raise ValueError( """Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be""" f''' padded to `max_length`={max_length}''' ) _UpperCAmelCase = pad_token_id * torch.ones( (tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device ) _UpperCAmelCase = tensor return padded_tensor
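For orientation, a self-contained illustration of the label-smoothed objective selected above when `--label_smoothing > 0` (a common formulation of label smoothing, not the imported `label_smoothed_nll_loss` itself):

import torch
import torch.nn.functional as F

logits = torch.randn(2, 5)  # (batch, vocab_size) dummy scores
target = torch.tensor([1, 3])
epsilon = 0.1
lprobs = F.log_softmax(logits, dim=-1)
nll = -lprobs.gather(1, target.unsqueeze(1)).squeeze(1)  # the usual cross-entropy term
smooth = -lprobs.mean(dim=-1)                            # uniform prior over the vocabulary
loss = ((1 - epsilon) * nll + epsilon * smooth).mean()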
"""simple docstring""" import logging import os import sys from dataclasses import dataclass, field from typing import Optional import numpy as np import torch from datasets import load_dataset from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor import transformers from transformers import ( CONFIG_MAPPING, IMAGE_PROCESSOR_MAPPING, MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING, AutoConfig, AutoImageProcessor, AutoModelForMaskedImageModeling, HfArgumentParser, Trainer, TrainingArguments, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version UpperCAmelCase__ = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("""4.31.0""") require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt""") UpperCAmelCase__ = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys()) UpperCAmelCase__ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class a : _snake_case : Optional[str] = field( default='cifar10' , metadata={'help': 'Name of a dataset from the datasets package'} ) _snake_case : Optional[str] = field( default=lowerCAmelCase_ , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} ) _snake_case : Optional[str] = field( default=lowerCAmelCase_ , metadata={'help': 'The column name of the images in the files. If not set, will try to use \'image\' or \'img\'.'} , ) _snake_case : Optional[str] = field(default=lowerCAmelCase_ , metadata={'help': 'A folder containing the training data.'} ) _snake_case : Optional[str] = field(default=lowerCAmelCase_ , metadata={'help': 'A folder containing the validation data.'} ) _snake_case : Optional[float] = field( default=0.15 , metadata={'help': 'Percent to split off of train for validation.'} ) _snake_case : int = field(default=32 , metadata={'help': 'The size of the square patches to use for masking.'} ) _snake_case : float = field( default=0.6 , metadata={'help': 'Percentage of patches to mask.'} , ) _snake_case : Optional[int] = field( default=lowerCAmelCase_ , metadata={ 'help': ( 'For debugging purposes or quicker training, truncate the number of training examples to this ' 'value if set.' ) } , ) _snake_case : Optional[int] = field( default=lowerCAmelCase_ , metadata={ 'help': ( 'For debugging purposes or quicker training, truncate the number of evaluation examples to this ' 'value if set.' ) } , ) def lowerCAmelCase_ ( self : Optional[Any] ): _UpperCAmelCase = {} if self.train_dir is not None: _UpperCAmelCase = self.train_dir if self.validation_dir is not None: _UpperCAmelCase = self.validation_dir _UpperCAmelCase = data_files if data_files else None @dataclass class a : _snake_case : str = field( default=lowerCAmelCase_ , metadata={ 'help': ( 'The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a ' 'checkpoint identifier on the hub. ' 'Don\'t set if you want to train a model from scratch.' 
) } , ) _snake_case : Optional[str] = field( default=lowerCAmelCase_ , metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(lowerCAmelCase_ )} , ) _snake_case : Optional[str] = field( default=lowerCAmelCase_ , metadata={'help': 'Pretrained config name or path if not the same as model_name'} ) _snake_case : Optional[str] = field( default=lowerCAmelCase_ , metadata={ 'help': ( 'Override some existing default config settings when a model is trained from scratch. Example: ' 'n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index' ) } , ) _snake_case : Optional[str] = field( default=lowerCAmelCase_ , metadata={'help': 'Where do you want to store (cache) the pretrained models/datasets downloaded from the hub'} , ) _snake_case : str = field( default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , ) _snake_case : str = field(default=lowerCAmelCase_ , metadata={'help': 'Name or path of preprocessor config.'} ) _snake_case : bool = field( default=lowerCAmelCase_ , metadata={ 'help': ( 'Will use the token generated when running `huggingface-cli login` (necessary to use this script ' 'with private models).' ) } , ) _snake_case : Optional[int] = field( default=lowerCAmelCase_ , metadata={ 'help': ( 'The size (resolution) of each image. If not specified, will use `image_size` of the configuration.' ) } , ) _snake_case : Optional[int] = field( default=lowerCAmelCase_ , metadata={ 'help': ( 'The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration.' ) } , ) _snake_case : Optional[int] = field( default=lowerCAmelCase_ , metadata={'help': 'Stride to use for the encoder.'} , ) class a : def __init__( self : Any , __lowerCAmelCase : Optional[Any]=192 , __lowerCAmelCase : List[str]=32 , __lowerCAmelCase : List[Any]=4 , __lowerCAmelCase : List[str]=0.6 ): _UpperCAmelCase = input_size _UpperCAmelCase = mask_patch_size _UpperCAmelCase = model_patch_size _UpperCAmelCase = mask_ratio if self.input_size % self.mask_patch_size != 0: raise ValueError("""Input size must be divisible by mask patch size""" ) if self.mask_patch_size % self.model_patch_size != 0: raise ValueError("""Mask patch size must be divisible by model patch size""" ) _UpperCAmelCase = self.input_size // self.mask_patch_size _UpperCAmelCase = self.mask_patch_size // self.model_patch_size _UpperCAmelCase = self.rand_size**2 _UpperCAmelCase = int(np.ceil(self.token_count * self.mask_ratio ) ) def __call__( self : int ): _UpperCAmelCase = np.random.permutation(self.token_count )[: self.mask_count] _UpperCAmelCase = np.zeros(self.token_count , dtype=__lowerCAmelCase ) _UpperCAmelCase = 1 _UpperCAmelCase = mask.reshape((self.rand_size, self.rand_size) ) _UpperCAmelCase = mask.repeat(self.scale , axis=0 ).repeat(self.scale , axis=1 ) return torch.tensor(mask.flatten() ) def __UpperCAmelCase ( lowercase ): """simple docstring""" _UpperCAmelCase = torch.stack([example["""pixel_values"""] for example in examples] ) _UpperCAmelCase = torch.stack([example["""mask"""] for example in examples] ) return {"pixel_values": pixel_values, "bool_masked_pos": mask} def __UpperCAmelCase ( ): """simple docstring""" # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. 
_UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("""run_mim""" ,lowercase ,lowercase ) # Setup logging logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" ,datefmt="""%m/%d/%Y %H:%M:%S""" ,handlers=[logging.StreamHandler(sys.stdout )] ,) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() _UpperCAmelCase = training_args.get_process_log_level() logger.setLevel(lowercase ) transformers.utils.logging.set_verbosity(lowercase ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}''' + f'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' ) logger.info(f'''Training/evaluation parameters {training_args}''' ) # Detecting last checkpoint. _UpperCAmelCase = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: _UpperCAmelCase = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( f'''Output directory ({training_args.output_dir}) already exists and is not empty. ''' """Use --overwrite_output_dir to overcome.""" ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change ''' """the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" ) # Initialize our dataset. _UpperCAmelCase = load_dataset( data_args.dataset_name ,data_args.dataset_config_name ,data_files=data_args.data_files ,cache_dir=model_args.cache_dir ,use_auth_token=True if model_args.use_auth_token else None ,) # If we don't have a validation split, split off a percentage of train as validation. _UpperCAmelCase = None if """validation""" in ds.keys() else data_args.train_val_split if isinstance(data_args.train_val_split ,lowercase ) and data_args.train_val_split > 0.0: _UpperCAmelCase = ds["""train"""].train_test_split(data_args.train_val_split ) _UpperCAmelCase = split["""train"""] _UpperCAmelCase = split["""test"""] # Create config # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
_UpperCAmelCase = { """cache_dir""": model_args.cache_dir, """revision""": model_args.model_revision, """use_auth_token""": True if model_args.use_auth_token else None, } if model_args.config_name_or_path: _UpperCAmelCase = AutoConfig.from_pretrained(model_args.config_name_or_path ,**lowercase ) elif model_args.model_name_or_path: _UpperCAmelCase = AutoConfig.from_pretrained(model_args.model_name_or_path ,**lowercase ) else: _UpperCAmelCase = CONFIG_MAPPING[model_args.model_type]() logger.warning("""You are instantiating a new config instance from scratch.""" ) if model_args.config_overrides is not None: logger.info(f'''Overriding config: {model_args.config_overrides}''' ) config.update_from_string(model_args.config_overrides ) logger.info(f'''New config: {config}''' ) # make sure the decoder_type is "simmim" (only relevant for BEiT) if hasattr(lowercase ,"""decoder_type""" ): _UpperCAmelCase = """simmim""" # adapt config _UpperCAmelCase = model_args.image_size if model_args.image_size is not None else config.image_size _UpperCAmelCase = model_args.patch_size if model_args.patch_size is not None else config.patch_size _UpperCAmelCase = ( model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride ) config.update( { """image_size""": model_args.image_size, """patch_size""": model_args.patch_size, """encoder_stride""": model_args.encoder_stride, } ) # create image processor if model_args.image_processor_name: _UpperCAmelCase = AutoImageProcessor.from_pretrained(model_args.image_processor_name ,**lowercase ) elif model_args.model_name_or_path: _UpperCAmelCase = AutoImageProcessor.from_pretrained(model_args.model_name_or_path ,**lowercase ) else: _UpperCAmelCase = { conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items() } _UpperCAmelCase = IMAGE_PROCESSOR_TYPES[model_args.model_type]() # create model if model_args.model_name_or_path: _UpperCAmelCase = AutoModelForMaskedImageModeling.from_pretrained( model_args.model_name_or_path ,from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) ,config=lowercase ,cache_dir=model_args.cache_dir ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,) else: logger.info("""Training new model from scratch""" ) _UpperCAmelCase = AutoModelForMaskedImageModeling.from_config(lowercase ) if training_args.do_train: _UpperCAmelCase = ds["""train"""].column_names else: _UpperCAmelCase = ds["""validation"""].column_names if data_args.image_column_name is not None: _UpperCAmelCase = data_args.image_column_name elif "image" in column_names: _UpperCAmelCase = """image""" elif "img" in column_names: _UpperCAmelCase = """img""" else: _UpperCAmelCase = column_names[0] # transformations as done in original SimMIM paper # source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py _UpperCAmelCase = Compose( [ Lambda(lambda lowercase : img.convert("""RGB""" ) if img.mode != "RGB" else img ), RandomResizedCrop(model_args.image_size ,scale=(0.67, 1.0) ,ratio=(3.0 / 4.0, 4.0 / 3.0) ), RandomHorizontalFlip(), ToTensor(), Normalize(mean=image_processor.image_mean ,std=image_processor.image_std ), ] ) # create mask generator _UpperCAmelCase = MaskGenerator( input_size=model_args.image_size ,mask_patch_size=data_args.mask_patch_size ,model_patch_size=model_args.patch_size ,mask_ratio=data_args.mask_ratio ,) def preprocess_images(lowercase ): _UpperCAmelCase = [transforms(lowercase ) for image in examples[image_column_name]] _UpperCAmelCase 
= [mask_generator() for i in range(len(examples[image_column_name] ) )] return examples if training_args.do_train: if "train" not in ds: raise ValueError("""--do_train requires a train dataset""" ) if data_args.max_train_samples is not None: _UpperCAmelCase = ds["""train"""].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) # Set the training transforms ds["train"].set_transform(lowercase ) if training_args.do_eval: if "validation" not in ds: raise ValueError("""--do_eval requires a validation dataset""" ) if data_args.max_eval_samples is not None: _UpperCAmelCase = ( ds["""validation"""].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms ds["validation"].set_transform(lowercase ) # Initialize our trainer _UpperCAmelCase = Trainer( model=lowercase ,args=lowercase ,train_dataset=ds["""train"""] if training_args.do_train else None ,eval_dataset=ds["""validation"""] if training_args.do_eval else None ,tokenizer=lowercase ,data_collator=lowercase ,) # Training if training_args.do_train: _UpperCAmelCase = None if training_args.resume_from_checkpoint is not None: _UpperCAmelCase = training_args.resume_from_checkpoint elif last_checkpoint is not None: _UpperCAmelCase = last_checkpoint _UpperCAmelCase = trainer.train(resume_from_checkpoint=lowercase ) trainer.save_model() trainer.log_metrics("""train""" ,train_result.metrics ) trainer.save_metrics("""train""" ,train_result.metrics ) trainer.save_state() # Evaluation if training_args.do_eval: _UpperCAmelCase = trainer.evaluate() trainer.log_metrics("""eval""" ,lowercase ) trainer.save_metrics("""eval""" ,lowercase ) # Write model card and (optionally) push to hub _UpperCAmelCase = { """finetuned_from""": model_args.model_name_or_path, """tasks""": """masked-image-modeling""", """dataset""": data_args.dataset_name, """tags""": ["""masked-image-modeling"""], } if training_args.push_to_hub: trainer.push_to_hub(**lowercase ) else: trainer.create_model_card(**lowercase ) if __name__ == "__main__": main()
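Note: `MaskGenerator` is referenced above but never defined in this snippet. A minimal sketch of a compatible implementation, following the random patch-mask sampling described in the SimMIM paper (the constructor arguments match the call above; the exact upstream class may differ):

import numpy as np


class MaskGenerator:
    """Samples a boolean patch mask and upsamples it to the model-patch grid (sketch)."""

    def __init__(self, input_size=192, mask_patch_size=32, model_patch_size=4, mask_ratio=0.6):
        if input_size % mask_patch_size != 0 or mask_patch_size % model_patch_size != 0:
            raise ValueError("sizes must divide evenly")
        self.rand_size = input_size // mask_patch_size
        self.scale = mask_patch_size // model_patch_size
        self.token_count = self.rand_size**2
        self.mask_count = int(np.ceil(self.token_count * mask_ratio))

    def __call__(self):
        # choose which coarse patches to mask, then repeat so each masked patch
        # covers scale x scale model patches; the training loop may expect a
        # torch tensor, so convert the result if needed
        mask_idx = np.random.permutation(self.token_count)[: self.mask_count]
        mask = np.zeros(self.token_count, dtype=int)
        mask[mask_idx] = 1
        mask = mask.reshape(self.rand_size, self.rand_size)
        return mask.repeat(self.scale, axis=0).repeat(self.scale, axis=1)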
30
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available UpperCAmelCase__ = { """configuration_git""": ["""GIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GitConfig""", """GitVisionConfig"""], """processing_git""": ["""GitProcessor"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ = [ """GIT_PRETRAINED_MODEL_ARCHIVE_LIST""", """GitForCausalLM""", """GitModel""", """GitPreTrainedModel""", """GitVisionModel""", ] if TYPE_CHECKING: from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig from .processing_git import GitProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_git import ( GIT_PRETRAINED_MODEL_ARCHIVE_LIST, GitForCausalLM, GitModel, GitPreTrainedModel, GitVisionModel, ) else: import sys UpperCAmelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
30
1
"""simple docstring""" import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase__ = logging.get_logger(__name__) UpperCAmelCase__ = { """BAAI/AltCLIP""": """https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json""", # See all AltCLIP models at https://huggingface.co/models?filter=altclip } class a ( lowerCAmelCase_ ): _snake_case : str = 'altclip_text_model' def __init__( self : str , __lowerCAmelCase : int=25_0002 , __lowerCAmelCase : int=1024 , __lowerCAmelCase : str=24 , __lowerCAmelCase : Optional[Any]=16 , __lowerCAmelCase : Optional[int]=4096 , __lowerCAmelCase : Tuple="gelu" , __lowerCAmelCase : Union[str, Any]=0.1 , __lowerCAmelCase : Tuple=0.1 , __lowerCAmelCase : str=514 , __lowerCAmelCase : Dict=1 , __lowerCAmelCase : Tuple=0.02 , __lowerCAmelCase : Dict=0.02 , __lowerCAmelCase : Optional[int]=1e-0_5 , __lowerCAmelCase : Dict=1 , __lowerCAmelCase : Union[str, Any]=0 , __lowerCAmelCase : Tuple=2 , __lowerCAmelCase : int="absolute" , __lowerCAmelCase : List[str]=True , __lowerCAmelCase : Union[str, Any]=768 , **__lowerCAmelCase : Optional[int] , ): super().__init__(pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , **__lowerCAmelCase ) _UpperCAmelCase = vocab_size _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = hidden_act _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = max_position_embeddings _UpperCAmelCase = type_vocab_size _UpperCAmelCase = initializer_range _UpperCAmelCase = initializer_factor _UpperCAmelCase = layer_norm_eps _UpperCAmelCase = position_embedding_type _UpperCAmelCase = use_cache _UpperCAmelCase = project_dim class a ( lowerCAmelCase_ ): _snake_case : List[Any] = 'altclip_vision_model' def __init__( self : Any , __lowerCAmelCase : List[str]=768 , __lowerCAmelCase : Optional[int]=3072 , __lowerCAmelCase : Optional[Any]=512 , __lowerCAmelCase : int=12 , __lowerCAmelCase : Any=12 , __lowerCAmelCase : Any=3 , __lowerCAmelCase : List[Any]=224 , __lowerCAmelCase : List[str]=32 , __lowerCAmelCase : List[str]="quick_gelu" , __lowerCAmelCase : str=1e-5 , __lowerCAmelCase : str=0.0 , __lowerCAmelCase : Dict=0.02 , __lowerCAmelCase : Optional[int]=1.0 , **__lowerCAmelCase : Tuple , ): super().__init__(**__lowerCAmelCase ) _UpperCAmelCase = hidden_size _UpperCAmelCase = intermediate_size _UpperCAmelCase = projection_dim _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = num_channels _UpperCAmelCase = patch_size _UpperCAmelCase = image_size _UpperCAmelCase = initializer_range _UpperCAmelCase = initializer_factor _UpperCAmelCase = attention_dropout _UpperCAmelCase = layer_norm_eps _UpperCAmelCase = hidden_act @classmethod def lowerCAmelCase_ ( cls : Optional[int] , __lowerCAmelCase : Union[str, os.PathLike] , **__lowerCAmelCase : str ): cls._set_token_in_kwargs(__lowerCAmelCase ) _UpperCAmelCase , _UpperCAmelCase = cls.get_config_dict(__lowerCAmelCase , **__lowerCAmelCase ) # get the vision config dict if we are loading from AltCLIPConfig if config_dict.get("""model_type""" ) == "altclip": _UpperCAmelCase = config_dict["""vision_config"""] if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( f'''You are using a model of type {config_dict["model_type"]} to 
instantiate a model of type ''' f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(__lowerCAmelCase , **__lowerCAmelCase ) class a ( lowerCAmelCase_ ): _snake_case : Dict = 'altclip' _snake_case : Union[str, Any] = True def __init__( self : Tuple , __lowerCAmelCase : int=None , __lowerCAmelCase : Tuple=None , __lowerCAmelCase : List[Any]=768 , __lowerCAmelCase : Dict=2.6_592 , **__lowerCAmelCase : List[str] ): # If `_config_dict` exist, we use them for the backward compatibility. # We pop out these 2 attributes before calling `super().__init__` to avoid them being saved (which causes a lot # of confusion!). _UpperCAmelCase = kwargs.pop("""text_config_dict""" , __lowerCAmelCase ) _UpperCAmelCase = kwargs.pop("""vision_config_dict""" , __lowerCAmelCase ) super().__init__(**__lowerCAmelCase ) # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most # cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`. if text_config_dict is not None: if text_config is None: _UpperCAmelCase = {} # This is the complete result when using `text_config_dict`. _UpperCAmelCase = AltCLIPTextConfig(**__lowerCAmelCase ).to_dict() # Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different. for key, value in _text_config_dict.items(): if key in text_config and value != text_config[key] and key not in ["transformers_version"]: # If specified in `text_config_dict` if key in text_config_dict: _UpperCAmelCase = ( f'''`{key}` is found in both `text_config_dict` and `text_config` but with different values. ''' f'''The value `text_config_dict["{key}"]` will be used instead.''' ) # If inferred from default argument values (just to be super careful) else: _UpperCAmelCase = ( f'''`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The ''' f'''value `text_config["{key}"]` will be overriden.''' ) logger.warning(__lowerCAmelCase ) # Update all values in `text_config` with the ones in `_text_config_dict`. text_config.update(_text_config_dict ) if vision_config_dict is not None: if vision_config is None: _UpperCAmelCase = {} # This is the complete result when using `vision_config_dict`. _UpperCAmelCase = AltCLIPVisionConfig(**__lowerCAmelCase ).to_dict() # convert keys to string instead of integer if "id2label" in _vision_config_dict: _UpperCAmelCase = { str(__lowerCAmelCase ): value for key, value in _vision_config_dict["""id2label"""].items() } # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different. for key, value in _vision_config_dict.items(): if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]: # If specified in `vision_config_dict` if key in vision_config_dict: _UpperCAmelCase = ( f'''`{key}` is found in both `vision_config_dict` and `vision_config` but with different ''' f'''values. The value `vision_config_dict["{key}"]` will be used instead.''' ) # If inferred from default argument values (just to be super careful) else: _UpperCAmelCase = ( f'''`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. 
''' f'''The value `vision_config["{key}"]` will be overriden.''' ) logger.warning(__lowerCAmelCase ) # Update all values in `vision_config` with the ones in `_vision_config_dict`. vision_config.update(_vision_config_dict ) if text_config is None: _UpperCAmelCase = {} logger.info("""`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.""" ) if vision_config is None: _UpperCAmelCase = {} logger.info("""`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values.""" ) _UpperCAmelCase = AltCLIPTextConfig(**__lowerCAmelCase ) _UpperCAmelCase = AltCLIPVisionConfig(**__lowerCAmelCase ) _UpperCAmelCase = projection_dim _UpperCAmelCase = logit_scale_init_value _UpperCAmelCase = 1.0 @classmethod def lowerCAmelCase_ ( cls : List[str] , __lowerCAmelCase : AltCLIPTextConfig , __lowerCAmelCase : AltCLIPVisionConfig , **__lowerCAmelCase : List[str] ): return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **__lowerCAmelCase ) def lowerCAmelCase_ ( self : Union[str, Any] ): _UpperCAmelCase = copy.deepcopy(self.__dict__ ) _UpperCAmelCase = self.text_config.to_dict() _UpperCAmelCase = self.vision_config.to_dict() _UpperCAmelCase = self.__class__.model_type return output
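Method names are mangled in this sample; upstream, the trailing classmethod and method are `from_text_vision_configs` and `to_dict`. A composition sketch under that assumption:

from transformers import AltCLIPConfig, AltCLIPTextConfig, AltCLIPVisionConfig

text_config = AltCLIPTextConfig(project_dim=768)
vision_config = AltCLIPVisionConfig(projection_dim=512)

config = AltCLIPConfig.from_text_vision_configs(text_config, vision_config)
assert config.to_dict()["vision_config"]["projection_dim"] == 512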
30
"""simple docstring""" import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from timm.data import resolve_data_config from timm.data.transforms_factory import create_transform from transformers import ( BitConfig, ViTHybridConfig, ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel, ) from transformers.image_utils import PILImageResampling from transformers.utils import logging logging.set_verbosity_info() UpperCAmelCase__ = logging.get_logger(__name__) def __UpperCAmelCase ( lowercase ,lowercase=False ): """simple docstring""" _UpperCAmelCase = [] # fmt: off # stem: rename_keys.append(("""cls_token""", """vit.embeddings.cls_token""") ) rename_keys.append(("""pos_embed""", """vit.embeddings.position_embeddings""") ) rename_keys.append(("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight""") ) rename_keys.append(("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias""") ) # backbone rename_keys.append(("""patch_embed.backbone.stem.conv.weight""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight""") ) rename_keys.append(("""patch_embed.backbone.stem.norm.weight""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight""") ) rename_keys.append(("""patch_embed.backbone.stem.norm.bias""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias""") ) for stage_idx in range(len(config.backbone_config.depths ) ): for layer_idx in range(config.backbone_config.depths[stage_idx] ): rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight''') ) rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight''') ) rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias''') ) rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight''') ) rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight''') ) rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias''') ) rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight''') ) rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight''') ) rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias''') ) 
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight''') ) rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight''') ) rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias''') ) # transformer encoder for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''vit.encoder.layer.{i}.layernorm_before.weight''') ) rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''vit.encoder.layer.{i}.layernorm_before.bias''') ) rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''vit.encoder.layer.{i}.attention.output.dense.weight''') ) rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''vit.encoder.layer.{i}.attention.output.dense.bias''') ) rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''vit.encoder.layer.{i}.layernorm_after.weight''') ) rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''vit.encoder.layer.{i}.layernorm_after.bias''') ) rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''vit.encoder.layer.{i}.intermediate.dense.weight''') ) rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''vit.encoder.layer.{i}.intermediate.dense.bias''') ) rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''vit.encoder.layer.{i}.output.dense.weight''') ) rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''vit.encoder.layer.{i}.output.dense.bias''') ) if base_model: # layernorm + pooler rename_keys.extend( [ ("""norm.weight""", """layernorm.weight"""), ("""norm.bias""", """layernorm.bias"""), ("""pre_logits.fc.weight""", """pooler.dense.weight"""), ("""pre_logits.fc.bias""", """pooler.dense.bias"""), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" _UpperCAmelCase = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ("""norm.weight""", """vit.layernorm.weight"""), ("""norm.bias""", """vit.layernorm.bias"""), ("""head.weight""", """classifier.weight"""), ("""head.bias""", """classifier.bias"""), ] ) # fmt: on return rename_keys def __UpperCAmelCase ( lowercase ,lowercase ,lowercase=False ): """simple docstring""" for i in range(config.num_hidden_layers ): if base_model: _UpperCAmelCase = """""" else: _UpperCAmelCase = """vit.""" # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) _UpperCAmelCase = state_dict.pop(f'''blocks.{i}.attn.qkv.weight''' ) _UpperCAmelCase = state_dict.pop(f'''blocks.{i}.attn.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict _UpperCAmelCase = in_proj_weight[ : config.hidden_size, : ] _UpperCAmelCase = in_proj_bias[: config.hidden_size] _UpperCAmelCase = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] _UpperCAmelCase = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] _UpperCAmelCase = in_proj_weight[ -config.hidden_size :, : ] _UpperCAmelCase = in_proj_bias[-config.hidden_size :] def __UpperCAmelCase ( lowercase ): """simple docstring""" 
_UpperCAmelCase = ["""head.weight""", """head.bias"""] for k in ignore_keys: state_dict.pop(lowercase ,lowercase ) def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ): """simple docstring""" _UpperCAmelCase = dct.pop(lowercase ) _UpperCAmelCase = val def __UpperCAmelCase ( ): """simple docstring""" _UpperCAmelCase = """http://images.cocodataset.org/val2017/000000039769.jpg""" _UpperCAmelCase = Image.open(requests.get(lowercase ,stream=lowercase ).raw ) return im @torch.no_grad() def __UpperCAmelCase ( lowercase ,lowercase ,lowercase=False ): """simple docstring""" _UpperCAmelCase = BitConfig( global_padding="""same""" ,layer_type="""bottleneck""" ,depths=(3, 4, 9) ,out_features=["""stage3"""] ,embedding_dynamic_padding=lowercase ,) _UpperCAmelCase = ViTHybridConfig(backbone_config=lowercase ,image_size=3_84 ,num_labels=10_00 ) _UpperCAmelCase = False # load original model from timm _UpperCAmelCase = timm.create_model(lowercase ,pretrained=lowercase ) timm_model.eval() # load state_dict of original model, remove and rename some keys _UpperCAmelCase = timm_model.state_dict() if base_model: remove_classification_head_(lowercase ) _UpperCAmelCase = create_rename_keys(lowercase ,lowercase ) for src, dest in rename_keys: rename_key(lowercase ,lowercase ,lowercase ) read_in_q_k_v(lowercase ,lowercase ,lowercase ) _UpperCAmelCase = """huggingface/label-files""" _UpperCAmelCase = """imagenet-1k-id2label.json""" _UpperCAmelCase = json.load(open(hf_hub_download(lowercase ,lowercase ,repo_type="""dataset""" ) ,"""r""" ) ) _UpperCAmelCase = {int(lowercase ): v for k, v in idalabel.items()} _UpperCAmelCase = idalabel _UpperCAmelCase = {v: k for k, v in idalabel.items()} # load HuggingFace model if vit_name[-5:] == "in21k": _UpperCAmelCase = ViTHybridModel(lowercase ).eval() else: _UpperCAmelCase = ViTHybridForImageClassification(lowercase ).eval() model.load_state_dict(lowercase ) # create image processor _UpperCAmelCase = create_transform(**resolve_data_config({} ,model=lowercase ) ) _UpperCAmelCase = transform.transforms _UpperCAmelCase = { """bilinear""": PILImageResampling.BILINEAR, """bicubic""": PILImageResampling.BICUBIC, """nearest""": PILImageResampling.NEAREST, } _UpperCAmelCase = ViTHybridImageProcessor( do_resize=lowercase ,size={"""shortest_edge""": timm_transforms[0].size} ,resample=pillow_resamplings[timm_transforms[0].interpolation.value] ,do_center_crop=lowercase ,crop_size={"""height""": timm_transforms[1].size[0], """width""": timm_transforms[1].size[1]} ,do_normalize=lowercase ,image_mean=timm_transforms[-1].mean.tolist() ,image_std=timm_transforms[-1].std.tolist() ,) _UpperCAmelCase = prepare_img() _UpperCAmelCase = transform(lowercase ).unsqueeze(0 ) _UpperCAmelCase = processor(lowercase ,return_tensors="""pt""" ).pixel_values # verify pixel values assert torch.allclose(lowercase ,lowercase ) # verify logits with torch.no_grad(): _UpperCAmelCase = model(lowercase ) _UpperCAmelCase = outputs.logits print("""Predicted class:""" ,logits.argmax(-1 ).item() ) if base_model: _UpperCAmelCase = timm_model.forward_features(lowercase ) assert timm_pooled_output.shape == outputs.pooler_output.shape assert torch.allclose(lowercase ,outputs.pooler_output ,atol=1E-3 ) else: _UpperCAmelCase = timm_model(lowercase ) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(lowercase ,outputs.logits ,atol=1E-3 ) print("""Looks ok!""" ) if pytorch_dump_folder_path is not None: Path(lowercase ).mkdir(exist_ok=lowercase ) print(f'''Saving model {vit_name} to 
{pytorch_dump_folder_path}''' ) model.save_pretrained(lowercase ) print(f'''Saving processor to {pytorch_dump_folder_path}''' ) processor.save_pretrained(lowercase ) if push_to_hub: print(f'''Pushing model and processor to the hub {vit_name}''' ) model.push_to_hub(f'''ybelkada/{vit_name}''' ) processor.push_to_hub(f'''ybelkada/{vit_name}''' ) if __name__ == "__main__": UpperCAmelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--vit_name""", default="""vit_base_r50_s16_384""", type=str, help="""Name of the hybrid ViT timm model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub.""" ) UpperCAmelCase__ = parser.parse_args() convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
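The `read_in_q_k_v` helper above slices timm's fused qkv projection of shape `(3 * hidden_size, hidden_size)` into separate query/key/value tensors. A toy illustration of the slicing arithmetic:

import torch

hidden_size = 4
in_proj_weight = torch.arange(3 * hidden_size * hidden_size, dtype=torch.float32).reshape(3 * hidden_size, hidden_size)

query = in_proj_weight[:hidden_size, :]  # first third
key = in_proj_weight[hidden_size : hidden_size * 2, :]  # middle third
value = in_proj_weight[-hidden_size:, :]  # last third
assert torch.equal(torch.cat([query, key, value]), in_proj_weight)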
30
1
"""simple docstring""" from pathlib import PurePosixPath from typing import Optional import fsspec from fsspec import AbstractFileSystem from huggingface_hub.hf_api import DatasetInfo from ..utils.file_utils import get_authentication_headers_for_url from ..utils.hub import hf_hub_url class a ( lowerCAmelCase_ ): _snake_case : Tuple = '' _snake_case : List[str] = 'hf-legacy' # "hf://"" is reserved for hffs def __init__( self : Union[str, Any] , __lowerCAmelCase : Optional[DatasetInfo] = None , __lowerCAmelCase : Optional[str] = None , **__lowerCAmelCase : Union[str, Any] , ): super().__init__(self , **__lowerCAmelCase ) _UpperCAmelCase = repo_info _UpperCAmelCase = token _UpperCAmelCase = None def lowerCAmelCase_ ( self : int ): if self.dir_cache is None: _UpperCAmelCase = {} for hf_file in self.repo_info.siblings: # TODO(QL): add sizes _UpperCAmelCase = { """name""": hf_file.rfilename, """size""": None, """type""": """file""", } self.dir_cache.update( { str(__lowerCAmelCase ): {"""name""": str(__lowerCAmelCase ), """size""": None, """type""": """directory"""} for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1] } ) def lowerCAmelCase_ ( self : Dict , __lowerCAmelCase : str , __lowerCAmelCase : str = "rb" , **__lowerCAmelCase : List[Any] , ): if not isinstance(self.repo_info , __lowerCAmelCase ): raise NotImplementedError(f'''Open is only implemented for dataset repositories, but got {self.repo_info}''' ) _UpperCAmelCase = hf_hub_url(self.repo_info.id , __lowerCAmelCase , revision=self.repo_info.sha ) return fsspec.open( __lowerCAmelCase , mode=__lowerCAmelCase , headers=get_authentication_headers_for_url(__lowerCAmelCase , use_auth_token=self.token ) , client_kwargs={"""trust_env""": True} , ).open() def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : Any , **__lowerCAmelCase : Dict ): self._get_dirs() _UpperCAmelCase = self._strip_protocol(__lowerCAmelCase ) if path in self.dir_cache: return self.dir_cache[path] else: raise FileNotFoundError(__lowerCAmelCase ) def lowerCAmelCase_ ( self : Optional[int] , __lowerCAmelCase : List[str] , __lowerCAmelCase : List[Any]=False , **__lowerCAmelCase : Tuple ): self._get_dirs() _UpperCAmelCase = PurePosixPath(path.strip("""/""" ) ) _UpperCAmelCase = {} for p, f in self.dir_cache.items(): _UpperCAmelCase = PurePosixPath(p.strip("""/""" ) ) _UpperCAmelCase = p.parent if root == path: _UpperCAmelCase = f _UpperCAmelCase = list(paths.values() ) if detail: return out else: return sorted(f["""name"""] for f in out )
30
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_speech_available, is_torch_available, ) UpperCAmelCase__ = { """configuration_trocr""": ["""TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TrOCRConfig"""], """processing_trocr""": ["""TrOCRProcessor"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ = [ """TROCR_PRETRAINED_MODEL_ARCHIVE_LIST""", """TrOCRForCausalLM""", """TrOCRPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig from .processing_trocr import TrOCRProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel else: import sys UpperCAmelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
30
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) UpperCAmelCase__ = {"""configuration_deit""": ["""DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """DeiTConfig""", """DeiTOnnxConfig"""]} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ = ["""DeiTFeatureExtractor"""] UpperCAmelCase__ = ["""DeiTImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ = [ """DEIT_PRETRAINED_MODEL_ARCHIVE_LIST""", """DeiTForImageClassification""", """DeiTForImageClassificationWithTeacher""", """DeiTForMaskedImageModeling""", """DeiTModel""", """DeiTPreTrainedModel""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ = [ """TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFDeiTForImageClassification""", """TFDeiTForImageClassificationWithTeacher""", """TFDeiTForMaskedImageModeling""", """TFDeiTModel""", """TFDeiTPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_deit import DeiTFeatureExtractor from .image_processing_deit import DeiTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_deit import ( DEIT_PRETRAINED_MODEL_ARCHIVE_LIST, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, DeiTModel, DeiTPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_deit import ( TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST, TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, TFDeiTModel, TFDeiTPreTrainedModel, ) else: import sys UpperCAmelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
30
"""simple docstring""" # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from argparse import ArgumentParser from accelerate.commands.config import get_config_parser from accelerate.commands.env import env_command_parser from accelerate.commands.launch import launch_command_parser from accelerate.commands.test import test_command_parser from accelerate.commands.tpu import tpu_command_parser def __UpperCAmelCase ( ): """simple docstring""" _UpperCAmelCase = ArgumentParser("""Accelerate CLI tool""" ,usage="""accelerate <command> [<args>]""" ,allow_abbrev=lowercase ) _UpperCAmelCase = parser.add_subparsers(help="""accelerate command helpers""" ) # Register commands get_config_parser(subparsers=lowercase ) env_command_parser(subparsers=lowercase ) launch_command_parser(subparsers=lowercase ) tpu_command_parser(subparsers=lowercase ) test_command_parser(subparsers=lowercase ) # Let's go _UpperCAmelCase = parser.parse_args() if not hasattr(lowercase ,"""func""" ): parser.print_help() exit(1 ) # Run args.func(lowercase ) if __name__ == "__main__": main()
30
1
"""simple docstring""" def __UpperCAmelCase ( lowercase = 3 ,lowercase = 7 ,lowercase = 1_00_00_00 ): """simple docstring""" _UpperCAmelCase = 0 _UpperCAmelCase = 1 for current_denominator in range(1 ,limit + 1 ): _UpperCAmelCase = current_denominator * numerator // denominator if current_denominator % denominator == 0: current_numerator -= 1 if current_numerator * max_denominator > current_denominator * max_numerator: _UpperCAmelCase = current_numerator _UpperCAmelCase = current_denominator return max_numerator if __name__ == "__main__": print(solution(numerator=3, denominator=7, limit=1_0_0_0_0_0_0))
30
"""simple docstring""" def __UpperCAmelCase ( lowercase ): """simple docstring""" return " ".join(input_str.split()[::-1] ) if __name__ == "__main__": import doctest doctest.testmod()
30
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available UpperCAmelCase__ = { """configuration_data2vec_audio""": ["""DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Data2VecAudioConfig"""], """configuration_data2vec_text""": [ """DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Data2VecTextConfig""", """Data2VecTextOnnxConfig""", ], """configuration_data2vec_vision""": [ """DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Data2VecVisionConfig""", """Data2VecVisionOnnxConfig""", ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ = [ """DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST""", """Data2VecAudioForAudioFrameClassification""", """Data2VecAudioForCTC""", """Data2VecAudioForSequenceClassification""", """Data2VecAudioForXVector""", """Data2VecAudioModel""", """Data2VecAudioPreTrainedModel""", ] UpperCAmelCase__ = [ """DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST""", """Data2VecTextForCausalLM""", """Data2VecTextForMaskedLM""", """Data2VecTextForMultipleChoice""", """Data2VecTextForQuestionAnswering""", """Data2VecTextForSequenceClassification""", """Data2VecTextForTokenClassification""", """Data2VecTextModel""", """Data2VecTextPreTrainedModel""", ] UpperCAmelCase__ = [ """DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST""", """Data2VecVisionForImageClassification""", """Data2VecVisionForMaskedImageModeling""", """Data2VecVisionForSemanticSegmentation""", """Data2VecVisionModel""", """Data2VecVisionPreTrainedModel""", ] if is_tf_available(): UpperCAmelCase__ = [ """TFData2VecVisionForImageClassification""", """TFData2VecVisionForSemanticSegmentation""", """TFData2VecVisionModel""", """TFData2VecVisionPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_dataavec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecAudioConfig from .configuration_dataavec_text import ( DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecTextConfig, DataaVecTextOnnxConfig, ) from .configuration_dataavec_vision import ( DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecVisionConfig, DataaVecVisionOnnxConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_dataavec_audio import ( DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST, DataaVecAudioForAudioFrameClassification, DataaVecAudioForCTC, DataaVecAudioForSequenceClassification, DataaVecAudioForXVector, DataaVecAudioModel, DataaVecAudioPreTrainedModel, ) from .modeling_dataavec_text import ( DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST, DataaVecTextForCausalLM, DataaVecTextForMaskedLM, DataaVecTextForMultipleChoice, DataaVecTextForQuestionAnswering, DataaVecTextForSequenceClassification, DataaVecTextForTokenClassification, DataaVecTextModel, DataaVecTextPreTrainedModel, ) from .modeling_dataavec_vision import ( DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST, DataaVecVisionForImageClassification, DataaVecVisionForMaskedImageModeling, DataaVecVisionForSemanticSegmentation, DataaVecVisionModel, DataaVecVisionPreTrainedModel, ) if is_tf_available(): from .modeling_tf_dataavec_vision import ( TFDataaVecVisionForImageClassification, TFDataaVecVisionForSemanticSegmentation, TFDataaVecVisionModel, TFDataaVecVisionPreTrainedModel, ) else: import sys UpperCAmelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
30
"""simple docstring""" import csv import tweepy # Twitter API credentials UpperCAmelCase__ = """""" UpperCAmelCase__ = """""" UpperCAmelCase__ = """""" UpperCAmelCase__ = """""" def __UpperCAmelCase ( lowercase ): """simple docstring""" # authorize twitter, initialize tweepy _UpperCAmelCase = tweepy.OAuthHandler(lowercase ,lowercase ) auth.set_access_token(lowercase ,lowercase ) _UpperCAmelCase = tweepy.API(lowercase ) # initialize a list to hold all the tweepy Tweets _UpperCAmelCase = [] # make initial request for most recent tweets (200 is the maximum allowed count) _UpperCAmelCase = api.user_timeline(screen_name=lowercase ,count=2_00 ) # save most recent tweets alltweets.extend(lowercase ) # save the id of the oldest tweet less one _UpperCAmelCase = alltweets[-1].id - 1 # keep grabbing tweets until there are no tweets left to grab while len(lowercase ) > 0: print(f'''getting tweets before {oldest}''' ) # all subsequent requests use the max_id param to prevent duplicates _UpperCAmelCase = api.user_timeline( screen_name=lowercase ,count=2_00 ,max_id=lowercase ) # save most recent tweets alltweets.extend(lowercase ) # update the id of the oldest tweet less one _UpperCAmelCase = alltweets[-1].id - 1 print(f'''...{len(lowercase )} tweets downloaded so far''' ) # transform the tweepy tweets into a 2D array that will populate the csv _UpperCAmelCase = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets] # write the csv with open(f'''new_{screen_name}_tweets.csv''' ,"""w""" ) as f: _UpperCAmelCase = csv.writer(lowercase ) writer.writerow(["""id""", """created_at""", """text"""] ) writer.writerows(lowercase ) if __name__ == "__main__": # pass in the username of the account you want to download get_all_tweets("""FirePing32""")
30
1
"""simple docstring""" from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available UpperCAmelCase__ = { """configuration_autoformer""": [ """AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """AutoformerConfig""", ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ = [ """AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""", """AutoformerForPrediction""", """AutoformerModel""", """AutoformerPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_autoformer import ( AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, AutoformerConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_autoformer import ( AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, AutoformerForPrediction, AutoformerModel, AutoformerPreTrainedModel, ) else: import sys UpperCAmelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
30
"""simple docstring""" import multiprocessing import time from arguments import PretokenizationArguments from datasets import load_dataset from transformers import AutoTokenizer, HfArgumentParser def __UpperCAmelCase ( lowercase ): """simple docstring""" _UpperCAmelCase = {} _UpperCAmelCase = tokenizer(example["""content"""] ,truncation=lowercase )["""input_ids"""] _UpperCAmelCase = len(example["""content"""] ) / len(output["""input_ids"""] ) return output UpperCAmelCase__ = HfArgumentParser(PretokenizationArguments) UpperCAmelCase__ = parser.parse_args() if args.num_workers is None: UpperCAmelCase__ = multiprocessing.cpu_count() UpperCAmelCase__ = AutoTokenizer.from_pretrained(args.tokenizer_dir) UpperCAmelCase__ = time.time() UpperCAmelCase__ = load_dataset(args.dataset_name, split="""train""") print(F'''Dataset loaded in {time.time()-t_start:.2f}s''') UpperCAmelCase__ = time.time() UpperCAmelCase__ = ds.map( tokenize, num_proc=args.num_workers, remove_columns=[ """repo_name""", """path""", """copies""", """size""", """content""", """license""", """hash""", """line_mean""", """line_max""", """alpha_frac""", """autogenerated""", ], ) print(F'''Dataset tokenized in {time.time()-t_start:.2f}s''') UpperCAmelCase__ = time.time() ds.push_to_hub(args.tokenized_data_repo) print(F'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
30
1
"""simple docstring""" def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ,lowercase ,lowercase ,): """simple docstring""" _UpperCAmelCase = [redshift, radiation_density, matter_density, dark_energy] if any(p < 0 for p in parameters ): raise ValueError("""All input parameters must be positive""" ) if any(p > 1 for p in parameters[1:4] ): raise ValueError("""Relative densities cannot be greater than one""" ) else: _UpperCAmelCase = 1 - (matter_density + radiation_density + dark_energy) _UpperCAmelCase = ( radiation_density * (redshift + 1) ** 4 + matter_density * (redshift + 1) ** 3 + curvature * (redshift + 1) ** 2 + dark_energy ) _UpperCAmelCase = hubble_constant * e_a ** (1 / 2) return hubble if __name__ == "__main__": import doctest # run doctest doctest.testmod() # demo LCDM approximation UpperCAmelCase__ = 0.3 print( hubble_parameter( hubble_constant=68.3, radiation_density=1E-4, matter_density=matter_density, dark_energy=1 - matter_density, redshift=0, ) )
30
"""simple docstring""" from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...onnx.utils import compute_effective_axis_dimension from ...utils import logging if TYPE_CHECKING: from ...processing_utils import ProcessorMixin from ...utils import TensorType UpperCAmelCase__ = logging.get_logger(__name__) UpperCAmelCase__ = { """microsoft/layoutlmv3-base""": """https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json""", } class a ( lowerCAmelCase_ ): _snake_case : Any = 'layoutlmv3' def __init__( self : Optional[Any] , __lowerCAmelCase : Tuple=5_0265 , __lowerCAmelCase : Union[str, Any]=768 , __lowerCAmelCase : str=12 , __lowerCAmelCase : int=12 , __lowerCAmelCase : Any=3072 , __lowerCAmelCase : Any="gelu" , __lowerCAmelCase : Any=0.1 , __lowerCAmelCase : Tuple=0.1 , __lowerCAmelCase : Any=512 , __lowerCAmelCase : List[str]=2 , __lowerCAmelCase : Optional[Any]=0.02 , __lowerCAmelCase : Optional[int]=1e-5 , __lowerCAmelCase : int=1 , __lowerCAmelCase : Optional[Any]=0 , __lowerCAmelCase : Optional[Any]=2 , __lowerCAmelCase : List[str]=1024 , __lowerCAmelCase : Any=128 , __lowerCAmelCase : int=128 , __lowerCAmelCase : Tuple=True , __lowerCAmelCase : List[Any]=32 , __lowerCAmelCase : Any=128 , __lowerCAmelCase : int=64 , __lowerCAmelCase : List[str]=256 , __lowerCAmelCase : Union[str, Any]=True , __lowerCAmelCase : Any=True , __lowerCAmelCase : List[str]=True , __lowerCAmelCase : Optional[Any]=224 , __lowerCAmelCase : List[Any]=3 , __lowerCAmelCase : int=16 , __lowerCAmelCase : Optional[Any]=None , **__lowerCAmelCase : Union[str, Any] , ): super().__init__( vocab_size=__lowerCAmelCase , hidden_size=__lowerCAmelCase , num_hidden_layers=__lowerCAmelCase , num_attention_heads=__lowerCAmelCase , intermediate_size=__lowerCAmelCase , hidden_act=__lowerCAmelCase , hidden_dropout_prob=__lowerCAmelCase , attention_probs_dropout_prob=__lowerCAmelCase , max_position_embeddings=__lowerCAmelCase , type_vocab_size=__lowerCAmelCase , initializer_range=__lowerCAmelCase , layer_norm_eps=__lowerCAmelCase , pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , **__lowerCAmelCase , ) _UpperCAmelCase = max_ad_position_embeddings _UpperCAmelCase = coordinate_size _UpperCAmelCase = shape_size _UpperCAmelCase = has_relative_attention_bias _UpperCAmelCase = rel_pos_bins _UpperCAmelCase = max_rel_pos _UpperCAmelCase = has_spatial_attention_bias _UpperCAmelCase = rel_ad_pos_bins _UpperCAmelCase = max_rel_ad_pos _UpperCAmelCase = text_embed _UpperCAmelCase = visual_embed _UpperCAmelCase = input_size _UpperCAmelCase = num_channels _UpperCAmelCase = patch_size _UpperCAmelCase = classifier_dropout class a ( lowerCAmelCase_ ): _snake_case : str = version.parse('1.12' ) @property def lowerCAmelCase_ ( self : Dict ): # The order of inputs is different for question answering and sequence classification if self.task in ["question-answering", "sequence-classification"]: return OrderedDict( [ ("""input_ids""", {0: """batch""", 1: """sequence"""}), ("""attention_mask""", {0: """batch""", 1: """sequence"""}), ("""bbox""", {0: """batch""", 1: """sequence"""}), ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) else: return OrderedDict( [ ("""input_ids""", {0: """batch""", 1: """sequence"""}), ("""bbox""", {0: """batch""", 1: """sequence"""}), ("""attention_mask""", {0: 
"""batch""", 1: """sequence"""}), ("""pixel_values""", {0: """batch""", 1: """num_channels"""}), ] ) @property def lowerCAmelCase_ ( self : List[Any] ): return 1e-5 @property def lowerCAmelCase_ ( self : List[str] ): return 12 def lowerCAmelCase_ ( self : str , __lowerCAmelCase : "ProcessorMixin" , __lowerCAmelCase : int = -1 , __lowerCAmelCase : int = -1 , __lowerCAmelCase : bool = False , __lowerCAmelCase : Optional["TensorType"] = None , __lowerCAmelCase : int = 3 , __lowerCAmelCase : int = 40 , __lowerCAmelCase : int = 40 , ): setattr(processor.image_processor , """apply_ocr""" , __lowerCAmelCase ) # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX _UpperCAmelCase = compute_effective_axis_dimension( __lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX _UpperCAmelCase = processor.tokenizer.num_special_tokens_to_add(__lowerCAmelCase ) _UpperCAmelCase = compute_effective_axis_dimension( __lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__lowerCAmelCase ) # Generate dummy inputs according to compute batch and sequence _UpperCAmelCase = [[""" """.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size # Generate dummy bounding boxes _UpperCAmelCase = [[[48, 84, 73, 128]]] * batch_size # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX # batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch) _UpperCAmelCase = self._generate_dummy_images(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase = dict( processor( __lowerCAmelCase , text=__lowerCAmelCase , boxes=__lowerCAmelCase , return_tensors=__lowerCAmelCase , ) ) return inputs
30
1
"""simple docstring""" import math from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase__ = logging.get_logger(__name__) UpperCAmelCase__ = { """facebook/data2vec-base-960h""": """https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json""", # See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio } class a ( lowerCAmelCase_ ): _snake_case : Dict = 'data2vec-audio' def __init__( self : Any , __lowerCAmelCase : str=32 , __lowerCAmelCase : List[Any]=768 , __lowerCAmelCase : Dict=12 , __lowerCAmelCase : int=12 , __lowerCAmelCase : str=3072 , __lowerCAmelCase : int="gelu" , __lowerCAmelCase : Optional[int]=0.1 , __lowerCAmelCase : str=0.1 , __lowerCAmelCase : List[Any]=0.1 , __lowerCAmelCase : Union[str, Any]=0.0 , __lowerCAmelCase : List[Any]=0.1 , __lowerCAmelCase : Any=0.1 , __lowerCAmelCase : str=0.02 , __lowerCAmelCase : Union[str, Any]=1e-5 , __lowerCAmelCase : List[str]="gelu" , __lowerCAmelCase : Optional[Any]=(512, 512, 512, 512, 512, 512, 512) , __lowerCAmelCase : str=(5, 2, 2, 2, 2, 2, 2) , __lowerCAmelCase : Optional[Any]=(10, 3, 3, 3, 3, 2, 2) , __lowerCAmelCase : int=False , __lowerCAmelCase : str=16 , __lowerCAmelCase : Any=19 , __lowerCAmelCase : Union[str, Any]=5 , __lowerCAmelCase : Tuple=0.05 , __lowerCAmelCase : List[str]=10 , __lowerCAmelCase : List[Any]=2 , __lowerCAmelCase : Dict=0.0 , __lowerCAmelCase : Optional[Any]=10 , __lowerCAmelCase : Union[str, Any]=0 , __lowerCAmelCase : Union[str, Any]="sum" , __lowerCAmelCase : Dict=False , __lowerCAmelCase : Tuple=False , __lowerCAmelCase : Tuple=256 , __lowerCAmelCase : Dict=(512, 512, 512, 512, 1500) , __lowerCAmelCase : Union[str, Any]=(5, 3, 3, 1, 1) , __lowerCAmelCase : Union[str, Any]=(1, 2, 3, 1, 1) , __lowerCAmelCase : List[str]=512 , __lowerCAmelCase : Optional[int]=0 , __lowerCAmelCase : List[Any]=1 , __lowerCAmelCase : Optional[Any]=2 , __lowerCAmelCase : Union[str, Any]=False , __lowerCAmelCase : Optional[Any]=3 , __lowerCAmelCase : Tuple=2 , __lowerCAmelCase : Union[str, Any]=3 , __lowerCAmelCase : Dict=None , **__lowerCAmelCase : List[str] , ): super().__init__(**__lowerCAmelCase , pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase ) _UpperCAmelCase = hidden_size _UpperCAmelCase = feat_extract_activation _UpperCAmelCase = list(__lowerCAmelCase ) _UpperCAmelCase = list(__lowerCAmelCase ) _UpperCAmelCase = list(__lowerCAmelCase ) _UpperCAmelCase = conv_bias _UpperCAmelCase = num_conv_pos_embeddings _UpperCAmelCase = num_conv_pos_embedding_groups _UpperCAmelCase = conv_pos_kernel_size _UpperCAmelCase = len(self.conv_dim ) _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_act _UpperCAmelCase = num_attention_heads _UpperCAmelCase = hidden_dropout _UpperCAmelCase = attention_dropout _UpperCAmelCase = activation_dropout _UpperCAmelCase = feat_proj_dropout _UpperCAmelCase = final_dropout _UpperCAmelCase = layerdrop _UpperCAmelCase = layer_norm_eps _UpperCAmelCase = initializer_range _UpperCAmelCase = vocab_size _UpperCAmelCase = use_weighted_layer_sum if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( """Configuration for convolutional layers is incorrect. 
It is required that `len(config.conv_dim)` ==""" """ `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =""" f''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,''' f''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 _UpperCAmelCase = mask_time_prob _UpperCAmelCase = mask_time_length _UpperCAmelCase = mask_time_min_masks _UpperCAmelCase = mask_feature_prob _UpperCAmelCase = mask_feature_length _UpperCAmelCase = mask_feature_min_masks # ctc loss _UpperCAmelCase = ctc_loss_reduction _UpperCAmelCase = ctc_zero_infinity # adapter _UpperCAmelCase = add_adapter _UpperCAmelCase = adapter_kernel_size _UpperCAmelCase = adapter_stride _UpperCAmelCase = num_adapter_layers _UpperCAmelCase = output_hidden_size or hidden_size # SequenceClassification-specific parameter. Feel free to ignore for other classes. _UpperCAmelCase = classifier_proj_size # XVector-specific parameters. Feel free to ignore for other classes. _UpperCAmelCase = list(__lowerCAmelCase ) _UpperCAmelCase = list(__lowerCAmelCase ) _UpperCAmelCase = list(__lowerCAmelCase ) _UpperCAmelCase = xvector_output_dim @property def lowerCAmelCase_ ( self : int ): return math.prod(self.conv_stride )
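The closing property gives the feature extractor's total downsampling factor, i.e. how many raw audio samples collapse into one encoder frame. With the default strides:

import math

conv_stride = (5, 2, 2, 2, 2, 2, 2)
print(math.prod(conv_stride))  # 320 samples per frame = 20 ms at 16 kHz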
30
"""simple docstring""" import csv from collections import defaultdict from dataclasses import dataclass, field from typing import List, Optional import matplotlib.pyplot as plt import numpy as np from matplotlib.ticker import ScalarFormatter from transformers import HfArgumentParser def __UpperCAmelCase ( lowercase=None ,lowercase=None ): """simple docstring""" return field(default_factory=lambda: default ,metadata=lowercase ) @dataclass class a : _snake_case : str = field( metadata={'help': 'The csv file to plot.'} , ) _snake_case : bool = field( default=lowerCAmelCase_ , metadata={'help': 'Whether to plot along batch size or sequence length. Defaults to sequence length.'} , ) _snake_case : bool = field( default=lowerCAmelCase_ , metadata={'help': 'Whether the csv file has time results or memory results. Defaults to memory results.'} , ) _snake_case : bool = field( default=lowerCAmelCase_ , metadata={'help': 'Disable logarithmic scale when plotting'} , ) _snake_case : bool = field( default=lowerCAmelCase_ , metadata={ 'help': 'Whether the csv file has training results or inference results. Defaults to inference results.' } , ) _snake_case : Optional[str] = field( default=lowerCAmelCase_ , metadata={'help': 'Filename under which the plot will be saved. If unused no plot is saved.'} , ) _snake_case : Optional[List[str]] = list_field( default=lowerCAmelCase_ , metadata={'help': 'List of model names that are used instead of the ones in the csv file.'} ) def __UpperCAmelCase ( lowercase ): """simple docstring""" try: int(lowercase ) return True except ValueError: return False def __UpperCAmelCase ( lowercase ): """simple docstring""" try: float(lowercase ) return True except ValueError: return False class a : def __init__( self : int , __lowerCAmelCase : Union[str, Any] ): _UpperCAmelCase = args _UpperCAmelCase = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}} ) with open(self.args.csv_file , newline="""""" ) as csv_file: _UpperCAmelCase = csv.DictReader(__lowerCAmelCase ) for row in reader: _UpperCAmelCase = row["""model"""] self.result_dict[model_name]["bsz"].append(int(row["""batch_size"""] ) ) self.result_dict[model_name]["seq_len"].append(int(row["""sequence_length"""] ) ) if can_convert_to_int(row["""result"""] ): # value is not None _UpperCAmelCase = int(row["""result"""] ) elif can_convert_to_float(row["""result"""] ): # value is not None _UpperCAmelCase = float(row["""result"""] ) def lowerCAmelCase_ ( self : Optional[Any] ): _UpperCAmelCase , _UpperCAmelCase = plt.subplots() _UpperCAmelCase = """Time usage""" if self.args.is_time else """Memory usage""" _UpperCAmelCase = title_str + """ for training""" if self.args.is_train else title_str + """ for inference""" if not self.args.no_log_scale: # set logarithm scales ax.set_xscale("""log""" ) ax.set_yscale("""log""" ) for axis in [ax.xaxis, ax.yaxis]: axis.set_major_formatter(ScalarFormatter() ) for model_name_idx, model_name in enumerate(self.result_dict.keys() ): _UpperCAmelCase = sorted(set(self.result_dict[model_name]["""bsz"""] ) ) _UpperCAmelCase = sorted(set(self.result_dict[model_name]["""seq_len"""] ) ) _UpperCAmelCase = self.result_dict[model_name]["""result"""] ((_UpperCAmelCase) , (_UpperCAmelCase)) = ( (batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes) ) _UpperCAmelCase = ( model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx] ) for inner_loop_value in inner_loop_array: if self.args.plot_along_batch: _UpperCAmelCase = 
np.asarray( [results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results] , dtype=__lowerCAmelCase , ) else: _UpperCAmelCase = np.asarray( [results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results] , dtype=np.floataa , ) ((_UpperCAmelCase) , (_UpperCAmelCase)) = ( ("""batch_size""", """len""") if self.args.plot_along_batch else ("""in #tokens""", """bsz""") ) _UpperCAmelCase = np.asarray(__lowerCAmelCase , __lowerCAmelCase )[: len(__lowerCAmelCase )] plt.scatter( __lowerCAmelCase , __lowerCAmelCase , label=f'''{label_model_name} - {inner_loop_label}: {inner_loop_value}''' ) plt.plot(__lowerCAmelCase , __lowerCAmelCase , """--""" ) title_str += f''' {label_model_name} vs.''' _UpperCAmelCase = title_str[:-4] _UpperCAmelCase = """Time in s""" if self.args.is_time else """Memory in MB""" # plot plt.title(__lowerCAmelCase ) plt.xlabel(__lowerCAmelCase ) plt.ylabel(__lowerCAmelCase ) plt.legend() if self.args.figure_png_file is not None: plt.savefig(self.args.figure_png_file ) else: plt.show() def __UpperCAmelCase ( ): """simple docstring""" _UpperCAmelCase = HfArgumentParser(lowercase ) _UpperCAmelCase = parser.parse_args_into_dataclasses()[0] _UpperCAmelCase = Plot(args=lowercase ) plot.plot() if __name__ == "__main__": main()
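The reader above requires `model`, `batch_size`, `sequence_length` and `result` columns. A hypothetical input in that shape:

import csv
import io

sample = """model,batch_size,sequence_length,result
bert-base-uncased,8,128,0.056
bert-base-uncased,8,512,0.213
"""
rows = list(csv.DictReader(io.StringIO(sample)))
assert rows[0]["batch_size"] == "8"  # DictReader yields strings; the class above casts them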
30
1
"""simple docstring""" from .integrations import ( is_optuna_available, is_ray_available, is_sigopt_available, is_wandb_available, run_hp_search_optuna, run_hp_search_ray, run_hp_search_sigopt, run_hp_search_wandb, ) from .trainer_utils import ( HPSearchBackend, default_hp_space_optuna, default_hp_space_ray, default_hp_space_sigopt, default_hp_space_wandb, ) from .utils import logging UpperCAmelCase__ = logging.get_logger(__name__) class a : _snake_case : str _snake_case : str = None @staticmethod def lowerCAmelCase_ ( ): raise NotImplementedError def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : int , __lowerCAmelCase : str , **__lowerCAmelCase : Union[str, Any] ): raise NotImplementedError def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : Union[str, Any] ): raise NotImplementedError def lowerCAmelCase_ ( self : List[Any] ): if not self.is_available(): raise RuntimeError( f'''You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}.''' ) @classmethod def lowerCAmelCase_ ( cls : List[str] ): return f'''`pip install {cls.pip_package or cls.name}`''' class a ( lowerCAmelCase_ ): _snake_case : Any = 'optuna' @staticmethod def lowerCAmelCase_ ( ): return is_optuna_available() def lowerCAmelCase_ ( self : int , __lowerCAmelCase : str , __lowerCAmelCase : int , __lowerCAmelCase : str , **__lowerCAmelCase : Union[str, Any] ): return run_hp_search_optuna(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ) def lowerCAmelCase_ ( self : int , __lowerCAmelCase : List[str] ): return default_hp_space_optuna(__lowerCAmelCase ) class a ( lowerCAmelCase_ ): _snake_case : Optional[int] = 'ray' _snake_case : Dict = '\'ray[tune]\'' @staticmethod def lowerCAmelCase_ ( ): return is_ray_available() def lowerCAmelCase_ ( self : Dict , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : int , __lowerCAmelCase : str , **__lowerCAmelCase : List[Any] ): return run_hp_search_ray(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ) def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : List[Any] ): return default_hp_space_ray(__lowerCAmelCase ) class a ( lowerCAmelCase_ ): _snake_case : Optional[Any] = 'sigopt' @staticmethod def lowerCAmelCase_ ( ): return is_sigopt_available() def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : int , __lowerCAmelCase : str , **__lowerCAmelCase : int ): return run_hp_search_sigopt(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ) def lowerCAmelCase_ ( self : int , __lowerCAmelCase : Tuple ): return default_hp_space_sigopt(__lowerCAmelCase ) class a ( lowerCAmelCase_ ): _snake_case : Optional[Any] = 'wandb' @staticmethod def lowerCAmelCase_ ( ): return is_wandb_available() def lowerCAmelCase_ ( self : Optional[int] , __lowerCAmelCase : List[str] , __lowerCAmelCase : int , __lowerCAmelCase : str , **__lowerCAmelCase : List[str] ): return run_hp_search_wandb(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ) def lowerCAmelCase_ ( self : List[str] , __lowerCAmelCase : Dict ): return default_hp_space_wandb(__lowerCAmelCase ) UpperCAmelCase__ = { HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend] } def __UpperCAmelCase ( ): """simple docstring""" _UpperCAmelCase = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()] if len(lowercase ) > 0: _UpperCAmelCase 
= available_backends[0].name if len(lowercase ) > 1: logger.info( f'''{len(lowercase )} hyperparameter search backends available. Using {name} as the default.''' ) return name raise RuntimeError( """No hyperparameter search backend available.\n""" + """\n""".join( f''' - To install {backend.name} run {backend.pip_install()}''' for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() ) )
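# --- Hedged usage sketch (not part of the original file; `default_hp_search_backend`
# and `ensure_available` follow the usual transformers naming and are assumptions,
# since the identifiers above are masked) ---
#
#     backend_name = default_hp_search_backend()            # e.g. "optuna" if installed
#     backend = ALL_HYPERPARAMETER_SEARCH_BACKENDS[HPSearchBackend(backend_name)]
#     backend.ensure_available()                            # raises RuntimeError with a pip hint if missing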
30
"""simple docstring""" import os import pytest from attr import dataclass UpperCAmelCase__ = """us-east-1""" # defaults region @dataclass class a : _snake_case : str _snake_case : Tuple = 'arn:aws:iam::558105141721:role/sagemaker_execution_role' _snake_case : List[Any] = { 'task_name': 'mnli', 'per_device_train_batch_size': 16, 'per_device_eval_batch_size': 16, 'do_train': True, 'do_eval': True, 'do_predict': True, 'output_dir': '/opt/ml/model', 'overwrite_output_dir': True, 'max_steps': 5_00, 'save_steps': 55_00, } _snake_case : Optional[Any] = {**hyperparameters, 'max_steps': 10_00} @property def lowerCAmelCase_ ( self : Optional[Any] ): if self.framework == "pytorch": return [ {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"}, {"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"}, {"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"}, ] else: return [ {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"}, {"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"}, {"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"}, ] @property def lowerCAmelCase_ ( self : Dict ): return f'''{self.framework}-transfromers-test''' @property def lowerCAmelCase_ ( self : Union[str, Any] ): return f'''./tests/sagemaker/scripts/{self.framework}''' @property def lowerCAmelCase_ ( self : Dict ): if self.framework == "pytorch": return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04" else: return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04" @pytest.fixture(scope="""class""" ) def __UpperCAmelCase ( lowercase ): """simple docstring""" _UpperCAmelCase = SageMakerTestEnvironment(framework=request.cls.framework )
30
1
"""simple docstring""" import unittest import numpy as np from transformers.file_utils import is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_vision from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DPTImageProcessor class a ( unittest.TestCase ): def __init__( self : Optional[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : int=7 , __lowerCAmelCase : Any=3 , __lowerCAmelCase : Dict=18 , __lowerCAmelCase : str=30 , __lowerCAmelCase : List[str]=400 , __lowerCAmelCase : Any=True , __lowerCAmelCase : Tuple=None , __lowerCAmelCase : Tuple=True , __lowerCAmelCase : int=[0.5, 0.5, 0.5] , __lowerCAmelCase : Dict=[0.5, 0.5, 0.5] , ): _UpperCAmelCase = size if size is not None else {"""height""": 18, """width""": 18} _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = num_channels _UpperCAmelCase = image_size _UpperCAmelCase = min_resolution _UpperCAmelCase = max_resolution _UpperCAmelCase = do_resize _UpperCAmelCase = size _UpperCAmelCase = do_normalize _UpperCAmelCase = image_mean _UpperCAmelCase = image_std def lowerCAmelCase_ ( self : Dict ): return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, } @require_torch @require_vision class a ( lowerCAmelCase_ , unittest.TestCase ): _snake_case : Union[str, Any] = DPTImageProcessor if is_vision_available() else None def lowerCAmelCase_ ( self : List[Any] ): _UpperCAmelCase = DPTImageProcessingTester(self ) @property def lowerCAmelCase_ ( self : List[str] ): return self.image_processor_tester.prepare_image_processor_dict() def lowerCAmelCase_ ( self : str ): _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__lowerCAmelCase , """image_mean""" ) ) self.assertTrue(hasattr(__lowerCAmelCase , """image_std""" ) ) self.assertTrue(hasattr(__lowerCAmelCase , """do_normalize""" ) ) self.assertTrue(hasattr(__lowerCAmelCase , """do_resize""" ) ) self.assertTrue(hasattr(__lowerCAmelCase , """size""" ) ) def lowerCAmelCase_ ( self : Union[str, Any] ): _UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"""height""": 18, """width""": 18} ) _UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 ) self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} ) def lowerCAmelCase_ ( self : Union[str, Any] ): # Initialize image_processing _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(__lowerCAmelCase , Image.Image ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) # Test batched _UpperCAmelCase = image_processing(__lowerCAmelCase , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, 
self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) def lowerCAmelCase_ ( self : List[str] ): # Initialize image_processing _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase , numpify=__lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(__lowerCAmelCase , np.ndarray ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) # Test batched _UpperCAmelCase = image_processing(__lowerCAmelCase , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) def lowerCAmelCase_ ( self : str ): # Initialize image_processing _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase , torchify=__lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(__lowerCAmelCase , torch.Tensor ) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) # Test batched _UpperCAmelCase = image_processing(__lowerCAmelCase , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , )
30
"""simple docstring""" import string from math import logaa def __UpperCAmelCase ( lowercase ,lowercase ): """simple docstring""" _UpperCAmelCase = document.translate( str.maketrans("""""" ,"""""" ,string.punctuation ) ).replace("""\n""" ,"""""" ) _UpperCAmelCase = document_without_punctuation.split(""" """ ) # word tokenization return len([word for word in tokenize_document if word.lower() == term.lower()] ) def __UpperCAmelCase ( lowercase ,lowercase ): """simple docstring""" _UpperCAmelCase = corpus.lower().translate( str.maketrans("""""" ,"""""" ,string.punctuation ) ) # strip all punctuation and replace it with '' _UpperCAmelCase = corpus_without_punctuation.split("""\n""" ) _UpperCAmelCase = term.lower() return (len([doc for doc in docs if term in doc] ), len(lowercase )) def __UpperCAmelCase ( lowercase ,lowercase ,lowercase=False ): """simple docstring""" if smoothing: if n == 0: raise ValueError("""log10(0) is undefined.""" ) return round(1 + logaa(n / (1 + df) ) ,3 ) if df == 0: raise ZeroDivisionError("""df must be > 0""" ) elif n == 0: raise ValueError("""log10(0) is undefined.""" ) return round(logaa(n / df ) ,3 ) def __UpperCAmelCase ( lowercase ,lowercase ): """simple docstring""" return round(tf * idf ,3 )
30
1
"""simple docstring""" import inspect import jax import jax.lax as lax import jax.numpy as jnp from ..utils import add_start_docstrings from ..utils.logging import get_logger UpperCAmelCase__ = get_logger(__name__) UpperCAmelCase__ = r""" Args: input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`): Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam search or log softmax for each vocabulary token when using beam search kwargs (`Dict[str, Any]`, *optional*): Additional logits processor specific kwargs. Return: `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores. """ class a : @add_start_docstrings(__lowerCAmelCase ) def __call__( self : Dict , __lowerCAmelCase : jnp.ndarray , __lowerCAmelCase : jnp.ndarray ): raise NotImplementedError( f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) class a : @add_start_docstrings(__lowerCAmelCase ) def __call__( self : List[Any] , __lowerCAmelCase : jnp.ndarray , __lowerCAmelCase : jnp.ndarray ): raise NotImplementedError( f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) class a ( lowerCAmelCase_ ): @add_start_docstrings(__lowerCAmelCase ) def __call__( self : List[str] , __lowerCAmelCase : jnp.ndarray , __lowerCAmelCase : jnp.ndarray , __lowerCAmelCase : int , **__lowerCAmelCase : str ): for processor in self: _UpperCAmelCase = inspect.signature(processor.__call__ ).parameters if len(__lowerCAmelCase ) > 3: if not all(arg in kwargs for arg in list(function_args.keys() )[2:] ): raise ValueError( f'''Make sure that all the required parameters: {list(function_args.keys() )} for ''' f'''{processor.__class__} are passed to the logits processor.''' ) _UpperCAmelCase = processor(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ) else: _UpperCAmelCase = processor(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) return scores class a ( lowerCAmelCase_ ): def __init__( self : List[str] , __lowerCAmelCase : float ): if not isinstance(__lowerCAmelCase , __lowerCAmelCase ) or not (temperature > 0): raise ValueError(f'''`temperature` has to be a strictly positive float, but is {temperature}''' ) _UpperCAmelCase = temperature def __call__( self : Tuple , __lowerCAmelCase : jnp.ndarray , __lowerCAmelCase : jnp.ndarray , __lowerCAmelCase : int ): _UpperCAmelCase = scores / self.temperature return scores class a ( lowerCAmelCase_ ): def __init__( self : Dict , __lowerCAmelCase : float , __lowerCAmelCase : float = -float("""Inf""" ) , __lowerCAmelCase : int = 1 ): if not isinstance(__lowerCAmelCase , __lowerCAmelCase ) or (top_p < 0 or top_p > 1.0): raise ValueError(f'''`top_p` has to be a float > 0 and < 1, but is {top_p}''' ) if not isinstance(__lowerCAmelCase , __lowerCAmelCase ) or (min_tokens_to_keep < 1): raise ValueError(f'''`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}''' ) _UpperCAmelCase = top_p _UpperCAmelCase = filter_value _UpperCAmelCase = min_tokens_to_keep def __call__( self : int , __lowerCAmelCase : jnp.ndarray , __lowerCAmelCase : jnp.ndarray , __lowerCAmelCase : int ): _UpperCAmelCase , 
_UpperCAmelCase = lax.top_k(__lowerCAmelCase , scores.shape[-1] ) _UpperCAmelCase = jnp.full_like(__lowerCAmelCase , self.filter_value ) _UpperCAmelCase = jax.nn.softmax(__lowerCAmelCase , axis=-1 ).cumsum(axis=-1 ) _UpperCAmelCase = cumulative_probs < self.top_p # include the token that is higher than top_p as well _UpperCAmelCase = jnp.roll(__lowerCAmelCase , 1 ) score_mask |= score_mask.at[:, 0].set(__lowerCAmelCase ) # min tokens to keep _UpperCAmelCase = score_mask.at[:, : self.min_tokens_to_keep].set(__lowerCAmelCase ) _UpperCAmelCase = jnp.where(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase = jax.lax.sort_key_val(__lowerCAmelCase , __lowerCAmelCase )[-1] return next_scores class a ( lowerCAmelCase_ ): def __init__( self : Optional[Any] , __lowerCAmelCase : int , __lowerCAmelCase : float = -float("""Inf""" ) , __lowerCAmelCase : int = 1 ): if not isinstance(__lowerCAmelCase , __lowerCAmelCase ) or top_k <= 0: raise ValueError(f'''`top_k` has to be a strictly positive integer, but is {top_k}''' ) _UpperCAmelCase = max(__lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase = filter_value def __call__( self : int , __lowerCAmelCase : jnp.ndarray , __lowerCAmelCase : jnp.ndarray , __lowerCAmelCase : int ): _UpperCAmelCase , _UpperCAmelCase = scores.shape _UpperCAmelCase = jnp.full(batch_size * vocab_size , self.filter_value ) _UpperCAmelCase = min(self.top_k , scores.shape[-1] ) # Safety check _UpperCAmelCase , _UpperCAmelCase = lax.top_k(__lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase = jnp.broadcast_to((jnp.arange(__lowerCAmelCase ) * vocab_size)[:, None] , (batch_size, topk) ).flatten() _UpperCAmelCase = topk_scores.flatten() _UpperCAmelCase = topk_indices.flatten() + shift _UpperCAmelCase = next_scores_flat.at[topk_indices_flat].set(__lowerCAmelCase ) _UpperCAmelCase = next_scores_flat.reshape(__lowerCAmelCase , __lowerCAmelCase ) return next_scores class a ( lowerCAmelCase_ ): def __init__( self : List[str] , __lowerCAmelCase : int ): _UpperCAmelCase = bos_token_id def __call__( self : int , __lowerCAmelCase : jnp.ndarray , __lowerCAmelCase : jnp.ndarray , __lowerCAmelCase : int ): _UpperCAmelCase = jnp.full(scores.shape , -float("""inf""" ) ) _UpperCAmelCase = 1 - jnp.bool_(cur_len - 1 ) _UpperCAmelCase = jnp.where(__lowerCAmelCase , new_scores.at[:, self.bos_token_id].set(0 ) , __lowerCAmelCase ) return scores class a ( lowerCAmelCase_ ): def __init__( self : Union[str, Any] , __lowerCAmelCase : int , __lowerCAmelCase : int ): _UpperCAmelCase = max_length _UpperCAmelCase = eos_token_id def __call__( self : Dict , __lowerCAmelCase : jnp.ndarray , __lowerCAmelCase : jnp.ndarray , __lowerCAmelCase : int ): _UpperCAmelCase = jnp.full(scores.shape , -float("""inf""" ) ) _UpperCAmelCase = 1 - jnp.bool_(cur_len - self.max_length + 1 ) _UpperCAmelCase = jnp.where(__lowerCAmelCase , new_scores.at[:, self.eos_token_id].set(0 ) , __lowerCAmelCase ) return scores class a ( lowerCAmelCase_ ): def __init__( self : str , __lowerCAmelCase : int , __lowerCAmelCase : int ): if not isinstance(__lowerCAmelCase , __lowerCAmelCase ) or min_length < 0: raise ValueError(f'''`min_length` has to be a positive integer, but is {min_length}''' ) if not isinstance(__lowerCAmelCase , __lowerCAmelCase ) or eos_token_id < 0: raise ValueError(f'''`eos_token_id` has to be a positive integer, but is {eos_token_id}''' ) _UpperCAmelCase = min_length _UpperCAmelCase = eos_token_id def __call__( self : Dict , __lowerCAmelCase : jnp.ndarray , __lowerCAmelCase : jnp.ndarray , 
__lowerCAmelCase : int ): # create boolean flag to decide if min length penalty should be applied _UpperCAmelCase = 1 - jnp.clip(cur_len - self.min_length , 0 , 1 ) _UpperCAmelCase = jnp.where(__lowerCAmelCase , scores.at[:, self.eos_token_id].set(-float("""inf""" ) ) , __lowerCAmelCase ) return scores class a ( lowerCAmelCase_ ): def __init__( self : Tuple , __lowerCAmelCase : List[Any] , __lowerCAmelCase : int ): _UpperCAmelCase = list(__lowerCAmelCase ) _UpperCAmelCase = begin_index def __call__( self : Dict , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : int ): _UpperCAmelCase = 1 - jnp.bool_(cur_len - self.begin_index ) _UpperCAmelCase = jnp.where(__lowerCAmelCase , scores.at[:, self.begin_suppress_tokens].set(-float("""inf""" ) ) , __lowerCAmelCase ) return scores class a ( lowerCAmelCase_ ): def __init__( self : Optional[Any] , __lowerCAmelCase : list ): _UpperCAmelCase = list(__lowerCAmelCase ) def __call__( self : List[Any] , __lowerCAmelCase : jnp.ndarray , __lowerCAmelCase : jnp.ndarray , __lowerCAmelCase : int ): _UpperCAmelCase = scores.at[..., self.suppress_tokens].set(-float("""inf""" ) ) return scores class a ( lowerCAmelCase_ ): def __init__( self : Optional[Any] , __lowerCAmelCase : Optional[Any] ): _UpperCAmelCase = dict(__lowerCAmelCase ) # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the # index of the array corresponds to the index of the token to be forced, for XLA compatibility. # Indexes without forced tokens will have a negative value. _UpperCAmelCase = jnp.ones((max(force_token_map.keys() ) + 1) , dtype=jnp.intaa ) * -1 for index, token in force_token_map.items(): if token is not None: _UpperCAmelCase = force_token_array.at[index].set(__lowerCAmelCase ) _UpperCAmelCase = jnp.intaa(__lowerCAmelCase ) def __call__( self : Tuple , __lowerCAmelCase : jnp.ndarray , __lowerCAmelCase : jnp.ndarray , __lowerCAmelCase : int ): def _force_token(__lowerCAmelCase : Tuple ): _UpperCAmelCase = scores.shape[0] _UpperCAmelCase = self.force_token_array[generation_idx] _UpperCAmelCase = jnp.ones_like(__lowerCAmelCase , dtype=scores.dtype ) * -float("""inf""" ) _UpperCAmelCase = jnp.zeros((batch_size, 1) , dtype=scores.dtype ) _UpperCAmelCase = lax.dynamic_update_slice(__lowerCAmelCase , __lowerCAmelCase , (0, current_token) ) return new_scores _UpperCAmelCase = lax.cond( cur_len >= self.force_token_array.shape[0] , lambda: scores , lambda: lax.cond( self.force_token_array[cur_len] >= 0 , lambda: _force_token(__lowerCAmelCase ) , lambda: scores , ) , ) return scores class a ( lowerCAmelCase_ ): def __init__( self : int , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : List[str] ): _UpperCAmelCase = generate_config.eos_token_id _UpperCAmelCase = generate_config.no_timestamps_token_id _UpperCAmelCase = generate_config.no_timestamps_token_id + 1 _UpperCAmelCase = decoder_input_length + 1 if generate_config.is_multilingual: # room for language token and task token self.begin_index += 2 if hasattr(__lowerCAmelCase , """max_initial_timestamp_index""" ): _UpperCAmelCase = generate_config.max_initial_timestamp_index else: _UpperCAmelCase = model_config.vocab_size if self.max_initial_timestamp_index is None: _UpperCAmelCase = model_config.vocab_size def __call__( self : str , __lowerCAmelCase : List[str] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Optional[int] ): # suppress <|notimestamps|> which is handled by without_timestamps _UpperCAmelCase = scores.at[:, 
self.no_timestamps_token_id].set(-float("""inf""" ) ) def handle_pairs(__lowerCAmelCase : Any , __lowerCAmelCase : List[Any] ): _UpperCAmelCase = jnp.where((cur_len - self.begin_index) >= 1 , __lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase = jnp.where( input_ids_k[cur_len - 1] >= self.timestamp_begin , True and last_was_timestamp , __lowerCAmelCase , ) _UpperCAmelCase = jnp.where((cur_len - self.begin_index) < 2 , __lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase = jnp.where( input_ids_k[cur_len - 2] >= self.timestamp_begin , __lowerCAmelCase , __lowerCAmelCase , ) return jnp.where( __lowerCAmelCase , jnp.where( penultimate_was_timestamp > 0 , scores_k.at[self.timestamp_begin :].set(-float("""inf""" ) ) , scores_k.at[: self.eos_token_id].set(-float("""inf""" ) ) , ) , __lowerCAmelCase , ) _UpperCAmelCase = jax.vmap(__lowerCAmelCase )(__lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase = jnp.where(cur_len == self.begin_index , __lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase = jnp.where( self.max_initial_timestamp_index is not None , True and apply_max_initial_timestamp , __lowerCAmelCase , ) _UpperCAmelCase = self.timestamp_begin + self.max_initial_timestamp_index _UpperCAmelCase = jnp.where( __lowerCAmelCase , scores.at[:, last_allowed + 1 :].set(-float("""inf""" ) ) , __lowerCAmelCase , ) # if sum of probability over timestamps is above any other token, sample timestamp _UpperCAmelCase = jax.nn.log_softmax(__lowerCAmelCase , axis=-1 ) def handle_cumulative_probs(__lowerCAmelCase : Optional[Any] , __lowerCAmelCase : int ): _UpperCAmelCase = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :] , axis=-1 ) _UpperCAmelCase = jnp.max(logprobs_k[: self.timestamp_begin] ) return jnp.where( timestamp_logprob > max_text_token_logprob , scores_k.at[: self.timestamp_begin].set(-float("""inf""" ) ) , __lowerCAmelCase , ) _UpperCAmelCase = jax.vmap(__lowerCAmelCase )(__lowerCAmelCase , __lowerCAmelCase ) return scores
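# --- Hedged sketch (standalone; not part of the original file) ---
# The core move of the top-k warper above: keep the k best logits and push
# everything else to the filter value before sampling.
import jax.numpy as jnp
from jax import lax

scores = jnp.array([[0.1, 0.4, 0.2, 0.3]])
topk_scores, topk_idx = lax.top_k(scores, 2)  # values/indices of the 2 best logits
masked = jnp.full_like(scores, -jnp.inf).at[0, topk_idx[0]].set(topk_scores[0])
print(masked)  # [[-inf  0.4 -inf  0.3]]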
30
"""simple docstring""" import argparse from torch import nn # transformers_old should correspond to branch `save_old_prophetnet_model_structure` here # original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively from transformers_old.modeling_prophetnet import ( ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld, ) from transformers_old.modeling_xlm_prophetnet import ( XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld, ) from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging UpperCAmelCase__ = logging.get_logger(__name__) logging.set_verbosity_info() def __UpperCAmelCase ( lowercase ,lowercase ): """simple docstring""" if "xprophetnet" in prophetnet_checkpoint_path: _UpperCAmelCase = XLMProphetNetForConditionalGenerationOld.from_pretrained(lowercase ) _UpperCAmelCase , _UpperCAmelCase = XLMProphetNetForConditionalGeneration.from_pretrained( lowercase ,output_loading_info=lowercase ) else: _UpperCAmelCase = ProphetNetForConditionalGenerationOld.from_pretrained(lowercase ) _UpperCAmelCase , _UpperCAmelCase = ProphetNetForConditionalGeneration.from_pretrained( lowercase ,output_loading_info=lowercase ) _UpperCAmelCase = ["""key_proj""", """value_proj""", """query_proj"""] _UpperCAmelCase = { """self_attn""": """ngram_self_attn""", """cross_attn""": """encoder_attn""", """cross_attn_layer_norm""": """encoder_attn_layer_norm""", """feed_forward_layer_norm""": """final_layer_norm""", """feed_forward""": """""", """intermediate""": """fc1""", """output""": """fc2""", """key_proj""": """k_proj""", """query_proj""": """q_proj""", """value_proj""": """v_proj""", """word_embeddings""": """embed_tokens""", """embeddings_layer_norm""": """emb_layer_norm""", """relative_pos_embeddings""": """relative_linear""", """ngram_embeddings""": """ngram_input_embed""", """position_embeddings""": """embed_positions""", } for key in loading_info["missing_keys"]: _UpperCAmelCase = key.split(""".""" ) if attributes[0] == "lm_head": _UpperCAmelCase = prophet _UpperCAmelCase = prophet_old else: _UpperCAmelCase = prophet.prophetnet _UpperCAmelCase = prophet_old.model _UpperCAmelCase = False for attribute in attributes: if attribute in mapping: _UpperCAmelCase = mapping[attribute] if not hasattr(lowercase ,lowercase ) and len(lowercase ) > 0: _UpperCAmelCase = attribute elif hasattr(lowercase ,lowercase ): _UpperCAmelCase = attribute if attribute == "weight": assert old_model.weight.shape == model.weight.shape, "Shapes have to match!" _UpperCAmelCase = old_model.weight logger.info(f'''{attribute} is initialized.''' ) _UpperCAmelCase = True break elif attribute == "bias": assert old_model.bias.shape == model.bias.shape, "Shapes have to match!" 
                _UpperCAmelCase = old_model.bias
                logger.info(f'''{attribute} is initialized''' )
                _UpperCAmelCase = True
                break
            elif attribute in special_keys and hasattr(lowercase ,"""in_proj_weight""" ):
                _UpperCAmelCase = old_model.in_proj_weight.shape[0] // 3
                _UpperCAmelCase = getattr(lowercase ,lowercase )
                # These two shape checks were bare comparison expressions (no-ops);
                # they must be asserts to actually verify anything.
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
                if attribute == "query_proj":
                    _UpperCAmelCase = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
                    _UpperCAmelCase = nn.Parameter(old_model.in_proj_bias[:embed_dim] )
                elif attribute == "key_proj":
                    _UpperCAmelCase = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
                    _UpperCAmelCase = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
                elif attribute == "value_proj":
                    _UpperCAmelCase = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
                    _UpperCAmelCase = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
                _UpperCAmelCase = True
                break
            elif attribute == "position_embeddings":
                assert (
                    model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
                ), "Hidden size has to match"
                assert model.position_embeddings.weight.shape[0] == 5_12, "We want 512 position_embeddings."
                _UpperCAmelCase = nn.Parameter(old_model.embed_positions.weight[:5_12, :] )
                _UpperCAmelCase = True
                break

            if attribute.isdigit():
                _UpperCAmelCase = model[int(lowercase )]
                _UpperCAmelCase = old_model[int(lowercase )]
            else:
                _UpperCAmelCase = getattr(lowercase ,lowercase )
                if old_attribute == "":
                    _UpperCAmelCase = old_model
                else:
                    if not hasattr(lowercase ,lowercase ):
                        raise ValueError(f'''{old_model} does not have {old_attribute}''' )
                    _UpperCAmelCase = getattr(lowercase ,lowercase )

        if not is_key_init:
            raise ValueError(f'''{key} was not correctly initialized!''' )
    print(f'''Saving model to {pytorch_dump_folder_path}''' )
    prophet.save_pretrained(lowercase )


if __name__ == "__main__":
    UpperCAmelCase__ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--prophetnet_checkpoint_path""", default=None, type=str, required=True, help="""Path to the official PyTorch dump."""
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
    )
    UpperCAmelCase__ = parser.parse_args()
    convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
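# --- Hedged usage note (not part of the original file; script name and paths
# are illustrative placeholders) ---
#
#     python convert_prophetnet_checkpoint.py \
#         --prophetnet_checkpoint_path /path/to/old_checkpoint \
#         --pytorch_dump_folder_path /path/to/output_dir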
30
1
"""simple docstring""" from typing import Optional, Tuple, Union import tensorflow as tf from ...activations_tf import ACTaFN from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward from ...modeling_tf_outputs import ( TFBaseModelOutputWithNoAttention, TFBaseModelOutputWithPoolingAndNoAttention, TFSequenceClassifierOutput, ) from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs from ...tf_utils import shape_list from ...utils import logging from .configuration_regnet import RegNetConfig UpperCAmelCase__ = logging.get_logger(__name__) # General docstring UpperCAmelCase__ = """RegNetConfig""" # Base docstring UpperCAmelCase__ = """facebook/regnet-y-040""" UpperCAmelCase__ = [1, 1_0_8_8, 7, 7] # Image classification docstring UpperCAmelCase__ = """facebook/regnet-y-040""" UpperCAmelCase__ = """tabby, tabby cat""" UpperCAmelCase__ = [ """facebook/regnet-y-040""", # See all regnet models at https://huggingface.co/models?filter=regnet ] class a ( tf.keras.layers.Layer ): def __init__( self : Dict , __lowerCAmelCase : int , __lowerCAmelCase : int = 3 , __lowerCAmelCase : int = 1 , __lowerCAmelCase : int = 1 , __lowerCAmelCase : Optional[str] = "relu" , **__lowerCAmelCase : Optional[Any] , ): super().__init__(**__lowerCAmelCase ) # The padding and conv has been verified in # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb _UpperCAmelCase = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 ) _UpperCAmelCase = tf.keras.layers.ConvaD( filters=__lowerCAmelCase , kernel_size=__lowerCAmelCase , strides=__lowerCAmelCase , padding="""VALID""" , groups=__lowerCAmelCase , use_bias=__lowerCAmelCase , name="""convolution""" , ) _UpperCAmelCase = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name="""normalization""" ) _UpperCAmelCase = ACTaFN[activation] if activation is not None else tf.identity def lowerCAmelCase_ ( self : Any , __lowerCAmelCase : Optional[int] ): _UpperCAmelCase = self.convolution(self.padding(__lowerCAmelCase ) ) _UpperCAmelCase = self.normalization(__lowerCAmelCase ) _UpperCAmelCase = self.activation(__lowerCAmelCase ) return hidden_state class a ( tf.keras.layers.Layer ): def __init__( self : Optional[int] , __lowerCAmelCase : RegNetConfig , **__lowerCAmelCase : Dict ): super().__init__(**__lowerCAmelCase ) _UpperCAmelCase = config.num_channels _UpperCAmelCase = TFRegNetConvLayer( out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name="""embedder""" , ) def lowerCAmelCase_ ( self : int , __lowerCAmelCase : Optional[int] ): _UpperCAmelCase = shape_list(__lowerCAmelCase )[1] if tf.executing_eagerly() and num_channels != self.num_channels: raise ValueError( """Make sure that the channel dimension of the pixel values match with the one set in the configuration.""" ) # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format. # So change the input format from `NCHW` to `NHWC`. 
# shape = (batch_size, in_height, in_width, in_channels=num_channels) _UpperCAmelCase = tf.transpose(__lowerCAmelCase , perm=(0, 2, 3, 1) ) _UpperCAmelCase = self.embedder(__lowerCAmelCase ) return hidden_state class a ( tf.keras.layers.Layer ): def __init__( self : str , __lowerCAmelCase : int , __lowerCAmelCase : int = 2 , **__lowerCAmelCase : List[str] ): super().__init__(**__lowerCAmelCase ) _UpperCAmelCase = tf.keras.layers.ConvaD( filters=__lowerCAmelCase , kernel_size=1 , strides=__lowerCAmelCase , use_bias=__lowerCAmelCase , name="""convolution""" ) _UpperCAmelCase = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name="""normalization""" ) def lowerCAmelCase_ ( self : Tuple , __lowerCAmelCase : tf.Tensor , __lowerCAmelCase : bool = False ): return self.normalization(self.convolution(__lowerCAmelCase ) , training=__lowerCAmelCase ) class a ( tf.keras.layers.Layer ): def __init__( self : Union[str, Any] , __lowerCAmelCase : int , __lowerCAmelCase : int , **__lowerCAmelCase : int ): super().__init__(**__lowerCAmelCase ) _UpperCAmelCase = tf.keras.layers.GlobalAveragePoolingaD(keepdims=__lowerCAmelCase , name="""pooler""" ) _UpperCAmelCase = [ tf.keras.layers.ConvaD(filters=__lowerCAmelCase , kernel_size=1 , activation="""relu""" , name="""attention.0""" ), tf.keras.layers.ConvaD(filters=__lowerCAmelCase , kernel_size=1 , activation="""sigmoid""" , name="""attention.2""" ), ] def lowerCAmelCase_ ( self : Tuple , __lowerCAmelCase : Optional[int] ): # [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels] _UpperCAmelCase = self.pooler(__lowerCAmelCase ) for layer_module in self.attention: _UpperCAmelCase = layer_module(__lowerCAmelCase ) _UpperCAmelCase = hidden_state * pooled return hidden_state class a ( tf.keras.layers.Layer ): def __init__( self : Any , __lowerCAmelCase : RegNetConfig , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : int = 1 , **__lowerCAmelCase : List[str] ): super().__init__(**__lowerCAmelCase ) _UpperCAmelCase = in_channels != out_channels or stride != 1 _UpperCAmelCase = max(1 , out_channels // config.groups_width ) _UpperCAmelCase = ( TFRegNetShortCut(__lowerCAmelCase , stride=__lowerCAmelCase , name="""shortcut""" ) if should_apply_shortcut else tf.keras.layers.Activation("""linear""" , name="""shortcut""" ) ) # `self.layers` instead of `self.layer` because that is a reserved argument. 
_UpperCAmelCase = [ TFRegNetConvLayer(__lowerCAmelCase , kernel_size=1 , activation=config.hidden_act , name="""layer.0""" ), TFRegNetConvLayer( __lowerCAmelCase , stride=__lowerCAmelCase , groups=__lowerCAmelCase , activation=config.hidden_act , name="""layer.1""" ), TFRegNetConvLayer(__lowerCAmelCase , kernel_size=1 , activation=__lowerCAmelCase , name="""layer.2""" ), ] _UpperCAmelCase = ACTaFN[config.hidden_act] def lowerCAmelCase_ ( self : Tuple , __lowerCAmelCase : Any ): _UpperCAmelCase = hidden_state for layer_module in self.layers: _UpperCAmelCase = layer_module(__lowerCAmelCase ) _UpperCAmelCase = self.shortcut(__lowerCAmelCase ) hidden_state += residual _UpperCAmelCase = self.activation(__lowerCAmelCase ) return hidden_state class a ( tf.keras.layers.Layer ): def __init__( self : Tuple , __lowerCAmelCase : RegNetConfig , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : int = 1 , **__lowerCAmelCase : List[Any] ): super().__init__(**__lowerCAmelCase ) _UpperCAmelCase = in_channels != out_channels or stride != 1 _UpperCAmelCase = max(1 , out_channels // config.groups_width ) _UpperCAmelCase = ( TFRegNetShortCut(__lowerCAmelCase , stride=__lowerCAmelCase , name="""shortcut""" ) if should_apply_shortcut else tf.keras.layers.Activation("""linear""" , name="""shortcut""" ) ) _UpperCAmelCase = [ TFRegNetConvLayer(__lowerCAmelCase , kernel_size=1 , activation=config.hidden_act , name="""layer.0""" ), TFRegNetConvLayer( __lowerCAmelCase , stride=__lowerCAmelCase , groups=__lowerCAmelCase , activation=config.hidden_act , name="""layer.1""" ), TFRegNetSELayer(__lowerCAmelCase , reduced_channels=int(round(in_channels / 4 ) ) , name="""layer.2""" ), TFRegNetConvLayer(__lowerCAmelCase , kernel_size=1 , activation=__lowerCAmelCase , name="""layer.3""" ), ] _UpperCAmelCase = ACTaFN[config.hidden_act] def lowerCAmelCase_ ( self : Any , __lowerCAmelCase : Dict ): _UpperCAmelCase = hidden_state for layer_module in self.layers: _UpperCAmelCase = layer_module(__lowerCAmelCase ) _UpperCAmelCase = self.shortcut(__lowerCAmelCase ) hidden_state += residual _UpperCAmelCase = self.activation(__lowerCAmelCase ) return hidden_state class a ( tf.keras.layers.Layer ): def __init__( self : Any , __lowerCAmelCase : RegNetConfig , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : int = 2 , __lowerCAmelCase : int = 2 , **__lowerCAmelCase : Dict ): super().__init__(**__lowerCAmelCase ) _UpperCAmelCase = TFRegNetXLayer if config.layer_type == """x""" else TFRegNetYLayer _UpperCAmelCase = [ # downsampling is done in the first layer with stride of 2 layer(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , stride=__lowerCAmelCase , name="""layers.0""" ), *[layer(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , name=f'''layers.{i+1}''' ) for i in range(depth - 1 )], ] def lowerCAmelCase_ ( self : int , __lowerCAmelCase : Optional[Any] ): for layer_module in self.layers: _UpperCAmelCase = layer_module(__lowerCAmelCase ) return hidden_state class a ( tf.keras.layers.Layer ): def __init__( self : str , __lowerCAmelCase : RegNetConfig , **__lowerCAmelCase : Optional[Any] ): super().__init__(**__lowerCAmelCase ) _UpperCAmelCase = [] # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input self.stages.append( TFRegNetStage( __lowerCAmelCase , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name="""stages.0""" , ) ) _UpperCAmelCase = 
zip(config.hidden_sizes , config.hidden_sizes[1:] ) for i, ((in_channels, out_channels), depth) in enumerate(zip(__lowerCAmelCase , config.depths[1:] ) ): self.stages.append(TFRegNetStage(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , depth=__lowerCAmelCase , name=f'''stages.{i+1}''' ) ) def lowerCAmelCase_ ( self : Tuple , __lowerCAmelCase : tf.Tensor , __lowerCAmelCase : bool = False , __lowerCAmelCase : bool = True ): _UpperCAmelCase = () if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: _UpperCAmelCase = hidden_states + (hidden_state,) _UpperCAmelCase = stage_module(__lowerCAmelCase ) if output_hidden_states: _UpperCAmelCase = hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None ) return TFBaseModelOutputWithNoAttention(last_hidden_state=__lowerCAmelCase , hidden_states=__lowerCAmelCase ) @keras_serializable class a ( tf.keras.layers.Layer ): _snake_case : List[Any] = RegNetConfig def __init__( self : Optional[int] , __lowerCAmelCase : Optional[int] , **__lowerCAmelCase : Optional[int] ): super().__init__(**__lowerCAmelCase ) _UpperCAmelCase = config _UpperCAmelCase = TFRegNetEmbeddings(__lowerCAmelCase , name="""embedder""" ) _UpperCAmelCase = TFRegNetEncoder(__lowerCAmelCase , name="""encoder""" ) _UpperCAmelCase = tf.keras.layers.GlobalAveragePoolingaD(keepdims=__lowerCAmelCase , name="""pooler""" ) @unpack_inputs def lowerCAmelCase_ ( self : Any , __lowerCAmelCase : tf.Tensor , __lowerCAmelCase : Optional[bool] = None , __lowerCAmelCase : Optional[bool] = None , __lowerCAmelCase : bool = False , ): _UpperCAmelCase = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) _UpperCAmelCase = return_dict if return_dict is not None else self.config.use_return_dict _UpperCAmelCase = self.embedder(__lowerCAmelCase , training=__lowerCAmelCase ) _UpperCAmelCase = self.encoder( __lowerCAmelCase , output_hidden_states=__lowerCAmelCase , return_dict=__lowerCAmelCase , training=__lowerCAmelCase ) _UpperCAmelCase = encoder_outputs[0] _UpperCAmelCase = self.pooler(__lowerCAmelCase ) # Change to NCHW output format have uniformity in the modules _UpperCAmelCase = tf.transpose(__lowerCAmelCase , perm=(0, 3, 1, 2) ) _UpperCAmelCase = tf.transpose(__lowerCAmelCase , perm=(0, 3, 1, 2) ) # Change the other hidden state outputs to NCHW as well if output_hidden_states: _UpperCAmelCase = tuple([tf.transpose(__lowerCAmelCase , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] ) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return TFBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=__lowerCAmelCase , pooler_output=__lowerCAmelCase , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , ) class a ( lowerCAmelCase_ ): _snake_case : List[str] = RegNetConfig _snake_case : Optional[Any] = 'regnet' _snake_case : Union[str, Any] = 'pixel_values' @property def lowerCAmelCase_ ( self : Any ): return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224) , dtype=tf.floataa )} UpperCAmelCase__ = r""" Parameters: This model is a Tensorflow [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and behavior. 
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights. """ UpperCAmelCase__ = r""" Args: pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`ConveNextImageProcessor.__call__`] for details. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ @add_start_docstrings( 'The bare RegNet model outputting raw features without any specific head on top.' , lowerCAmelCase_ , ) class a ( lowerCAmelCase_ ): def __init__( self : int , __lowerCAmelCase : RegNetConfig , *__lowerCAmelCase : Dict , **__lowerCAmelCase : List[str] ): super().__init__(__lowerCAmelCase , *__lowerCAmelCase , **__lowerCAmelCase ) _UpperCAmelCase = TFRegNetMainLayer(__lowerCAmelCase , name="""regnet""" ) @unpack_inputs @add_start_docstrings_to_model_forward(__lowerCAmelCase ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=__lowerCAmelCase , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def lowerCAmelCase_ ( self : Union[str, Any] , __lowerCAmelCase : tf.Tensor , __lowerCAmelCase : Optional[bool] = None , __lowerCAmelCase : Optional[bool] = None , __lowerCAmelCase : Any=False , ): _UpperCAmelCase = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) _UpperCAmelCase = return_dict if return_dict is not None else self.config.use_return_dict _UpperCAmelCase = self.regnet( pixel_values=__lowerCAmelCase , output_hidden_states=__lowerCAmelCase , return_dict=__lowerCAmelCase , training=__lowerCAmelCase , ) if not return_dict: return (outputs[0],) + outputs[1:] return TFBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , ) @add_start_docstrings( '\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. 
for\n ImageNet.\n ' , lowerCAmelCase_ , ) class a ( lowerCAmelCase_ , lowerCAmelCase_ ): def __init__( self : Dict , __lowerCAmelCase : RegNetConfig , *__lowerCAmelCase : List[Any] , **__lowerCAmelCase : List[Any] ): super().__init__(__lowerCAmelCase , *__lowerCAmelCase , **__lowerCAmelCase ) _UpperCAmelCase = config.num_labels _UpperCAmelCase = TFRegNetMainLayer(__lowerCAmelCase , name="""regnet""" ) # classification head _UpperCAmelCase = [ tf.keras.layers.Flatten(), tf.keras.layers.Dense(config.num_labels , name="""classifier.1""" ) if config.num_labels > 0 else tf.identity, ] @unpack_inputs @add_start_docstrings_to_model_forward(__lowerCAmelCase ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=__lowerCAmelCase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def lowerCAmelCase_ ( self : str , __lowerCAmelCase : tf.Tensor = None , __lowerCAmelCase : tf.Tensor = None , __lowerCAmelCase : bool = None , __lowerCAmelCase : bool = None , __lowerCAmelCase : str=False , ): _UpperCAmelCase = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) _UpperCAmelCase = return_dict if return_dict is not None else self.config.use_return_dict _UpperCAmelCase = self.regnet( __lowerCAmelCase , output_hidden_states=__lowerCAmelCase , return_dict=__lowerCAmelCase , training=__lowerCAmelCase ) _UpperCAmelCase = outputs.pooler_output if return_dict else outputs[1] _UpperCAmelCase = self.classifier[0](__lowerCAmelCase ) _UpperCAmelCase = self.classifier[1](__lowerCAmelCase ) _UpperCAmelCase = None if labels is None else self.hf_compute_loss(labels=__lowerCAmelCase , logits=__lowerCAmelCase ) if not return_dict: _UpperCAmelCase = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFSequenceClassifierOutput(loss=__lowerCAmelCase , logits=__lowerCAmelCase , hidden_states=outputs.hidden_states )
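# --- Hedged sketch (standalone; not part of the original file) ---
# The NCHW <-> NHWC transposes used throughout this file, in isolation:
import tensorflow as tf

pixel_values = tf.random.uniform((1, 3, 224, 224))     # NCHW, the HF-facing layout
nhwc = tf.transpose(pixel_values, perm=(0, 2, 3, 1))   # NHWC, required by tf.keras Conv2D on CPU
nchw = tf.transpose(nhwc, perm=(0, 3, 1, 2))           # back to NCHW for the returned outputs
print(nhwc.shape, nchw.shape)                          # (1, 224, 224, 3) (1, 3, 224, 224)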
30
"""simple docstring""" import mpmath # for roots of unity import numpy as np class a : def __init__( self : Tuple , __lowerCAmelCase : Dict=None , __lowerCAmelCase : Union[str, Any]=None ): # Input as list _UpperCAmelCase = list(poly_a or [0] )[:] _UpperCAmelCase = list(poly_b or [0] )[:] # Remove leading zero coefficients while self.polyA[-1] == 0: self.polyA.pop() _UpperCAmelCase = len(self.polyA ) while self.polyB[-1] == 0: self.polyB.pop() _UpperCAmelCase = len(self.polyB ) # Add 0 to make lengths equal a power of 2 _UpperCAmelCase = int( 2 ** np.ceil(np.loga(len(self.polyA ) + len(self.polyB ) - 1 ) ) ) while len(self.polyA ) < self.c_max_length: self.polyA.append(0 ) while len(self.polyB ) < self.c_max_length: self.polyB.append(0 ) # A complex root used for the fourier transform _UpperCAmelCase = complex(mpmath.root(x=1 , n=self.c_max_length , k=1 ) ) # The product _UpperCAmelCase = self.__multiply() def lowerCAmelCase_ ( self : Optional[int] , __lowerCAmelCase : str ): _UpperCAmelCase = [[x] for x in self.polyA] if which == """A""" else [[x] for x in self.polyB] # Corner case if len(__lowerCAmelCase ) <= 1: return dft[0] # _UpperCAmelCase = self.c_max_length // 2 while next_ncol > 0: _UpperCAmelCase = [[] for i in range(__lowerCAmelCase )] _UpperCAmelCase = self.root**next_ncol # First half of next step _UpperCAmelCase = 1 for j in range(self.c_max_length // (next_ncol * 2) ): for i in range(__lowerCAmelCase ): new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j] ) current_root *= root # Second half of next step _UpperCAmelCase = 1 for j in range(self.c_max_length // (next_ncol * 2) ): for i in range(__lowerCAmelCase ): new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j] ) current_root *= root # Update _UpperCAmelCase = new_dft _UpperCAmelCase = next_ncol // 2 return dft[0] def lowerCAmelCase_ ( self : Tuple ): _UpperCAmelCase = self.__dft("""A""" ) _UpperCAmelCase = self.__dft("""B""" ) _UpperCAmelCase = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length )]] del dft_a del dft_b # Corner Case if len(inverce_c[0] ) <= 1: return inverce_c[0] # Inverse DFT _UpperCAmelCase = 2 while next_ncol <= self.c_max_length: _UpperCAmelCase = [[] for i in range(__lowerCAmelCase )] _UpperCAmelCase = self.root ** (next_ncol // 2) _UpperCAmelCase = 1 # First half of next step for j in range(self.c_max_length // next_ncol ): for i in range(next_ncol // 2 ): # Even positions new_inverse_c[i].append( ( inverce_c[i][j] + inverce_c[i][j + self.c_max_length // next_ncol] ) / 2 ) # Odd positions new_inverse_c[i + next_ncol // 2].append( ( inverce_c[i][j] - inverce_c[i][j + self.c_max_length // next_ncol] ) / (2 * current_root) ) current_root *= root # Update _UpperCAmelCase = new_inverse_c next_ncol *= 2 # Unpack _UpperCAmelCase = [round(x[0].real , 8 ) + round(x[0].imag , 8 ) * 1j for x in inverce_c] # Remove leading 0's while inverce_c[-1] == 0: inverce_c.pop() return inverce_c def __str__( self : Dict ): _UpperCAmelCase = """A = """ + """ + """.join( f'''{coef}*x^{i}''' for coef, i in enumerate(self.polyA[: self.len_A] ) ) _UpperCAmelCase = """B = """ + """ + """.join( f'''{coef}*x^{i}''' for coef, i in enumerate(self.polyB[: self.len_B] ) ) _UpperCAmelCase = """A*B = """ + """ + """.join( f'''{coef}*x^{i}''' for coef, i in enumerate(self.product ) ) return f'''{a}\n{b}\n{c}''' # Unit tests if __name__ == "__main__": import doctest doctest.testmod()
30
1
"""simple docstring""" import tempfile import numpy as np import torch from transformers import AutoTokenizer, TaEncoderModel from diffusers import DDPMScheduler, UNetaDConditionModel from diffusers.models.attention_processor import AttnAddedKVProcessor from diffusers.pipelines.deepfloyd_if import IFWatermarker from diffusers.utils.testing_utils import torch_device from ..test_pipelines_common import to_np class a : def lowerCAmelCase_ ( self : Dict ): torch.manual_seed(0 ) _UpperCAmelCase = TaEncoderModel.from_pretrained("""hf-internal-testing/tiny-random-t5""" ) torch.manual_seed(0 ) _UpperCAmelCase = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-t5""" ) torch.manual_seed(0 ) _UpperCAmelCase = UNetaDConditionModel( sample_size=32 , layers_per_block=1 , block_out_channels=[32, 64] , down_block_types=[ """ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D""", ] , mid_block_type="""UNetMidBlock2DSimpleCrossAttn""" , up_block_types=["""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""] , in_channels=3 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="""text""" , addition_embed_type_num_heads=2 , cross_attention_norm="""group_norm""" , resnet_time_scale_shift="""scale_shift""" , act_fn="""gelu""" , ) unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests torch.manual_seed(0 ) _UpperCAmelCase = DDPMScheduler( num_train_timesteps=1000 , beta_schedule="""squaredcos_cap_v2""" , beta_start=0.0_001 , beta_end=0.02 , thresholding=__lowerCAmelCase , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type="""epsilon""" , variance_type="""learned_range""" , ) torch.manual_seed(0 ) _UpperCAmelCase = IFWatermarker() return { "text_encoder": text_encoder, "tokenizer": tokenizer, "unet": unet, "scheduler": scheduler, "watermarker": watermarker, "safety_checker": None, "feature_extractor": None, } def lowerCAmelCase_ ( self : int ): torch.manual_seed(0 ) _UpperCAmelCase = TaEncoderModel.from_pretrained("""hf-internal-testing/tiny-random-t5""" ) torch.manual_seed(0 ) _UpperCAmelCase = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-t5""" ) torch.manual_seed(0 ) _UpperCAmelCase = UNetaDConditionModel( sample_size=32 , layers_per_block=[1, 2] , block_out_channels=[32, 64] , down_block_types=[ """ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D""", ] , mid_block_type="""UNetMidBlock2DSimpleCrossAttn""" , up_block_types=["""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""] , in_channels=6 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="""text""" , addition_embed_type_num_heads=2 , cross_attention_norm="""group_norm""" , resnet_time_scale_shift="""scale_shift""" , act_fn="""gelu""" , class_embed_type="""timestep""" , mid_block_scale_factor=1.414 , time_embedding_act_fn="""gelu""" , time_embedding_dim=32 , ) unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests torch.manual_seed(0 ) _UpperCAmelCase = DDPMScheduler( num_train_timesteps=1000 , beta_schedule="""squaredcos_cap_v2""" , beta_start=0.0_001 , beta_end=0.02 , thresholding=__lowerCAmelCase , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type="""epsilon""" , variance_type="""learned_range""" , ) torch.manual_seed(0 ) _UpperCAmelCase = DDPMScheduler( num_train_timesteps=1000 , beta_schedule="""squaredcos_cap_v2""" , beta_start=0.0_001 , beta_end=0.02 , ) torch.manual_seed(0 ) _UpperCAmelCase = 
IFWatermarker() return { "text_encoder": text_encoder, "tokenizer": tokenizer, "unet": unet, "scheduler": scheduler, "image_noising_scheduler": image_noising_scheduler, "watermarker": watermarker, "safety_checker": None, "feature_extractor": None, } def lowerCAmelCase_ ( self : str ): _UpperCAmelCase = self.get_dummy_components() _UpperCAmelCase = self.pipeline_class(**__lowerCAmelCase ) pipe.to(__lowerCAmelCase ) pipe.set_progress_bar_config(disable=__lowerCAmelCase ) _UpperCAmelCase = self.get_dummy_inputs(__lowerCAmelCase ) _UpperCAmelCase = inputs["""prompt"""] _UpperCAmelCase = inputs["""generator"""] _UpperCAmelCase = inputs["""num_inference_steps"""] _UpperCAmelCase = inputs["""output_type"""] if "image" in inputs: _UpperCAmelCase = inputs["""image"""] else: _UpperCAmelCase = None if "mask_image" in inputs: _UpperCAmelCase = inputs["""mask_image"""] else: _UpperCAmelCase = None if "original_image" in inputs: _UpperCAmelCase = inputs["""original_image"""] else: _UpperCAmelCase = None _UpperCAmelCase , _UpperCAmelCase = pipe.encode_prompt(__lowerCAmelCase ) # inputs with prompt converted to embeddings _UpperCAmelCase = { """prompt_embeds""": prompt_embeds, """negative_prompt_embeds""": negative_prompt_embeds, """generator""": generator, """num_inference_steps""": num_inference_steps, """output_type""": output_type, } if image is not None: _UpperCAmelCase = image if mask_image is not None: _UpperCAmelCase = mask_image if original_image is not None: _UpperCAmelCase = original_image # set all optional components to None for optional_component in pipe._optional_components: setattr(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase = pipe(**__lowerCAmelCase )[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(__lowerCAmelCase ) _UpperCAmelCase = self.pipeline_class.from_pretrained(__lowerCAmelCase ) pipe_loaded.to(__lowerCAmelCase ) pipe_loaded.set_progress_bar_config(disable=__lowerCAmelCase ) pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests for optional_component in pipe._optional_components: self.assertTrue( getattr(__lowerCAmelCase , __lowerCAmelCase ) is None , f'''`{optional_component}` did not stay set to None after loading.''' , ) _UpperCAmelCase = self.get_dummy_inputs(__lowerCAmelCase ) _UpperCAmelCase = inputs["""generator"""] _UpperCAmelCase = inputs["""num_inference_steps"""] _UpperCAmelCase = inputs["""output_type"""] # inputs with prompt converted to embeddings _UpperCAmelCase = { """prompt_embeds""": prompt_embeds, """negative_prompt_embeds""": negative_prompt_embeds, """generator""": generator, """num_inference_steps""": num_inference_steps, """output_type""": output_type, } if image is not None: _UpperCAmelCase = image if mask_image is not None: _UpperCAmelCase = mask_image if original_image is not None: _UpperCAmelCase = original_image _UpperCAmelCase = pipe_loaded(**__lowerCAmelCase )[0] _UpperCAmelCase = np.abs(to_np(__lowerCAmelCase ) - to_np(__lowerCAmelCase ) ).max() self.assertLess(__lowerCAmelCase , 1e-4 ) def lowerCAmelCase_ ( self : Any ): _UpperCAmelCase = self.get_dummy_components() _UpperCAmelCase = self.pipeline_class(**__lowerCAmelCase ) pipe.to(__lowerCAmelCase ) pipe.set_progress_bar_config(disable=__lowerCAmelCase ) _UpperCAmelCase = self.get_dummy_inputs(__lowerCAmelCase ) _UpperCAmelCase = pipe(**__lowerCAmelCase )[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(__lowerCAmelCase ) _UpperCAmelCase = self.pipeline_class.from_pretrained(__lowerCAmelCase 
) pipe_loaded.to(__lowerCAmelCase ) pipe_loaded.set_progress_bar_config(disable=__lowerCAmelCase ) pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests _UpperCAmelCase = self.get_dummy_inputs(__lowerCAmelCase ) _UpperCAmelCase = pipe_loaded(**__lowerCAmelCase )[0] _UpperCAmelCase = np.abs(to_np(__lowerCAmelCase ) - to_np(__lowerCAmelCase ) ).max() self.assertLess(__lowerCAmelCase , 1e-4 )
30
"""simple docstring""" import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto.configuration_auto import CONFIG_MAPPING UpperCAmelCase__ = logging.get_logger(__name__) class a ( lowerCAmelCase_ ): _snake_case : List[str] = 'upernet' def __init__( self : Tuple , __lowerCAmelCase : int=None , __lowerCAmelCase : Tuple=512 , __lowerCAmelCase : Union[str, Any]=0.02 , __lowerCAmelCase : Tuple=[1, 2, 3, 6] , __lowerCAmelCase : Any=True , __lowerCAmelCase : Any=0.4 , __lowerCAmelCase : Union[str, Any]=384 , __lowerCAmelCase : Optional[int]=256 , __lowerCAmelCase : List[str]=1 , __lowerCAmelCase : Optional[int]=False , __lowerCAmelCase : Optional[int]=255 , **__lowerCAmelCase : Union[str, Any] , ): super().__init__(**__lowerCAmelCase ) if backbone_config is None: logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" ) _UpperCAmelCase = CONFIG_MAPPING["""resnet"""](out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] ) elif isinstance(__lowerCAmelCase , __lowerCAmelCase ): _UpperCAmelCase = backbone_config.get("""model_type""" ) _UpperCAmelCase = CONFIG_MAPPING[backbone_model_type] _UpperCAmelCase = config_class.from_dict(__lowerCAmelCase ) _UpperCAmelCase = backbone_config _UpperCAmelCase = hidden_size _UpperCAmelCase = initializer_range _UpperCAmelCase = pool_scales _UpperCAmelCase = use_auxiliary_head _UpperCAmelCase = auxiliary_loss_weight _UpperCAmelCase = auxiliary_in_channels _UpperCAmelCase = auxiliary_channels _UpperCAmelCase = auxiliary_num_convs _UpperCAmelCase = auxiliary_concat_input _UpperCAmelCase = loss_ignore_index def lowerCAmelCase_ ( self : List[Any] ): _UpperCAmelCase = copy.deepcopy(self.__dict__ ) _UpperCAmelCase = self.backbone_config.to_dict() _UpperCAmelCase = self.__class__.model_type return output
30
1
"""simple docstring""" def __UpperCAmelCase ( lowercase ): """simple docstring""" if not isinstance(lowercase ,lowercase ): raise TypeError("""Input value must be an 'int' type""" ) _UpperCAmelCase = 0 while number: position += 1 number >>= 1 return position if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" from itertools import product def __UpperCAmelCase ( lowercase ,lowercase ): """simple docstring""" _UpperCAmelCase = sides_number _UpperCAmelCase = max_face_number * dice_number _UpperCAmelCase = [0] * (max_total + 1) _UpperCAmelCase = 1 _UpperCAmelCase = range(lowercase ,max_face_number + 1 ) for dice_numbers in product(lowercase ,repeat=lowercase ): _UpperCAmelCase = sum(lowercase ) totals_frequencies[total] += 1 return totals_frequencies def __UpperCAmelCase ( ): """simple docstring""" _UpperCAmelCase = total_frequency_distribution( sides_number=4 ,dice_number=9 ) _UpperCAmelCase = total_frequency_distribution( sides_number=6 ,dice_number=6 ) _UpperCAmelCase = 0 _UpperCAmelCase = 9 _UpperCAmelCase = 4 * 9 _UpperCAmelCase = 6 for peter_total in range(lowercase ,max_peter_total + 1 ): peter_wins_count += peter_totals_frequencies[peter_total] * sum( colin_totals_frequencies[min_colin_total:peter_total] ) _UpperCAmelCase = (4**9) * (6**6) _UpperCAmelCase = peter_wins_count / total_games_number _UpperCAmelCase = round(lowercase ,ndigits=7 ) return rounded_peter_win_probability if __name__ == "__main__": print(F'''{solution() = }''')
"""simple docstring""" import numpy as np import skfuzzy as fuzz if __name__ == "__main__": # Create universe of discourse in Python using linspace () UpperCAmelCase__ = np.linspace(start=0, stop=7_5, num=7_5, endpoint=True, retstep=False) # Create two fuzzy sets by defining any membership function # (trapmf(), gbellmf(), gaussmf(), etc). UpperCAmelCase__ = [0, 2_5, 5_0] UpperCAmelCase__ = [2_5, 5_0, 7_5] UpperCAmelCase__ = fuzz.membership.trimf(X, abca) UpperCAmelCase__ = fuzz.membership.trimf(X, abca) # Compute the different operations using inbuilt functions. UpperCAmelCase__ = np.ones(7_5) UpperCAmelCase__ = np.zeros((7_5,)) # 1. Union = max(µA(x), µB(x)) UpperCAmelCase__ = fuzz.fuzzy_or(X, young, X, middle_aged)[1] # 2. Intersection = min(µA(x), µB(x)) UpperCAmelCase__ = fuzz.fuzzy_and(X, young, X, middle_aged)[1] # 3. Complement (A) = (1- min(µA(x)) UpperCAmelCase__ = fuzz.fuzzy_not(young) # 4. Difference (A/B) = min(µA(x),(1- µB(x))) UpperCAmelCase__ = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1] # 5. Algebraic Sum = [µA(x) + µB(x) – (µA(x) * µB(x))] UpperCAmelCase__ = young + middle_aged - (young * middle_aged) # 6. Algebraic Product = (µA(x) * µB(x)) UpperCAmelCase__ = young * middle_aged # 7. Bounded Sum = min[1,(µA(x), µB(x))] UpperCAmelCase__ = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1] # 8. Bounded difference = min[0,(µA(x), µB(x))] UpperCAmelCase__ = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1] # max-min composition # max-product composition # Plot each set A, set B and each operation result using plot() and subplot(). from matplotlib import pyplot as plt plt.figure() plt.subplot(4, 3, 1) plt.plot(X, young) plt.title("""Young""") plt.grid(True) plt.subplot(4, 3, 2) plt.plot(X, middle_aged) plt.title("""Middle aged""") plt.grid(True) plt.subplot(4, 3, 3) plt.plot(X, union) plt.title("""union""") plt.grid(True) plt.subplot(4, 3, 4) plt.plot(X, intersection) plt.title("""intersection""") plt.grid(True) plt.subplot(4, 3, 5) plt.plot(X, complement_a) plt.title("""complement_a""") plt.grid(True) plt.subplot(4, 3, 6) plt.plot(X, difference) plt.title("""difference a/b""") plt.grid(True) plt.subplot(4, 3, 7) plt.plot(X, alg_sum) plt.title("""alg_sum""") plt.grid(True) plt.subplot(4, 3, 8) plt.plot(X, alg_product) plt.title("""alg_product""") plt.grid(True) plt.subplot(4, 3, 9) plt.plot(X, bdd_sum) plt.title("""bdd_sum""") plt.grid(True) plt.subplot(4, 3, 1_0) plt.plot(X, bdd_difference) plt.title("""bdd_difference""") plt.grid(True) plt.subplots_adjust(hspace=0.5) plt.show()
"""simple docstring""" import copy from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto.configuration_auto import AutoConfig if TYPE_CHECKING: from ... import PreTrainedTokenizerBase, TensorType UpperCAmelCase__ = logging.get_logger(__name__) class a ( lowerCAmelCase_ ): _snake_case : List[Any] = 'vision-encoder-decoder' _snake_case : Optional[int] = True def __init__( self : int , **__lowerCAmelCase : Any ): super().__init__(**__lowerCAmelCase ) if "encoder" not in kwargs or "decoder" not in kwargs: raise ValueError( f'''A configuraton of type {self.model_type} cannot be instantiated because ''' f'''not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}''' ) _UpperCAmelCase = kwargs.pop("""encoder""" ) _UpperCAmelCase = encoder_config.pop("""model_type""" ) _UpperCAmelCase = kwargs.pop("""decoder""" ) _UpperCAmelCase = decoder_config.pop("""model_type""" ) _UpperCAmelCase = AutoConfig.for_model(__lowerCAmelCase , **__lowerCAmelCase ) _UpperCAmelCase = AutoConfig.for_model(__lowerCAmelCase , **__lowerCAmelCase ) _UpperCAmelCase = True @classmethod def lowerCAmelCase_ ( cls : int , __lowerCAmelCase : PretrainedConfig , __lowerCAmelCase : PretrainedConfig , **__lowerCAmelCase : str ): logger.info("""Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config""" ) _UpperCAmelCase = True _UpperCAmelCase = True return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **__lowerCAmelCase ) def lowerCAmelCase_ ( self : int ): _UpperCAmelCase = copy.deepcopy(self.__dict__ ) _UpperCAmelCase = self.encoder.to_dict() _UpperCAmelCase = self.decoder.to_dict() _UpperCAmelCase = self.__class__.model_type return output class a ( lowerCAmelCase_ ): _snake_case : Union[str, Any] = version.parse('1.11' ) @property def lowerCAmelCase_ ( self : int ): return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) @property def lowerCAmelCase_ ( self : Tuple ): return 1e-4 @property def lowerCAmelCase_ ( self : Dict ): return OrderedDict({"""last_hidden_state""": {0: """batch""", 1: """encoder_sequence"""}} ) class a ( lowerCAmelCase_ ): @property def lowerCAmelCase_ ( self : Any ): _UpperCAmelCase = OrderedDict() _UpperCAmelCase = {0: """batch""", 1: """past_decoder_sequence + sequence"""} _UpperCAmelCase = {0: """batch""", 1: """past_decoder_sequence + sequence"""} _UpperCAmelCase = {0: """batch""", 1: """encoder_sequence"""} return common_inputs def lowerCAmelCase_ ( self : Union[str, Any] , __lowerCAmelCase : "PreTrainedTokenizerBase" , __lowerCAmelCase : int = -1 , __lowerCAmelCase : int = -1 , __lowerCAmelCase : bool = False , __lowerCAmelCase : Optional["TensorType"] = None , ): import torch _UpperCAmelCase = OrderedDict() _UpperCAmelCase = super().generate_dummy_inputs( __lowerCAmelCase , batch_size=__lowerCAmelCase , seq_length=__lowerCAmelCase , is_pair=__lowerCAmelCase , framework=__lowerCAmelCase ) _UpperCAmelCase , _UpperCAmelCase = dummy_input["""input_ids"""].shape _UpperCAmelCase = (batch, encoder_sequence, self._config.encoder_hidden_size) _UpperCAmelCase = dummy_input.pop("""input_ids""" ) _UpperCAmelCase = dummy_input.pop("""attention_mask""" ) _UpperCAmelCase = torch.zeros(__lowerCAmelCase ) return common_inputs class a ( lowerCAmelCase_ ): @property def lowerCAmelCase_ ( self : Tuple ): 
pass def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : PretrainedConfig ): return VisionEncoderDecoderEncoderOnnxConfig(__lowerCAmelCase ) def lowerCAmelCase_ ( self : int , __lowerCAmelCase : PretrainedConfig , __lowerCAmelCase : PretrainedConfig , __lowerCAmelCase : str = "default" ): _UpperCAmelCase = encoder_config.hidden_size return VisionEncoderDecoderDecoderOnnxConfig(__lowerCAmelCase , __lowerCAmelCase )
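# Illustrative usage sketch (added). The class and method names above are
# obfuscated in this dump; in the public transformers API the equivalent
# calls are:
#
#   from transformers import GPT2Config, ViTConfig, VisionEncoderDecoderConfig
#
#   config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(
#       ViTConfig(), GPT2Config()
#   )  # sets `is_decoder=True` and `add_cross_attention=True` on the decoder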
"""simple docstring""" import string import numpy def __UpperCAmelCase ( lowercase ,lowercase ): """simple docstring""" return b if a == 0 else greatest_common_divisor(b % a ,lowercase ) class a : _snake_case : List[Any] = string.ascii_uppercase + string.digits # This cipher takes alphanumerics into account # i.e. a total of 36 characters # take x and return x % len(key_string) _snake_case : List[Any] = numpy.vectorize(lambda lowerCAmelCase_ : x % 36 ) _snake_case : Union[str, Any] = numpy.vectorize(lowerCAmelCase_ ) def __init__( self : Union[str, Any] , __lowerCAmelCase : numpy.ndarray ): _UpperCAmelCase = self.modulus(__lowerCAmelCase ) # mod36 calc's on the encrypt key self.check_determinant() # validate the determinant of the encryption key _UpperCAmelCase = encrypt_key.shape[0] def lowerCAmelCase_ ( self : Any , __lowerCAmelCase : str ): return self.key_string.index(__lowerCAmelCase ) def lowerCAmelCase_ ( self : Dict , __lowerCAmelCase : int ): return self.key_string[round(__lowerCAmelCase )] def lowerCAmelCase_ ( self : Union[str, Any] ): _UpperCAmelCase = round(numpy.linalg.det(self.encrypt_key ) ) if det < 0: _UpperCAmelCase = det % len(self.key_string ) _UpperCAmelCase = len(self.key_string ) if greatest_common_divisor(__lowerCAmelCase , len(self.key_string ) ) != 1: _UpperCAmelCase = ( f'''determinant modular {req_l} of encryption key({det}) ''' f'''is not co prime w.r.t {req_l}.\nTry another key.''' ) raise ValueError(__lowerCAmelCase ) def lowerCAmelCase_ ( self : Union[str, Any] , __lowerCAmelCase : str ): _UpperCAmelCase = [char for char in text.upper() if char in self.key_string] _UpperCAmelCase = chars[-1] while len(__lowerCAmelCase ) % self.break_key != 0: chars.append(__lowerCAmelCase ) return "".join(__lowerCAmelCase ) def lowerCAmelCase_ ( self : Optional[int] , __lowerCAmelCase : str ): _UpperCAmelCase = self.process_text(text.upper() ) _UpperCAmelCase = """""" for i in range(0 , len(__lowerCAmelCase ) - self.break_key + 1 , self.break_key ): _UpperCAmelCase = text[i : i + self.break_key] _UpperCAmelCase = [self.replace_letters(__lowerCAmelCase ) for char in batch] _UpperCAmelCase = numpy.array([vec] ).T _UpperCAmelCase = self.modulus(self.encrypt_key.dot(__lowerCAmelCase ) ).T.tolist()[ 0 ] _UpperCAmelCase = """""".join( self.replace_digits(__lowerCAmelCase ) for num in batch_encrypted ) encrypted += encrypted_batch return encrypted def lowerCAmelCase_ ( self : List[Any] ): _UpperCAmelCase = round(numpy.linalg.det(self.encrypt_key ) ) if det < 0: _UpperCAmelCase = det % len(self.key_string ) _UpperCAmelCase = None for i in range(len(self.key_string ) ): if (det * i) % len(self.key_string ) == 1: _UpperCAmelCase = i break _UpperCAmelCase = ( det_inv * numpy.linalg.det(self.encrypt_key ) * numpy.linalg.inv(self.encrypt_key ) ) return self.to_int(self.modulus(__lowerCAmelCase ) ) def lowerCAmelCase_ ( self : List[str] , __lowerCAmelCase : str ): _UpperCAmelCase = self.make_decrypt_key() _UpperCAmelCase = self.process_text(text.upper() ) _UpperCAmelCase = """""" for i in range(0 , len(__lowerCAmelCase ) - self.break_key + 1 , self.break_key ): _UpperCAmelCase = text[i : i + self.break_key] _UpperCAmelCase = [self.replace_letters(__lowerCAmelCase ) for char in batch] _UpperCAmelCase = numpy.array([vec] ).T _UpperCAmelCase = self.modulus(decrypt_key.dot(__lowerCAmelCase ) ).T.tolist()[0] _UpperCAmelCase = """""".join( self.replace_digits(__lowerCAmelCase ) for num in batch_decrypted ) decrypted += decrypted_batch return decrypted def __UpperCAmelCase ( ): """simple 
docstring""" _UpperCAmelCase = int(input("""Enter the order of the encryption key: """ ) ) _UpperCAmelCase = [] print("""Enter each row of the encryption key with space separated integers""" ) for _ in range(lowercase ): _UpperCAmelCase = [int(lowercase ) for x in input().split()] hill_matrix.append(lowercase ) _UpperCAmelCase = HillCipher(numpy.array(lowercase ) ) print("""Would you like to encrypt or decrypt some text? (1 or 2)""" ) _UpperCAmelCase = input("""\n1. Encrypt\n2. Decrypt\n""" ) if option == "1": _UpperCAmelCase = input("""What text would you like to encrypt?: """ ) print("""Your encrypted text is:""" ) print(hc.encrypt(lowercase ) ) elif option == "2": _UpperCAmelCase = input("""What text would you like to decrypt?: """ ) print("""Your decrypted text is:""" ) print(hc.decrypt(lowercase ) ) if __name__ == "__main__": import doctest doctest.testmod() main()
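# Illustrative round trip (added). The method names above are obfuscated in
# this dump; the calls in main() imply the originals were `encrypt` and
# `decrypt`. The key [[2, 5], [1, 6]] has determinant 7, which is coprime
# with 36, so it is a valid Hill cipher key:
#
#   hc = HillCipher(numpy.array([[2, 5], [1, 6]]))
#   assert hc.decrypt(hc.encrypt("HELLO")) == "HELLOO"  # padded with the last char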
"""simple docstring""" import argparse from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline if __name__ == "__main__": UpperCAmelCase__ = argparse.ArgumentParser() parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""") parser.add_argument( """--txt2img_unclip""", default="""kakaobrain/karlo-v1-alpha""", type=str, required=False, help="""The pretrained txt2img unclip.""", ) UpperCAmelCase__ = parser.parse_args() UpperCAmelCase__ = UnCLIPPipeline.from_pretrained(args.txtaimg_unclip) UpperCAmelCase__ = CLIPImageProcessor() UpperCAmelCase__ = CLIPVisionModelWithProjection.from_pretrained("""openai/clip-vit-large-patch14""") UpperCAmelCase__ = UnCLIPImageVariationPipeline( decoder=txtaimg.decoder, text_encoder=txtaimg.text_encoder, tokenizer=txtaimg.tokenizer, text_proj=txtaimg.text_proj, feature_extractor=feature_extractor, image_encoder=image_encoder, super_res_first=txtaimg.super_res_first, super_res_last=txtaimg.super_res_last, decoder_scheduler=txtaimg.decoder_scheduler, super_res_scheduler=txtaimg.super_res_scheduler, ) imgaimg.save_pretrained(args.dump_path)
"""simple docstring""" import random import unittest from torch.utils.data import BatchSampler, DataLoader, IterableDataset from accelerate import Accelerator from accelerate.data_loader import ( BatchSamplerShard, DataLoaderDispatcher, DataLoaderShard, IterableDatasetShard, SkipBatchSampler, SkipDataLoader, skip_first_batches, ) class a ( lowerCAmelCase_ ): def __init__( self : Tuple , __lowerCAmelCase : Tuple=0.01 , __lowerCAmelCase : Dict=1000 ): _UpperCAmelCase = p_stop _UpperCAmelCase = max_length def __iter__( self : int ): _UpperCAmelCase = 0 _UpperCAmelCase = False while not stop and count < self.max_length: yield count count += 1 _UpperCAmelCase = random.random() < self.p_stop class a ( unittest.TestCase ): def lowerCAmelCase_ ( self : Dict , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : str=False , __lowerCAmelCase : Tuple=True ): _UpperCAmelCase = [ BatchSamplerShard(__lowerCAmelCase , 2 , __lowerCAmelCase , split_batches=__lowerCAmelCase , even_batches=__lowerCAmelCase ) for i in range(2 ) ] _UpperCAmelCase = [list(__lowerCAmelCase ) for batch_sampler_shard in batch_sampler_shards] if not split_batches: self.assertListEqual([len(__lowerCAmelCase ) for shard in batch_sampler_shards] , [len(__lowerCAmelCase ) for e in expected] ) self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase ) def lowerCAmelCase_ ( self : Any ): # Check the shards when the dataset is a round multiple of total batch size. _UpperCAmelCase = BatchSampler(range(24 ) , batch_size=3 , drop_last=__lowerCAmelCase ) _UpperCAmelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]], ] self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase = BatchSampler(range(24 ) , batch_size=3 , drop_last=__lowerCAmelCase ) # Expected shouldn't change self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase ) # Check the shards when the dataset is a round multiple of batch size but not total batch size. _UpperCAmelCase = BatchSampler(range(21 ) , batch_size=3 , drop_last=__lowerCAmelCase ) _UpperCAmelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]], ] self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase = BatchSampler(range(21 ) , batch_size=3 , drop_last=__lowerCAmelCase ) _UpperCAmelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase ) # Check the shards when the dataset is not a round multiple of batch size but has a multiple of # num_processes batch. _UpperCAmelCase = BatchSampler(range(22 ) , batch_size=3 , drop_last=__lowerCAmelCase ) _UpperCAmelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]], ] self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase = BatchSampler(range(22 ) , batch_size=3 , drop_last=__lowerCAmelCase ) _UpperCAmelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase ) # Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of # num_processes batch. 
_UpperCAmelCase = BatchSampler(range(20 ) , batch_size=3 , drop_last=__lowerCAmelCase ) _UpperCAmelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]], ] self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase = BatchSampler(range(20 ) , batch_size=3 , drop_last=__lowerCAmelCase ) _UpperCAmelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase ) # Check the shards when the dataset is very small. _UpperCAmelCase = BatchSampler(range(2 ) , batch_size=3 , drop_last=__lowerCAmelCase ) _UpperCAmelCase = [[[0, 1, 0]], [[1, 0, 1]]] self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase = BatchSampler(range(2 ) , batch_size=3 , drop_last=__lowerCAmelCase ) _UpperCAmelCase = [[], []] self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase ) def lowerCAmelCase_ ( self : Optional[Any] ): # Check the shards when the dataset is a round multiple of batch size. _UpperCAmelCase = BatchSampler(range(24 ) , batch_size=4 , drop_last=__lowerCAmelCase ) _UpperCAmelCase = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]], ] self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase ) _UpperCAmelCase = BatchSampler(range(24 ) , batch_size=4 , drop_last=__lowerCAmelCase ) # Expected shouldn't change self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase ) # Check the shards when the dataset is not a round multiple of batch size. _UpperCAmelCase = BatchSampler(range(22 ) , batch_size=4 , drop_last=__lowerCAmelCase ) _UpperCAmelCase = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]], ] self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase ) _UpperCAmelCase = BatchSampler(range(22 ) , batch_size=4 , drop_last=__lowerCAmelCase ) _UpperCAmelCase = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase ) # Check the shards when the dataset is not a round multiple of batch size or num_processes. _UpperCAmelCase = BatchSampler(range(21 ) , batch_size=4 , drop_last=__lowerCAmelCase ) _UpperCAmelCase = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]], ] self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase ) _UpperCAmelCase = BatchSampler(range(21 ) , batch_size=4 , drop_last=__lowerCAmelCase ) _UpperCAmelCase = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase ) # Check the shards when the dataset is very small. 
_UpperCAmelCase = BatchSampler(range(2 ) , batch_size=4 , drop_last=__lowerCAmelCase ) _UpperCAmelCase = [[[0, 1]], [[0, 1]]] self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase ) _UpperCAmelCase = BatchSampler(range(2 ) , batch_size=4 , drop_last=__lowerCAmelCase ) _UpperCAmelCase = [[], []] self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase ) def lowerCAmelCase_ ( self : Union[str, Any] ): # Check the shards when the dataset is a round multiple of total batch size. _UpperCAmelCase = BatchSampler(range(24 ) , batch_size=3 , drop_last=__lowerCAmelCase ) _UpperCAmelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]], ] self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , even_batches=__lowerCAmelCase ) _UpperCAmelCase = BatchSampler(range(24 ) , batch_size=3 , drop_last=__lowerCAmelCase ) # Expected shouldn't change self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , even_batches=__lowerCAmelCase ) # Check the shards when the dataset is a round multiple of batch size but not total batch size. _UpperCAmelCase = BatchSampler(range(21 ) , batch_size=3 , drop_last=__lowerCAmelCase ) _UpperCAmelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , even_batches=__lowerCAmelCase ) _UpperCAmelCase = BatchSampler(range(21 ) , batch_size=3 , drop_last=__lowerCAmelCase ) _UpperCAmelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , even_batches=__lowerCAmelCase ) # Check the shards when the dataset is not a round multiple of batch size but has a multiple of # num_processes batch. _UpperCAmelCase = BatchSampler(range(22 ) , batch_size=3 , drop_last=__lowerCAmelCase ) _UpperCAmelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]], ] self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , even_batches=__lowerCAmelCase ) _UpperCAmelCase = BatchSampler(range(22 ) , batch_size=3 , drop_last=__lowerCAmelCase ) _UpperCAmelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , even_batches=__lowerCAmelCase ) # Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of # num_processes batch. _UpperCAmelCase = BatchSampler(range(20 ) , batch_size=3 , drop_last=__lowerCAmelCase ) _UpperCAmelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , even_batches=__lowerCAmelCase ) _UpperCAmelCase = BatchSampler(range(20 ) , batch_size=3 , drop_last=__lowerCAmelCase ) _UpperCAmelCase = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , even_batches=__lowerCAmelCase ) # Check the shards when the dataset is very small. 
_UpperCAmelCase = BatchSampler(range(2 ) , batch_size=3 , drop_last=__lowerCAmelCase ) _UpperCAmelCase = [[[0, 1]], []] self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , even_batches=__lowerCAmelCase ) _UpperCAmelCase = BatchSampler(range(2 ) , batch_size=3 , drop_last=__lowerCAmelCase ) _UpperCAmelCase = [[], []] self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , even_batches=__lowerCAmelCase ) def lowerCAmelCase_ ( self : List[Any] ): # Check the shards when the dataset is a round multiple of batch size. _UpperCAmelCase = BatchSampler(range(24 ) , batch_size=4 , drop_last=__lowerCAmelCase ) _UpperCAmelCase = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]], ] self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase , even_batches=__lowerCAmelCase ) _UpperCAmelCase = BatchSampler(range(24 ) , batch_size=4 , drop_last=__lowerCAmelCase ) # Expected shouldn't change self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase , even_batches=__lowerCAmelCase ) # Check the shards when the dataset is not a round multiple of batch size. _UpperCAmelCase = BatchSampler(range(22 ) , batch_size=4 , drop_last=__lowerCAmelCase ) _UpperCAmelCase = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase , even_batches=__lowerCAmelCase ) _UpperCAmelCase = BatchSampler(range(22 ) , batch_size=4 , drop_last=__lowerCAmelCase ) _UpperCAmelCase = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase , even_batches=__lowerCAmelCase ) # Check the shards when the dataset is not a round multiple of batch size or num_processes. _UpperCAmelCase = BatchSampler(range(21 ) , batch_size=4 , drop_last=__lowerCAmelCase ) _UpperCAmelCase = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase , even_batches=__lowerCAmelCase ) _UpperCAmelCase = BatchSampler(range(21 ) , batch_size=4 , drop_last=__lowerCAmelCase ) _UpperCAmelCase = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase , even_batches=__lowerCAmelCase ) # Check the shards when the dataset is very small. 
_UpperCAmelCase = BatchSampler(range(2 ) , batch_size=4 , drop_last=__lowerCAmelCase ) _UpperCAmelCase = [[[0, 1]], []] self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase , even_batches=__lowerCAmelCase ) _UpperCAmelCase = BatchSampler(range(2 ) , batch_size=4 , drop_last=__lowerCAmelCase ) _UpperCAmelCase = [[], []] self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase , even_batches=__lowerCAmelCase ) def lowerCAmelCase_ ( self : Tuple ): _UpperCAmelCase = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]] _UpperCAmelCase = [BatchSamplerShard(__lowerCAmelCase , 2 , __lowerCAmelCase , even_batches=__lowerCAmelCase ) for i in range(2 )] self.assertEqual(len(batch_sampler_shards[0] ) , 3 ) self.assertEqual(len(batch_sampler_shards[1] ) , 2 ) self.assertListEqual(list(batch_sampler_shards[0] ) , [[0, 1, 2], [5, 6, 7, 8], [12, 13]] ) self.assertListEqual(list(batch_sampler_shards[1] ) , [[3, 4], [9, 10, 11]] ) def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Dict=False , __lowerCAmelCase : int=2 , __lowerCAmelCase : Tuple=False ): random.seed(__lowerCAmelCase ) _UpperCAmelCase = list(__lowerCAmelCase ) _UpperCAmelCase = [ IterableDatasetShard( __lowerCAmelCase , batch_size=__lowerCAmelCase , drop_last=__lowerCAmelCase , num_processes=__lowerCAmelCase , process_index=__lowerCAmelCase , split_batches=__lowerCAmelCase , ) for i in range(__lowerCAmelCase ) ] _UpperCAmelCase = [] for iterable_dataset_shard in iterable_dataset_shards: # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results. random.seed(__lowerCAmelCase ) iterable_dataset_lists.append(list(__lowerCAmelCase ) ) _UpperCAmelCase = batch_size // num_processes if split_batches else batch_size # All iterable dataset shard should have the same length, a round multiple of shard_batch_size _UpperCAmelCase = iterable_dataset_lists[0] for l in iterable_dataset_lists[1:]: self.assertEqual(len(__lowerCAmelCase ) , len(__lowerCAmelCase ) ) self.assertTrue(len(__lowerCAmelCase ) % shard_batch_size == 0 ) _UpperCAmelCase = [] for idx in range(0 , len(__lowerCAmelCase ) , __lowerCAmelCase ): for l in iterable_dataset_lists: observed += l[idx : idx + shard_batch_size] if not drop_last: while len(__lowerCAmelCase ) < len(__lowerCAmelCase ): reference += reference self.assertListEqual(__lowerCAmelCase , reference[: len(__lowerCAmelCase )] ) def lowerCAmelCase_ ( self : List[str] ): _UpperCAmelCase = 42 _UpperCAmelCase = RandomIterableDataset() self.check_iterable_dataset_shards(__lowerCAmelCase , __lowerCAmelCase , batch_size=4 , drop_last=__lowerCAmelCase , split_batches=__lowerCAmelCase ) self.check_iterable_dataset_shards(__lowerCAmelCase , __lowerCAmelCase , batch_size=4 , drop_last=__lowerCAmelCase , split_batches=__lowerCAmelCase ) self.check_iterable_dataset_shards(__lowerCAmelCase , __lowerCAmelCase , batch_size=4 , drop_last=__lowerCAmelCase , split_batches=__lowerCAmelCase ) self.check_iterable_dataset_shards(__lowerCAmelCase , __lowerCAmelCase , batch_size=4 , drop_last=__lowerCAmelCase , split_batches=__lowerCAmelCase ) # Edge case with a very small dataset _UpperCAmelCase = RandomIterableDataset(max_length=2 ) self.check_iterable_dataset_shards(__lowerCAmelCase , __lowerCAmelCase , batch_size=4 , drop_last=__lowerCAmelCase , split_batches=__lowerCAmelCase ) 
self.check_iterable_dataset_shards(__lowerCAmelCase , __lowerCAmelCase , batch_size=4 , drop_last=__lowerCAmelCase , split_batches=__lowerCAmelCase ) self.check_iterable_dataset_shards(__lowerCAmelCase , __lowerCAmelCase , batch_size=4 , drop_last=__lowerCAmelCase , split_batches=__lowerCAmelCase ) self.check_iterable_dataset_shards(__lowerCAmelCase , __lowerCAmelCase , batch_size=4 , drop_last=__lowerCAmelCase , split_batches=__lowerCAmelCase ) def lowerCAmelCase_ ( self : Union[str, Any] ): _UpperCAmelCase = BatchSampler(range(16 ) , batch_size=4 , drop_last=__lowerCAmelCase ) _UpperCAmelCase = SkipBatchSampler(__lowerCAmelCase , 2 ) self.assertListEqual(list(__lowerCAmelCase ) , [[8, 9, 10, 11], [12, 13, 14, 15]] ) def lowerCAmelCase_ ( self : List[str] ): _UpperCAmelCase = SkipDataLoader(list(range(16 ) ) , batch_size=4 , skip_batches=2 ) self.assertListEqual([t.tolist() for t in dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] ) def lowerCAmelCase_ ( self : List[str] ): _UpperCAmelCase = DataLoader(list(range(16 ) ) , batch_size=4 ) _UpperCAmelCase = skip_first_batches(__lowerCAmelCase , num_batches=2 ) self.assertListEqual([t.tolist() for t in new_dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] ) def lowerCAmelCase_ ( self : List[Any] ): _UpperCAmelCase = DataLoaderShard(list(range(16 ) ) , batch_size=4 ) for idx, _ in enumerate(__lowerCAmelCase ): self.assertEqual(dataloader.end_of_dataloader , idx == 3 ) # Test it also works on the second iteration for idx, _ in enumerate(__lowerCAmelCase ): self.assertEqual(dataloader.end_of_dataloader , idx == 3 ) def lowerCAmelCase_ ( self : Optional[int] ): Accelerator() _UpperCAmelCase = DataLoaderDispatcher(range(16 ) , batch_size=4 ) for idx, _ in enumerate(__lowerCAmelCase ): self.assertEqual(dataloader.end_of_dataloader , idx == 3 ) # Test it also works on the second iteration for idx, _ in enumerate(__lowerCAmelCase ): self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
"""simple docstring""" import functools import gc import inspect import torch from .imports import is_npu_available, is_xpu_available def __UpperCAmelCase ( *lowercase ): """simple docstring""" if not isinstance(lowercase ,lowercase ): _UpperCAmelCase = list(lowercase ) for i in range(len(lowercase ) ): _UpperCAmelCase = None gc.collect() if is_xpu_available(): torch.xpu.empty_cache() elif is_npu_available(): torch.npu.empty_cache() else: torch.cuda.empty_cache() return objects def __UpperCAmelCase ( lowercase ): """simple docstring""" _UpperCAmelCase = [ """CUDA out of memory.""", # CUDA OOM """cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.""", # CUDNN SNAFU """DefaultCPUAllocator: can't allocate memory""", # CPU OOM ] if isinstance(lowercase ,lowercase ) and len(exception.args ) == 1: return any(err in exception.args[0] for err in _statements ) return False def __UpperCAmelCase ( lowercase = None ,lowercase = 1_28 ): """simple docstring""" if function is None: return functools.partial(lowercase ,starting_batch_size=lowercase ) _UpperCAmelCase = starting_batch_size def decorator(*lowercase ,**lowercase ): nonlocal batch_size gc.collect() if is_xpu_available(): torch.xpu.empty_cache() elif is_npu_available(): torch.npu.empty_cache() else: torch.cuda.empty_cache() _UpperCAmelCase = list(inspect.signature(lowercase ).parameters.keys() ) # Guard against user error if len(lowercase ) < (len(lowercase ) + 1): _UpperCAmelCase = """, """.join([f'''{arg}={value}''' for arg, value in zip(params[1:] ,args[1:] )] ) raise TypeError( f'''Batch size was passed into `{function.__name__}` as the first argument when called.''' f'''Remove this as the decorator already does so: `{function.__name__}({arg_str})`''' ) while True: if batch_size == 0: raise RuntimeError("""No executable batch size found, reached zero.""" ) try: return function(lowercase ,*lowercase ,**lowercase ) except Exception as e: if should_reduce_batch_size(lowercase ): gc.collect() if is_xpu_available(): torch.xpu.empty_cache() elif is_npu_available(): torch.npu.empty_cache() else: torch.cuda.empty_cache() batch_size //= 2 else: raise return decorator
"""simple docstring""" import os def __UpperCAmelCase ( ): """simple docstring""" _UpperCAmelCase = os.path.join(os.path.dirname(lowercase ) ,"""num.txt""" ) with open(lowercase ) as file_hand: return str(sum(int(lowercase ) for line in file_hand ) )[:10] if __name__ == "__main__": print(solution())
"""simple docstring""" from __future__ import annotations import unittest from transformers import MobileBertConfig, is_tf_available from transformers.models.auto import get_values from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_MODEL_FOR_PRETRAINING_MAPPING, TFMobileBertForMaskedLM, TFMobileBertForMultipleChoice, TFMobileBertForNextSentencePrediction, TFMobileBertForPreTraining, TFMobileBertForQuestionAnswering, TFMobileBertForSequenceClassification, TFMobileBertForTokenClassification, TFMobileBertModel, ) @require_tf class a ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): _snake_case : str = ( ( TFMobileBertModel, TFMobileBertForMaskedLM, TFMobileBertForNextSentencePrediction, TFMobileBertForPreTraining, TFMobileBertForQuestionAnswering, TFMobileBertForSequenceClassification, TFMobileBertForTokenClassification, TFMobileBertForMultipleChoice, ) if is_tf_available() else () ) _snake_case : Dict = ( { 'feature-extraction': TFMobileBertModel, 'fill-mask': TFMobileBertForMaskedLM, 'question-answering': TFMobileBertForQuestionAnswering, 'text-classification': TFMobileBertForSequenceClassification, 'token-classification': TFMobileBertForTokenClassification, 'zero-shot': TFMobileBertForSequenceClassification, } if is_tf_available() else {} ) _snake_case : Dict = False _snake_case : List[str] = False def lowerCAmelCase_ ( self : Dict , __lowerCAmelCase : Dict , __lowerCAmelCase : List[Any] , __lowerCAmelCase : int=False ): _UpperCAmelCase = super()._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase ) if return_labels: if model_class in get_values(__lowerCAmelCase ): _UpperCAmelCase = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) return inputs_dict class a ( lowerCAmelCase_ ): def __init__( self : Optional[int] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : str=13 , __lowerCAmelCase : Any=7 , __lowerCAmelCase : Optional[int]=True , __lowerCAmelCase : str=True , __lowerCAmelCase : List[str]=True , __lowerCAmelCase : Tuple=True , __lowerCAmelCase : List[str]=99 , __lowerCAmelCase : Optional[int]=32 , __lowerCAmelCase : str=32 , __lowerCAmelCase : Optional[Any]=2 , __lowerCAmelCase : int=4 , __lowerCAmelCase : Tuple=37 , __lowerCAmelCase : Union[str, Any]="gelu" , __lowerCAmelCase : int=0.1 , __lowerCAmelCase : Optional[Any]=0.1 , __lowerCAmelCase : int=512 , __lowerCAmelCase : List[Any]=16 , __lowerCAmelCase : Optional[Any]=2 , __lowerCAmelCase : Optional[int]=0.02 , __lowerCAmelCase : Optional[int]=3 , __lowerCAmelCase : Dict=4 , __lowerCAmelCase : str=None , ): _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = seq_length _UpperCAmelCase = is_training _UpperCAmelCase = use_input_mask _UpperCAmelCase = use_token_type_ids _UpperCAmelCase = use_labels _UpperCAmelCase = vocab_size _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_act _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = max_position_embeddings _UpperCAmelCase = type_vocab_size _UpperCAmelCase = type_sequence_label_size _UpperCAmelCase = initializer_range _UpperCAmelCase = num_labels _UpperCAmelCase = 
num_choices _UpperCAmelCase = scope _UpperCAmelCase = embedding_size def lowerCAmelCase_ ( self : int ): _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _UpperCAmelCase = None if self.use_input_mask: _UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] ) _UpperCAmelCase = None if self.use_token_type_ids: _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None if self.use_labels: _UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices ) _UpperCAmelCase = MobileBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCAmelCase_ ( self : str , __lowerCAmelCase : List[str] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any ): _UpperCAmelCase = TFMobileBertModel(config=__lowerCAmelCase ) _UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} _UpperCAmelCase = model(__lowerCAmelCase ) _UpperCAmelCase = [input_ids, input_mask] _UpperCAmelCase = model(__lowerCAmelCase ) _UpperCAmelCase = model(__lowerCAmelCase ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def lowerCAmelCase_ ( self : Any , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Any , __lowerCAmelCase : List[str] , __lowerCAmelCase : Any , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[str] ): _UpperCAmelCase = TFMobileBertForMaskedLM(config=__lowerCAmelCase ) _UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} _UpperCAmelCase = model(__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCAmelCase_ ( self : int , __lowerCAmelCase : List[str] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : int , __lowerCAmelCase : Dict , __lowerCAmelCase : Any , __lowerCAmelCase : Dict ): _UpperCAmelCase = TFMobileBertForNextSentencePrediction(config=__lowerCAmelCase ) _UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} _UpperCAmelCase = model(__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) ) def lowerCAmelCase_ ( self : Dict , __lowerCAmelCase : int , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[Any] , 
__lowerCAmelCase : List[str] , __lowerCAmelCase : Tuple ): _UpperCAmelCase = TFMobileBertForPreTraining(config=__lowerCAmelCase ) _UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} _UpperCAmelCase = model(__lowerCAmelCase ) self.parent.assertEqual( result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) ) def lowerCAmelCase_ ( self : int , __lowerCAmelCase : Tuple , __lowerCAmelCase : int , __lowerCAmelCase : Any , __lowerCAmelCase : str , __lowerCAmelCase : Any , __lowerCAmelCase : int , __lowerCAmelCase : Tuple ): _UpperCAmelCase = self.num_labels _UpperCAmelCase = TFMobileBertForSequenceClassification(config=__lowerCAmelCase ) _UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} _UpperCAmelCase = model(__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Dict , __lowerCAmelCase : Any , __lowerCAmelCase : Any , __lowerCAmelCase : int , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Union[str, Any] ): _UpperCAmelCase = self.num_choices _UpperCAmelCase = TFMobileBertForMultipleChoice(config=__lowerCAmelCase ) _UpperCAmelCase = tf.tile(tf.expand_dims(__lowerCAmelCase , 1 ) , (1, self.num_choices, 1) ) _UpperCAmelCase = tf.tile(tf.expand_dims(__lowerCAmelCase , 1 ) , (1, self.num_choices, 1) ) _UpperCAmelCase = tf.tile(tf.expand_dims(__lowerCAmelCase , 1 ) , (1, self.num_choices, 1) ) _UpperCAmelCase = { """input_ids""": multiple_choice_inputs_ids, """attention_mask""": multiple_choice_input_mask, """token_type_ids""": multiple_choice_token_type_ids, } _UpperCAmelCase = model(__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Any , __lowerCAmelCase : Dict , __lowerCAmelCase : List[str] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any ): _UpperCAmelCase = self.num_labels _UpperCAmelCase = TFMobileBertForTokenClassification(config=__lowerCAmelCase ) _UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} _UpperCAmelCase = model(__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCAmelCase_ ( self : Dict , __lowerCAmelCase : str , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : int , __lowerCAmelCase : Any , __lowerCAmelCase : str , __lowerCAmelCase : List[Any] ): _UpperCAmelCase = TFMobileBertForQuestionAnswering(config=__lowerCAmelCase ) _UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} _UpperCAmelCase = model(__lowerCAmelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCAmelCase_ ( self : Tuple ): _UpperCAmelCase = self.prepare_config_and_inputs() ( ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ) = config_and_inputs _UpperCAmelCase = 
{"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask} return config, inputs_dict def lowerCAmelCase_ ( self : str ): _UpperCAmelCase = TFMobileBertModelTest.TFMobileBertModelTester(self ) _UpperCAmelCase = ConfigTester(self , config_class=__lowerCAmelCase , hidden_size=37 ) def lowerCAmelCase_ ( self : Any ): self.config_tester.run_common_tests() def lowerCAmelCase_ ( self : Any ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_model(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : List[Any] ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_masked_lm(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : Union[str, Any] ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_multiple_choice(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : Any ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : Optional[int] ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_pretraining(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : str ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_question_answering(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : str ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_sequence_classification(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : str ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_token_classification(*__lowerCAmelCase ) @slow def lowerCAmelCase_ ( self : int ): # for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: for model_name in ["google/mobilebert-uncased"]: _UpperCAmelCase = TFMobileBertModel.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) @require_tf class a ( unittest.TestCase ): @slow def lowerCAmelCase_ ( self : List[Any] ): _UpperCAmelCase = TFMobileBertForPreTraining.from_pretrained("""google/mobilebert-uncased""" ) _UpperCAmelCase = tf.constant([[0, 1, 2, 3, 4, 5]] ) _UpperCAmelCase = model(__lowerCAmelCase )[0] _UpperCAmelCase = [1, 6, 3_0522] self.assertEqual(output.shape , __lowerCAmelCase ) _UpperCAmelCase = tf.constant( [ [ [-4.5_919_547, -9.248_295, -9.645_256], [-6.7_306_175, -6.440_284, -6.6_052_837], [-7.2_743_506, -6.7_847_915, -6.024_673], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , __lowerCAmelCase , atol=1e-4 )
"""simple docstring""" import json import os import shutil import tempfile import unittest import numpy as np from transformers import BertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer from transformers.testing_utils import require_tokenizers, require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor @require_tokenizers @require_vision class a ( unittest.TestCase ): def lowerCAmelCase_ ( self : Any ): _UpperCAmelCase = tempfile.mkdtemp() # fmt: off _UpperCAmelCase = ["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing""", """,""", """low""", """lowest"""] # fmt: on _UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) ) _UpperCAmelCase = { """do_resize""": True, """size""": {"""height""": 18, """width""": 18}, """do_normalize""": True, """image_mean""": [0.5, 0.5, 0.5], """image_std""": [0.5, 0.5, 0.5], } _UpperCAmelCase = os.path.join(self.tmpdirname , __lowerCAmelCase ) with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp: json.dump(__lowerCAmelCase , __lowerCAmelCase ) def lowerCAmelCase_ ( self : List[Any] , **__lowerCAmelCase : List[Any] ): return BertTokenizer.from_pretrained(self.tmpdirname , **__lowerCAmelCase ) def lowerCAmelCase_ ( self : Tuple , **__lowerCAmelCase : List[Any] ): return ViTImageProcessor.from_pretrained(self.tmpdirname , **__lowerCAmelCase ) def lowerCAmelCase_ ( self : int ): shutil.rmtree(self.tmpdirname ) def lowerCAmelCase_ ( self : Optional[Any] ): _UpperCAmelCase = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] _UpperCAmelCase = [Image.fromarray(np.moveaxis(__lowerCAmelCase , 0 , -1 ) ) for x in image_inputs] return image_inputs def lowerCAmelCase_ ( self : Optional[int] ): _UpperCAmelCase = self.get_tokenizer() _UpperCAmelCase = self.get_image_processor() _UpperCAmelCase = VisionTextDualEncoderProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) processor.save_pretrained(self.tmpdirname ) _UpperCAmelCase = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor.image_processor , __lowerCAmelCase ) def lowerCAmelCase_ ( self : Dict ): _UpperCAmelCase = VisionTextDualEncoderProcessor( tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) _UpperCAmelCase = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" ) _UpperCAmelCase = self.get_image_processor(do_normalize=__lowerCAmelCase , padding_value=1.0 ) _UpperCAmelCase = VisionTextDualEncoderProcessor.from_pretrained( self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=__lowerCAmelCase , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) ) 
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , __lowerCAmelCase ) def lowerCAmelCase_ ( self : str ): _UpperCAmelCase = self.get_image_processor() _UpperCAmelCase = self.get_tokenizer() _UpperCAmelCase = VisionTextDualEncoderProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) _UpperCAmelCase = self.prepare_image_inputs() _UpperCAmelCase = image_processor(__lowerCAmelCase , return_tensors="""np""" ) _UpperCAmelCase = processor(images=__lowerCAmelCase , return_tensors="""np""" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) def lowerCAmelCase_ ( self : Tuple ): _UpperCAmelCase = self.get_image_processor() _UpperCAmelCase = self.get_tokenizer() _UpperCAmelCase = VisionTextDualEncoderProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) _UpperCAmelCase = """lower newer""" _UpperCAmelCase = processor(text=__lowerCAmelCase ) _UpperCAmelCase = tokenizer(__lowerCAmelCase ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def lowerCAmelCase_ ( self : Tuple ): _UpperCAmelCase = self.get_image_processor() _UpperCAmelCase = self.get_tokenizer() _UpperCAmelCase = VisionTextDualEncoderProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) _UpperCAmelCase = """lower newer""" _UpperCAmelCase = self.prepare_image_inputs() _UpperCAmelCase = processor(text=__lowerCAmelCase , images=__lowerCAmelCase ) self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """token_type_ids""", """attention_mask""", """pixel_values"""] ) # test if it raises when no input is passed with self.assertRaises(__lowerCAmelCase ): processor() def lowerCAmelCase_ ( self : Optional[int] ): _UpperCAmelCase = self.get_image_processor() _UpperCAmelCase = self.get_tokenizer() _UpperCAmelCase = VisionTextDualEncoderProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) _UpperCAmelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] _UpperCAmelCase = processor.batch_decode(__lowerCAmelCase ) _UpperCAmelCase = tokenizer.batch_decode(__lowerCAmelCase ) self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase ) def lowerCAmelCase_ ( self : List[str] ): _UpperCAmelCase = self.get_image_processor() _UpperCAmelCase = self.get_tokenizer() _UpperCAmelCase = VisionTextDualEncoderProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) _UpperCAmelCase = """lower newer""" _UpperCAmelCase = self.prepare_image_inputs() _UpperCAmelCase = processor(text=__lowerCAmelCase , images=__lowerCAmelCase ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
"""simple docstring""" from ....configuration_utils import PretrainedConfig from ....utils import logging UpperCAmelCase__ = logging.get_logger(__name__) UpperCAmelCase__ = { """Visual-Attention-Network/van-base""": ( """https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json""" ), } class a ( lowerCAmelCase_ ): _snake_case : int = 'van' def __init__( self : Any , __lowerCAmelCase : Tuple=224 , __lowerCAmelCase : List[Any]=3 , __lowerCAmelCase : Tuple=[7, 3, 3, 3] , __lowerCAmelCase : Dict=[4, 2, 2, 2] , __lowerCAmelCase : Optional[Any]=[64, 128, 320, 512] , __lowerCAmelCase : Optional[int]=[3, 3, 12, 3] , __lowerCAmelCase : Dict=[8, 8, 4, 4] , __lowerCAmelCase : int="gelu" , __lowerCAmelCase : Optional[int]=0.02 , __lowerCAmelCase : List[str]=1e-6 , __lowerCAmelCase : Optional[int]=1e-2 , __lowerCAmelCase : Any=0.0 , __lowerCAmelCase : List[str]=0.0 , **__lowerCAmelCase : Any , ): super().__init__(**__lowerCAmelCase ) _UpperCAmelCase = image_size _UpperCAmelCase = num_channels _UpperCAmelCase = patch_sizes _UpperCAmelCase = strides _UpperCAmelCase = hidden_sizes _UpperCAmelCase = depths _UpperCAmelCase = mlp_ratios _UpperCAmelCase = hidden_act _UpperCAmelCase = initializer_range _UpperCAmelCase = layer_norm_eps _UpperCAmelCase = layer_scale_init_value _UpperCAmelCase = drop_path_rate _UpperCAmelCase = dropout_rate
"""simple docstring""" from __future__ import annotations from collections.abc import MutableSequence class a : def __init__( self : Union[str, Any] , __lowerCAmelCase : int , __lowerCAmelCase : MutableSequence[float] ): if len(__lowerCAmelCase ) != degree + 1: raise ValueError( """The number of coefficients should be equal to the degree + 1.""" ) _UpperCAmelCase = list(__lowerCAmelCase ) _UpperCAmelCase = degree def __add__( self : List[str] , __lowerCAmelCase : Polynomial ): if self.degree > polynomial_a.degree: _UpperCAmelCase = self.coefficients[:] for i in range(polynomial_a.degree + 1 ): coefficients[i] += polynomial_a.coefficients[i] return Polynomial(self.degree , __lowerCAmelCase ) else: _UpperCAmelCase = polynomial_a.coefficients[:] for i in range(self.degree + 1 ): coefficients[i] += self.coefficients[i] return Polynomial(polynomial_a.degree , __lowerCAmelCase ) def __sub__( self : Union[str, Any] , __lowerCAmelCase : Polynomial ): return self + polynomial_a * Polynomial(0 , [-1] ) def __neg__( self : Dict ): return Polynomial(self.degree , [-c for c in self.coefficients] ) def __mul__( self : Any , __lowerCAmelCase : Polynomial ): _UpperCAmelCase = [0] * (self.degree + polynomial_a.degree + 1) for i in range(self.degree + 1 ): for j in range(polynomial_a.degree + 1 ): coefficients[i + j] += ( self.coefficients[i] * polynomial_a.coefficients[j] ) return Polynomial(self.degree + polynomial_a.degree , __lowerCAmelCase ) def lowerCAmelCase_ ( self : str , __lowerCAmelCase : int | float ): _UpperCAmelCase = 0 for i in range(self.degree + 1 ): result += self.coefficients[i] * (substitution**i) return result def __str__( self : Dict ): _UpperCAmelCase = """""" for i in range(self.degree , -1 , -1 ): if self.coefficients[i] == 0: continue elif self.coefficients[i] > 0: if polynomial: polynomial += " + " else: polynomial += " - " if i == 0: polynomial += str(abs(self.coefficients[i] ) ) elif i == 1: polynomial += str(abs(self.coefficients[i] ) ) + "x" else: polynomial += str(abs(self.coefficients[i] ) ) + "x^" + str(__lowerCAmelCase ) return polynomial def __repr__( self : List[str] ): return self.__str__() def lowerCAmelCase_ ( self : Union[str, Any] ): _UpperCAmelCase = [0] * self.degree for i in range(self.degree ): _UpperCAmelCase = self.coefficients[i + 1] * (i + 1) return Polynomial(self.degree - 1 , __lowerCAmelCase ) def lowerCAmelCase_ ( self : Any , __lowerCAmelCase : int | float = 0 ): _UpperCAmelCase = [0] * (self.degree + 2) _UpperCAmelCase = constant for i in range(self.degree + 1 ): _UpperCAmelCase = self.coefficients[i] / (i + 1) return Polynomial(self.degree + 1 , __lowerCAmelCase ) def __eq__( self : List[str] , __lowerCAmelCase : object ): if not isinstance(__lowerCAmelCase , __lowerCAmelCase ): return False if self.degree != polynomial_a.degree: return False for i in range(self.degree + 1 ): if self.coefficients[i] != polynomial_a.coefficients[i]: return False return True def __ne__( self : Tuple , __lowerCAmelCase : object ): return not self.__eq__(__lowerCAmelCase )
30
"""simple docstring""" def __UpperCAmelCase ( lowercase = 10_00 ): """simple docstring""" _UpperCAmelCase = 2**power _UpperCAmelCase = 0 while n: _UpperCAmelCase , _UpperCAmelCase = r + n % 10, n // 10 return r if __name__ == "__main__": print(solution(int(str(input()).strip())))
30
1
"""simple docstring""" def __UpperCAmelCase ( lowercase ,lowercase ): """simple docstring""" return 1 if input_a == input_a else 0 def __UpperCAmelCase ( ): """simple docstring""" assert xnor_gate(0 ,0 ) == 1 assert xnor_gate(0 ,1 ) == 0 assert xnor_gate(1 ,0 ) == 0 assert xnor_gate(1 ,1 ) == 1 if __name__ == "__main__": print(xnor_gate(0, 0)) print(xnor_gate(0, 1)) print(xnor_gate(1, 0)) print(xnor_gate(1, 1))
30
"""simple docstring""" from typing import Any, Dict, List, Optional, Tuple, Union import torch from torch import nn from torch.utils.data import DistributedSampler, RandomSampler from transformers import PreTrainedModel, Trainer, logging from transformers.integrations import is_fairscale_available from transformers.models.fsmt.configuration_fsmt import FSMTConfig from transformers.optimization import ( Adafactor, AdamW, get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) from transformers.trainer_pt_utils import get_tpu_sampler from transformers.training_args import ParallelMode from transformers.utils import is_torch_tpu_available if is_fairscale_available(): from fairscale.optim import OSS UpperCAmelCase__ = logging.get_logger(__name__) UpperCAmelCase__ = { """linear""": get_linear_schedule_with_warmup, """cosine""": get_cosine_schedule_with_warmup, """cosine_w_restarts""": get_cosine_with_hard_restarts_schedule_with_warmup, """polynomial""": get_polynomial_decay_schedule_with_warmup, """constant""": get_constant_schedule, """constant_w_warmup""": get_constant_schedule_with_warmup, } class a ( lowerCAmelCase_ ): def __init__( self : Optional[int] , __lowerCAmelCase : Any=None , __lowerCAmelCase : Any=None , *__lowerCAmelCase : Union[str, Any] , **__lowerCAmelCase : Optional[int] ): super().__init__(*__lowerCAmelCase , **__lowerCAmelCase ) if config is None: assert isinstance(self.model , __lowerCAmelCase ), ( "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is" f''' {self.model.__class__}''' ) _UpperCAmelCase = self.model.config else: _UpperCAmelCase = config _UpperCAmelCase = data_args _UpperCAmelCase = self.config.tgt_vocab_size if isinstance(self.config , __lowerCAmelCase ) else self.config.vocab_size if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss): assert self.config.pad_token_id is not None, ( "Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss" " calculation or doing label smoothing." ) if self.config.pad_token_id is None and self.config.eos_token_id is not None: logger.warning( f'''The `config.pad_token_id` is `None`. 
Using `config.eos_token_id` = {self.config.eos_token_id} for''' """ padding..""" ) if self.args.label_smoothing == 0: _UpperCAmelCase = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id ) else: # dynamically import label_smoothed_nll_loss from utils import label_smoothed_nll_loss _UpperCAmelCase = label_smoothed_nll_loss def lowerCAmelCase_ ( self : Dict , __lowerCAmelCase : int ): if self.optimizer is None: _UpperCAmelCase = ["""bias""", """LayerNorm.weight"""] _UpperCAmelCase = [ { """params""": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )], """weight_decay""": self.args.weight_decay, }, { """params""": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )], """weight_decay""": 0.0, }, ] _UpperCAmelCase = Adafactor if self.args.adafactor else AdamW if self.args.adafactor: _UpperCAmelCase = Adafactor _UpperCAmelCase = {"""scale_parameter""": False, """relative_step""": False} else: _UpperCAmelCase = AdamW _UpperCAmelCase = { """betas""": (self.args.adam_betaa, self.args.adam_betaa), """eps""": self.args.adam_epsilon, } _UpperCAmelCase = self.args.learning_rate if self.sharded_ddp: _UpperCAmelCase = OSS( params=__lowerCAmelCase , optim=__lowerCAmelCase , **__lowerCAmelCase , ) else: _UpperCAmelCase = optimizer_cls(__lowerCAmelCase , **__lowerCAmelCase ) if self.lr_scheduler is None: _UpperCAmelCase = self._get_lr_scheduler(__lowerCAmelCase ) else: # ignoring --lr_scheduler logger.warning("""scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.""" ) def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : List[str] ): _UpperCAmelCase = arg_to_scheduler[self.args.lr_scheduler] if self.args.lr_scheduler == "constant": _UpperCAmelCase = schedule_func(self.optimizer ) elif self.args.lr_scheduler == "constant_w_warmup": _UpperCAmelCase = schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps ) else: _UpperCAmelCase = schedule_func( self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=__lowerCAmelCase ) return scheduler def lowerCAmelCase_ ( self : Optional[int] ): if isinstance(self.train_dataset , torch.utils.data.IterableDataset ): return None elif is_torch_tpu_available(): return get_tpu_sampler(self.train_dataset ) else: if self.args.sortish_sampler: self.train_dataset.make_sortish_sampler( self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , ) return ( RandomSampler(self.train_dataset ) if self.args.local_rank == -1 else DistributedSampler(self.train_dataset ) ) def lowerCAmelCase_ ( self : Optional[int] , __lowerCAmelCase : str , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Tuple ): if self.args.label_smoothing == 0: if self.data_args is not None and self.data_args.ignore_pad_token_for_loss: # force training to ignore pad token _UpperCAmelCase = model(**__lowerCAmelCase , use_cache=__lowerCAmelCase )[0] _UpperCAmelCase = self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) ) else: # compute usual loss via models _UpperCAmelCase , _UpperCAmelCase = model(**__lowerCAmelCase , labels=__lowerCAmelCase , use_cache=__lowerCAmelCase )[:2] else: # compute label smoothed loss _UpperCAmelCase = model(**__lowerCAmelCase , use_cache=__lowerCAmelCase )[0] _UpperCAmelCase = torch.nn.functional.log_softmax(__lowerCAmelCase , dim=-1 ) _UpperCAmelCase , _UpperCAmelCase = self.loss_fn(__lowerCAmelCase , __lowerCAmelCase , self.args.label_smoothing , 
ignore_index=self.config.pad_token_id ) return loss, logits def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : int ): _UpperCAmelCase = inputs.pop("""labels""" ) _UpperCAmelCase , _UpperCAmelCase = self._compute_loss(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) return loss def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : nn.Module , __lowerCAmelCase : Dict[str, Union[torch.Tensor, Any]] , __lowerCAmelCase : bool , __lowerCAmelCase : Optional[List[str]] = None , ): _UpperCAmelCase = self._prepare_inputs(__lowerCAmelCase ) _UpperCAmelCase = { """max_length""": self.data_args.val_max_target_length if self.data_args is not None else self.config.max_length, """num_beams""": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams, } if self.args.predict_with_generate and not self.args.prediction_loss_only: _UpperCAmelCase = self.model.generate( inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , **__lowerCAmelCase , ) # in case the batch is shorter than max length, the output should be padded if generated_tokens.shape[-1] < gen_kwargs["max_length"]: _UpperCAmelCase = self._pad_tensors_to_max_len(__lowerCAmelCase , gen_kwargs["""max_length"""] ) _UpperCAmelCase = inputs.pop("""labels""" ) with torch.no_grad(): # compute loss on predict data _UpperCAmelCase , _UpperCAmelCase = self._compute_loss(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase = loss.mean().detach() if self.args.prediction_loss_only: return (loss, None, None) _UpperCAmelCase = generated_tokens if self.args.predict_with_generate else logits if labels.shape[-1] < gen_kwargs["max_length"]: _UpperCAmelCase = self._pad_tensors_to_max_len(__lowerCAmelCase , gen_kwargs["""max_length"""] ) return (loss, logits, labels) def lowerCAmelCase_ ( self : Union[str, Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[str] ): # If PAD token is not defined at least EOS token has to be defined _UpperCAmelCase = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id if pad_token_id is None: raise ValueError( """Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be""" f''' padded to `max_length`={max_length}''' ) _UpperCAmelCase = pad_token_id * torch.ones( (tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device ) _UpperCAmelCase = tensor return padded_tensor
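The trainer above dynamically imports `label_smoothed_nll_loss` from the examples' `utils` module, which is not shown in this file. The following is only an assumed sketch of how label-smoothed NLL loss is commonly computed; the name, signature, and shapes are illustrative, not the actual helper.

import torch


def label_smoothed_nll_loss_sketch(lprobs, target, epsilon, ignore_index=-100):
    """Hypothetical stand-in for the dynamically imported helper.

    lprobs: (N, vocab) log-probabilities; target: (N,) token ids.
    """
    target = target.unsqueeze(-1)
    # NLL of the gold token; clamp guards against gathering at ignore_index.
    nll_loss = -lprobs.gather(dim=-1, index=target.clamp(min=0))
    # Uniform "smoothing" mass spread over the whole vocabulary.
    smooth_loss = -lprobs.sum(dim=-1, keepdim=True)
    pad_mask = target.eq(ignore_index)
    nll_loss = nll_loss.masked_fill(pad_mask, 0.0)
    smooth_loss = smooth_loss.masked_fill(pad_mask, 0.0)
    nll_loss, smooth_loss = nll_loss.sum(), smooth_loss.sum()
    eps_i = epsilon / lprobs.size(-1)
    loss = (1.0 - epsilon) * nll_loss + eps_i * smooth_loss
    return loss, nll_loss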
30
1
"""simple docstring""" import gc import unittest from parameterized import parameterized from diffusers import FlaxUNetaDConditionModel from diffusers.utils import is_flax_available from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow if is_flax_available(): import jax import jax.numpy as jnp @slow @require_flax class a ( unittest.TestCase ): def lowerCAmelCase_ ( self : Tuple , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Optional[Any] ): return f'''gaussian_noise_s={seed}_shape={"_".join([str(__lowerCAmelCase ) for s in shape] )}.npy''' def lowerCAmelCase_ ( self : Any ): # clean up the VRAM after each test super().tearDown() gc.collect() def lowerCAmelCase_ ( self : List[str] , __lowerCAmelCase : Dict=0 , __lowerCAmelCase : int=(4, 4, 64, 64) , __lowerCAmelCase : Dict=False ): _UpperCAmelCase = jnp.bfloataa if fpaa else jnp.floataa _UpperCAmelCase = jnp.array(load_hf_numpy(self.get_file_format(__lowerCAmelCase , __lowerCAmelCase ) ) , dtype=__lowerCAmelCase ) return image def lowerCAmelCase_ ( self : int , __lowerCAmelCase : Union[str, Any]=False , __lowerCAmelCase : List[str]="CompVis/stable-diffusion-v1-4" ): _UpperCAmelCase = jnp.bfloataa if fpaa else jnp.floataa _UpperCAmelCase = """bf16""" if fpaa else None _UpperCAmelCase , _UpperCAmelCase = FlaxUNetaDConditionModel.from_pretrained( __lowerCAmelCase , subfolder="""unet""" , dtype=__lowerCAmelCase , revision=__lowerCAmelCase ) return model, params def lowerCAmelCase_ ( self : List[str] , __lowerCAmelCase : str=0 , __lowerCAmelCase : Tuple=(4, 77, 768) , __lowerCAmelCase : Any=False ): _UpperCAmelCase = jnp.bfloataa if fpaa else jnp.floataa _UpperCAmelCase = jnp.array(load_hf_numpy(self.get_file_format(__lowerCAmelCase , __lowerCAmelCase ) ) , dtype=__lowerCAmelCase ) return hidden_states @parameterized.expand( [ # fmt: off [83, 4, [-0.2_323, -0.1_304, 0.0_813, -0.3_093, -0.0_919, -0.1_571, -0.1_125, -0.5_806]], [17, 0.55, [-0.0_831, -0.2_443, 0.0_901, -0.0_919, 0.3_396, 0.0_103, -0.3_743, 0.0_701]], [8, 0.89, [-0.4_863, 0.0_859, 0.0_875, -0.1_658, 0.9_199, -0.0_114, 0.4_839, 0.4_639]], [3, 1000, [-0.5_649, 0.2_402, -0.5_518, 0.1_248, 1.1_328, -0.2_443, -0.0_325, -1.0_078]], # fmt: on ] ) def lowerCAmelCase_ ( self : str , __lowerCAmelCase : Dict , __lowerCAmelCase : int , __lowerCAmelCase : Dict ): _UpperCAmelCase , _UpperCAmelCase = self.get_unet_model(model_id="""CompVis/stable-diffusion-v1-4""" , fpaa=__lowerCAmelCase ) _UpperCAmelCase = self.get_latents(__lowerCAmelCase , fpaa=__lowerCAmelCase ) _UpperCAmelCase = self.get_encoder_hidden_states(__lowerCAmelCase , fpaa=__lowerCAmelCase ) _UpperCAmelCase = model.apply( {"""params""": params} , __lowerCAmelCase , jnp.array(__lowerCAmelCase , dtype=jnp.intaa ) , encoder_hidden_states=__lowerCAmelCase , ).sample assert sample.shape == latents.shape _UpperCAmelCase = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa ) _UpperCAmelCase = jnp.array(__lowerCAmelCase , dtype=jnp.floataa ) # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware assert jnp.allclose(__lowerCAmelCase , __lowerCAmelCase , atol=1e-2 ) @parameterized.expand( [ # fmt: off [83, 4, [0.1_514, 0.0_807, 0.1_624, 0.1_016, -0.1_896, 0.0_263, 0.0_677, 0.2_310]], [17, 0.55, [0.1_164, -0.0_216, 0.0_170, 0.1_589, -0.3_120, 0.1_005, -0.0_581, -0.1_458]], [8, 0.89, [-0.1_758, -0.0_169, 0.1_004, -0.1_411, 0.1_312, 0.1_103, -0.1_996, 0.2_139]], [3, 1000, [0.1_214, 0.0_352, -0.0_731, -0.1_562, -0.0_994, -0.0_906, 
-0.2_340, -0.0_539]], # fmt: on ] ) def lowerCAmelCase_ ( self : str , __lowerCAmelCase : int , __lowerCAmelCase : Dict , __lowerCAmelCase : int ): _UpperCAmelCase , _UpperCAmelCase = self.get_unet_model(model_id="""stabilityai/stable-diffusion-2""" , fpaa=__lowerCAmelCase ) _UpperCAmelCase = self.get_latents(__lowerCAmelCase , shape=(4, 4, 96, 96) , fpaa=__lowerCAmelCase ) _UpperCAmelCase = self.get_encoder_hidden_states(__lowerCAmelCase , shape=(4, 77, 1024) , fpaa=__lowerCAmelCase ) _UpperCAmelCase = model.apply( {"""params""": params} , __lowerCAmelCase , jnp.array(__lowerCAmelCase , dtype=jnp.intaa ) , encoder_hidden_states=__lowerCAmelCase , ).sample assert sample.shape == latents.shape _UpperCAmelCase = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa ) _UpperCAmelCase = jnp.array(__lowerCAmelCase , dtype=jnp.floataa ) # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware assert jnp.allclose(__lowerCAmelCase , __lowerCAmelCase , atol=1e-2 )
30
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available UpperCAmelCase__ = { """configuration_git""": ["""GIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GitConfig""", """GitVisionConfig"""], """processing_git""": ["""GitProcessor"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ = [ """GIT_PRETRAINED_MODEL_ARCHIVE_LIST""", """GitForCausalLM""", """GitModel""", """GitPreTrainedModel""", """GitVisionModel""", ] if TYPE_CHECKING: from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig from .processing_git import GitProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_git import ( GIT_PRETRAINED_MODEL_ARCHIVE_LIST, GitForCausalLM, GitModel, GitPreTrainedModel, GitVisionModel, ) else: import sys UpperCAmelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
30
1
"""simple docstring""" import argparse import os import re UpperCAmelCase__ = """src/transformers/models/auto""" # re pattern that matches mapping introductions: # SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict UpperCAmelCase__ = re.compile(r"""[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict""") # re pattern that matches identifiers in mappings UpperCAmelCase__ = re.compile(r"""\s*\(\s*\"(\S[^\"]+)\"""") def __UpperCAmelCase ( lowercase ,lowercase = False ): """simple docstring""" with open(lowercase ,"""r""" ,encoding="""utf-8""" ) as f: _UpperCAmelCase = f.read() _UpperCAmelCase = content.split("""\n""" ) _UpperCAmelCase = [] _UpperCAmelCase = 0 while line_idx < len(lowercase ): if _re_intro_mapping.search(lines[line_idx] ) is not None: _UpperCAmelCase = len(re.search(R"""^(\s*)\S""" ,lines[line_idx] ).groups()[0] ) + 8 # Start of a new mapping! while not lines[line_idx].startswith(""" """ * indent + """(""" ): new_lines.append(lines[line_idx] ) line_idx += 1 _UpperCAmelCase = [] while lines[line_idx].strip() != "]": # Blocks either fit in one line or not if lines[line_idx].strip() == "(": _UpperCAmelCase = line_idx while not lines[line_idx].startswith(""" """ * indent + """)""" ): line_idx += 1 blocks.append("""\n""".join(lines[start_idx : line_idx + 1] ) ) else: blocks.append(lines[line_idx] ) line_idx += 1 # Sort blocks by their identifiers _UpperCAmelCase = sorted(lowercase ,key=lambda lowercase : _re_identifier.search(lowercase ).groups()[0] ) new_lines += blocks else: new_lines.append(lines[line_idx] ) line_idx += 1 if overwrite: with open(lowercase ,"""w""" ,encoding="""utf-8""" ) as f: f.write("""\n""".join(lowercase ) ) elif "\n".join(lowercase ) != content: return True def __UpperCAmelCase ( lowercase = False ): """simple docstring""" _UpperCAmelCase = [os.path.join(lowercase ,lowercase ) for f in os.listdir(lowercase ) if f.endswith(""".py""" )] _UpperCAmelCase = [sort_auto_mapping(lowercase ,overwrite=lowercase ) for fname in fnames] if not overwrite and any(lowercase ): _UpperCAmelCase = [f for f, d in zip(lowercase ,lowercase ) if d] raise ValueError( f'''The following files have auto mappings that need sorting: {", ".join(lowercase )}. Run `make style` to fix''' """ this.""" ) if __name__ == "__main__": UpperCAmelCase__ = argparse.ArgumentParser() parser.add_argument("""--check_only""", action="""store_true""", help="""Whether to only check or fix style.""") UpperCAmelCase__ = parser.parse_args() sort_all_auto_mappings(not args.check_only)
30
"""simple docstring""" import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from timm.data import resolve_data_config from timm.data.transforms_factory import create_transform from transformers import ( BitConfig, ViTHybridConfig, ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel, ) from transformers.image_utils import PILImageResampling from transformers.utils import logging logging.set_verbosity_info() UpperCAmelCase__ = logging.get_logger(__name__) def __UpperCAmelCase ( lowercase ,lowercase=False ): """simple docstring""" _UpperCAmelCase = [] # fmt: off # stem: rename_keys.append(("""cls_token""", """vit.embeddings.cls_token""") ) rename_keys.append(("""pos_embed""", """vit.embeddings.position_embeddings""") ) rename_keys.append(("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight""") ) rename_keys.append(("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias""") ) # backbone rename_keys.append(("""patch_embed.backbone.stem.conv.weight""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight""") ) rename_keys.append(("""patch_embed.backbone.stem.norm.weight""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight""") ) rename_keys.append(("""patch_embed.backbone.stem.norm.bias""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias""") ) for stage_idx in range(len(config.backbone_config.depths ) ): for layer_idx in range(config.backbone_config.depths[stage_idx] ): rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight''') ) rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight''') ) rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias''') ) rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight''') ) rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight''') ) rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias''') ) rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight''') ) rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight''') ) rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias''') ) 
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight''') ) rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight''') ) rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias''') ) # transformer encoder for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''vit.encoder.layer.{i}.layernorm_before.weight''') ) rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''vit.encoder.layer.{i}.layernorm_before.bias''') ) rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''vit.encoder.layer.{i}.attention.output.dense.weight''') ) rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''vit.encoder.layer.{i}.attention.output.dense.bias''') ) rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''vit.encoder.layer.{i}.layernorm_after.weight''') ) rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''vit.encoder.layer.{i}.layernorm_after.bias''') ) rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''vit.encoder.layer.{i}.intermediate.dense.weight''') ) rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''vit.encoder.layer.{i}.intermediate.dense.bias''') ) rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''vit.encoder.layer.{i}.output.dense.weight''') ) rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''vit.encoder.layer.{i}.output.dense.bias''') ) if base_model: # layernorm + pooler rename_keys.extend( [ ("""norm.weight""", """layernorm.weight"""), ("""norm.bias""", """layernorm.bias"""), ("""pre_logits.fc.weight""", """pooler.dense.weight"""), ("""pre_logits.fc.bias""", """pooler.dense.bias"""), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" _UpperCAmelCase = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ("""norm.weight""", """vit.layernorm.weight"""), ("""norm.bias""", """vit.layernorm.bias"""), ("""head.weight""", """classifier.weight"""), ("""head.bias""", """classifier.bias"""), ] ) # fmt: on return rename_keys def __UpperCAmelCase ( lowercase ,lowercase ,lowercase=False ): """simple docstring""" for i in range(config.num_hidden_layers ): if base_model: _UpperCAmelCase = """""" else: _UpperCAmelCase = """vit.""" # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) _UpperCAmelCase = state_dict.pop(f'''blocks.{i}.attn.qkv.weight''' ) _UpperCAmelCase = state_dict.pop(f'''blocks.{i}.attn.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict _UpperCAmelCase = in_proj_weight[ : config.hidden_size, : ] _UpperCAmelCase = in_proj_bias[: config.hidden_size] _UpperCAmelCase = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] _UpperCAmelCase = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] _UpperCAmelCase = in_proj_weight[ -config.hidden_size :, : ] _UpperCAmelCase = in_proj_bias[-config.hidden_size :] def __UpperCAmelCase ( lowercase ): """simple docstring""" 
_UpperCAmelCase = ["""head.weight""", """head.bias"""] for k in ignore_keys: state_dict.pop(lowercase ,lowercase ) def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ): """simple docstring""" _UpperCAmelCase = dct.pop(lowercase ) _UpperCAmelCase = val def __UpperCAmelCase ( ): """simple docstring""" _UpperCAmelCase = """http://images.cocodataset.org/val2017/000000039769.jpg""" _UpperCAmelCase = Image.open(requests.get(lowercase ,stream=lowercase ).raw ) return im @torch.no_grad() def __UpperCAmelCase ( lowercase ,lowercase ,lowercase=False ): """simple docstring""" _UpperCAmelCase = BitConfig( global_padding="""same""" ,layer_type="""bottleneck""" ,depths=(3, 4, 9) ,out_features=["""stage3"""] ,embedding_dynamic_padding=lowercase ,) _UpperCAmelCase = ViTHybridConfig(backbone_config=lowercase ,image_size=3_84 ,num_labels=10_00 ) _UpperCAmelCase = False # load original model from timm _UpperCAmelCase = timm.create_model(lowercase ,pretrained=lowercase ) timm_model.eval() # load state_dict of original model, remove and rename some keys _UpperCAmelCase = timm_model.state_dict() if base_model: remove_classification_head_(lowercase ) _UpperCAmelCase = create_rename_keys(lowercase ,lowercase ) for src, dest in rename_keys: rename_key(lowercase ,lowercase ,lowercase ) read_in_q_k_v(lowercase ,lowercase ,lowercase ) _UpperCAmelCase = """huggingface/label-files""" _UpperCAmelCase = """imagenet-1k-id2label.json""" _UpperCAmelCase = json.load(open(hf_hub_download(lowercase ,lowercase ,repo_type="""dataset""" ) ,"""r""" ) ) _UpperCAmelCase = {int(lowercase ): v for k, v in idalabel.items()} _UpperCAmelCase = idalabel _UpperCAmelCase = {v: k for k, v in idalabel.items()} # load HuggingFace model if vit_name[-5:] == "in21k": _UpperCAmelCase = ViTHybridModel(lowercase ).eval() else: _UpperCAmelCase = ViTHybridForImageClassification(lowercase ).eval() model.load_state_dict(lowercase ) # create image processor _UpperCAmelCase = create_transform(**resolve_data_config({} ,model=lowercase ) ) _UpperCAmelCase = transform.transforms _UpperCAmelCase = { """bilinear""": PILImageResampling.BILINEAR, """bicubic""": PILImageResampling.BICUBIC, """nearest""": PILImageResampling.NEAREST, } _UpperCAmelCase = ViTHybridImageProcessor( do_resize=lowercase ,size={"""shortest_edge""": timm_transforms[0].size} ,resample=pillow_resamplings[timm_transforms[0].interpolation.value] ,do_center_crop=lowercase ,crop_size={"""height""": timm_transforms[1].size[0], """width""": timm_transforms[1].size[1]} ,do_normalize=lowercase ,image_mean=timm_transforms[-1].mean.tolist() ,image_std=timm_transforms[-1].std.tolist() ,) _UpperCAmelCase = prepare_img() _UpperCAmelCase = transform(lowercase ).unsqueeze(0 ) _UpperCAmelCase = processor(lowercase ,return_tensors="""pt""" ).pixel_values # verify pixel values assert torch.allclose(lowercase ,lowercase ) # verify logits with torch.no_grad(): _UpperCAmelCase = model(lowercase ) _UpperCAmelCase = outputs.logits print("""Predicted class:""" ,logits.argmax(-1 ).item() ) if base_model: _UpperCAmelCase = timm_model.forward_features(lowercase ) assert timm_pooled_output.shape == outputs.pooler_output.shape assert torch.allclose(lowercase ,outputs.pooler_output ,atol=1E-3 ) else: _UpperCAmelCase = timm_model(lowercase ) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(lowercase ,outputs.logits ,atol=1E-3 ) print("""Looks ok!""" ) if pytorch_dump_folder_path is not None: Path(lowercase ).mkdir(exist_ok=lowercase ) print(f'''Saving model {vit_name} to 
{pytorch_dump_folder_path}''' ) model.save_pretrained(lowercase ) print(f'''Saving processor to {pytorch_dump_folder_path}''' ) processor.save_pretrained(lowercase ) if push_to_hub: print(f'''Pushing model and processor to the hub {vit_name}''' ) model.push_to_hub(f'''ybelkada/{vit_name}''' ) processor.push_to_hub(f'''ybelkada/{vit_name}''' ) if __name__ == "__main__": UpperCAmelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--vit_name""", default="""vit_base_r50_s16_384""", type=str, help="""Name of the hybrid ViT timm model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub.""" ) UpperCAmelCase__ = parser.parse_args() convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
30
1
"""simple docstring""" import unittest from transformers import is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device if is_torch_available(): from transformers import AutoModelForSeqaSeqLM, AutoTokenizer @require_torch @require_sentencepiece @require_tokenizers class a ( unittest.TestCase ): @slow def lowerCAmelCase_ ( self : List[str] ): _UpperCAmelCase = AutoModelForSeqaSeqLM.from_pretrained("""google/mt5-small""" , return_dict=__lowerCAmelCase ).to(__lowerCAmelCase ) _UpperCAmelCase = AutoTokenizer.from_pretrained("""google/mt5-small""" ) _UpperCAmelCase = tokenizer("""Hello there""" , return_tensors="""pt""" ).input_ids _UpperCAmelCase = tokenizer("""Hi I am""" , return_tensors="""pt""" ).input_ids _UpperCAmelCase = model(input_ids.to(__lowerCAmelCase ) , labels=labels.to(__lowerCAmelCase ) ).loss _UpperCAmelCase = -(labels.shape[-1] * loss.item()) _UpperCAmelCase = -84.9_127 self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
30
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_speech_available, is_torch_available, ) UpperCAmelCase__ = { """configuration_trocr""": ["""TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TrOCRConfig"""], """processing_trocr""": ["""TrOCRProcessor"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ = [ """TROCR_PRETRAINED_MODEL_ARCHIVE_LIST""", """TrOCRForCausalLM""", """TrOCRPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig from .processing_trocr import TrOCRProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel else: import sys UpperCAmelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
30
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, is_vision_available, ) UpperCAmelCase__ = { """configuration_layoutlmv3""": [ """LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LayoutLMv3Config""", """LayoutLMv3OnnxConfig""", ], """processing_layoutlmv3""": ["""LayoutLMv3Processor"""], """tokenization_layoutlmv3""": ["""LayoutLMv3Tokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ = ["""LayoutLMv3TokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ = [ """LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST""", """LayoutLMv3ForQuestionAnswering""", """LayoutLMv3ForSequenceClassification""", """LayoutLMv3ForTokenClassification""", """LayoutLMv3Model""", """LayoutLMv3PreTrainedModel""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ = [ """TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFLayoutLMv3ForQuestionAnswering""", """TFLayoutLMv3ForSequenceClassification""", """TFLayoutLMv3ForTokenClassification""", """TFLayoutLMv3Model""", """TFLayoutLMv3PreTrainedModel""", ] try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ = ["""LayoutLMv3FeatureExtractor"""] UpperCAmelCase__ = ["""LayoutLMv3ImageProcessor"""] if TYPE_CHECKING: from .configuration_layoutlmva import ( LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig, LayoutLMvaOnnxConfig, ) from .processing_layoutlmva import LayoutLMvaProcessor from .tokenization_layoutlmva import LayoutLMvaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_layoutlmva import ( LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST, LayoutLMvaForQuestionAnswering, LayoutLMvaForSequenceClassification, LayoutLMvaForTokenClassification, LayoutLMvaModel, LayoutLMvaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_layoutlmva import ( TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST, TFLayoutLMvaForQuestionAnswering, TFLayoutLMvaForSequenceClassification, TFLayoutLMvaForTokenClassification, TFLayoutLMvaModel, TFLayoutLMvaPreTrainedModel, ) try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor from .image_processing_layoutlmva import LayoutLMvaImageProcessor else: import sys UpperCAmelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
30
"""simple docstring""" # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from argparse import ArgumentParser from accelerate.commands.config import get_config_parser from accelerate.commands.env import env_command_parser from accelerate.commands.launch import launch_command_parser from accelerate.commands.test import test_command_parser from accelerate.commands.tpu import tpu_command_parser def __UpperCAmelCase ( ): """simple docstring""" _UpperCAmelCase = ArgumentParser("""Accelerate CLI tool""" ,usage="""accelerate <command> [<args>]""" ,allow_abbrev=lowercase ) _UpperCAmelCase = parser.add_subparsers(help="""accelerate command helpers""" ) # Register commands get_config_parser(subparsers=lowercase ) env_command_parser(subparsers=lowercase ) launch_command_parser(subparsers=lowercase ) tpu_command_parser(subparsers=lowercase ) test_command_parser(subparsers=lowercase ) # Let's go _UpperCAmelCase = parser.parse_args() if not hasattr(lowercase ,"""func""" ): parser.print_help() exit(1 ) # Run args.func(lowercase ) if __name__ == "__main__": main()
30
1
"""simple docstring""" from ....utils import logging UpperCAmelCase__ = logging.get_logger(__name__) class a ( lowerCAmelCase_ ): def __init__( self : str , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : str=None , __lowerCAmelCase : int=2048 ): _UpperCAmelCase = config.__dict__ _UpperCAmelCase = modal_hidden_size if num_labels: _UpperCAmelCase = num_labels
30
"""simple docstring""" def __UpperCAmelCase ( lowercase ): """simple docstring""" return " ".join(input_str.split()[::-1] ) if __name__ == "__main__": import doctest doctest.testmod()
30
1
"""simple docstring""" from __future__ import annotations from collections.abc import Iterator from typing import Any class a : def __init__( self : str , __lowerCAmelCase : Any ): _UpperCAmelCase = data _UpperCAmelCase = None class a : def __init__( self : int ): _UpperCAmelCase = None _UpperCAmelCase = None def __iter__( self : Dict ): _UpperCAmelCase = self.head while self.head: yield node.data _UpperCAmelCase = node.next if node == self.head: break def __len__( self : List[Any] ): return sum(1 for _ in self ) def __repr__( self : int ): return "->".join(str(__lowerCAmelCase ) for item in iter(self ) ) def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : Any ): self.insert_nth(len(self ) , __lowerCAmelCase ) def lowerCAmelCase_ ( self : str , __lowerCAmelCase : Any ): self.insert_nth(0 , __lowerCAmelCase ) def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : int , __lowerCAmelCase : Any ): if index < 0 or index > len(self ): raise IndexError("""list index out of range.""" ) _UpperCAmelCase = Node(__lowerCAmelCase ) if self.head is None: _UpperCAmelCase = new_node # first node points itself _UpperCAmelCase = _UpperCAmelCase = new_node elif index == 0: # insert at head _UpperCAmelCase = self.head _UpperCAmelCase = _UpperCAmelCase = new_node else: _UpperCAmelCase = self.head for _ in range(index - 1 ): _UpperCAmelCase = temp.next _UpperCAmelCase = temp.next _UpperCAmelCase = new_node if index == len(self ) - 1: # insert at tail _UpperCAmelCase = new_node def lowerCAmelCase_ ( self : Optional[Any] ): return self.delete_nth(0 ) def lowerCAmelCase_ ( self : Union[str, Any] ): return self.delete_nth(len(self ) - 1 ) def lowerCAmelCase_ ( self : Union[str, Any] , __lowerCAmelCase : int = 0 ): if not 0 <= index < len(self ): raise IndexError("""list index out of range.""" ) _UpperCAmelCase = self.head if self.head == self.tail: # just one node _UpperCAmelCase = _UpperCAmelCase = None elif index == 0: # delete head node _UpperCAmelCase = self.tail.next.next _UpperCAmelCase = self.head.next else: _UpperCAmelCase = self.head for _ in range(index - 1 ): _UpperCAmelCase = temp.next _UpperCAmelCase = temp.next _UpperCAmelCase = temp.next.next if index == len(self ) - 1: # delete at tail _UpperCAmelCase = temp return delete_node.data def lowerCAmelCase_ ( self : int ): return len(self ) == 0 def __UpperCAmelCase ( ): """simple docstring""" _UpperCAmelCase = CircularLinkedList() assert len(lowercase ) == 0 assert circular_linked_list.is_empty() is True assert str(lowercase ) == "" try: circular_linked_list.delete_front() raise AssertionError # This should not happen except IndexError: assert True # This should happen try: circular_linked_list.delete_tail() raise AssertionError # This should not happen except IndexError: assert True # This should happen try: circular_linked_list.delete_nth(-1 ) raise AssertionError except IndexError: assert True try: circular_linked_list.delete_nth(0 ) raise AssertionError except IndexError: assert True assert circular_linked_list.is_empty() is True for i in range(5 ): assert len(lowercase ) == i circular_linked_list.insert_nth(lowercase ,i + 1 ) assert str(lowercase ) == "->".join(str(lowercase ) for i in range(1 ,6 ) ) circular_linked_list.insert_tail(6 ) assert str(lowercase ) == "->".join(str(lowercase ) for i in range(1 ,7 ) ) circular_linked_list.insert_head(0 ) assert str(lowercase ) == "->".join(str(lowercase ) for i in range(0 ,7 ) ) assert circular_linked_list.delete_front() == 0 assert circular_linked_list.delete_tail() == 6 assert 
str(lowercase ) == "->".join(str(lowercase ) for i in range(1 ,6 ) ) assert circular_linked_list.delete_nth(2 ) == 3 circular_linked_list.insert_nth(2 ,3 ) assert str(lowercase ) == "->".join(str(lowercase ) for i in range(1 ,6 ) ) assert circular_linked_list.is_empty() is False if __name__ == "__main__": import doctest doctest.testmod()
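The invariant the class maintains is that the list is circular: the tail's `next` always points back at the head, which is what lets `__iter__` stop once it wraps around. A small self-contained illustration of that invariant (not part of the original file):

# Hypothetical mini-check of the circular invariant: after inserts,
# following .next from the head must eventually come back to the head.
class _Node:
    def __init__(self, data):
        self.data, self.next = data, None


head = _Node(1)
head.next = _Node(2)
head.next.next = _Node(3)
head.next.next.next = head  # tail points back to head: the list is circular

node, seen = head, []
while True:
    seen.append(node.data)
    node = node.next
    if node is head:  # wrapped around -> stop, exactly like __iter__ above
        break
assert seen == [1, 2, 3]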
30
"""simple docstring""" import csv import tweepy # Twitter API credentials UpperCAmelCase__ = """""" UpperCAmelCase__ = """""" UpperCAmelCase__ = """""" UpperCAmelCase__ = """""" def __UpperCAmelCase ( lowercase ): """simple docstring""" # authorize twitter, initialize tweepy _UpperCAmelCase = tweepy.OAuthHandler(lowercase ,lowercase ) auth.set_access_token(lowercase ,lowercase ) _UpperCAmelCase = tweepy.API(lowercase ) # initialize a list to hold all the tweepy Tweets _UpperCAmelCase = [] # make initial request for most recent tweets (200 is the maximum allowed count) _UpperCAmelCase = api.user_timeline(screen_name=lowercase ,count=2_00 ) # save most recent tweets alltweets.extend(lowercase ) # save the id of the oldest tweet less one _UpperCAmelCase = alltweets[-1].id - 1 # keep grabbing tweets until there are no tweets left to grab while len(lowercase ) > 0: print(f'''getting tweets before {oldest}''' ) # all subsequent requests use the max_id param to prevent duplicates _UpperCAmelCase = api.user_timeline( screen_name=lowercase ,count=2_00 ,max_id=lowercase ) # save most recent tweets alltweets.extend(lowercase ) # update the id of the oldest tweet less one _UpperCAmelCase = alltweets[-1].id - 1 print(f'''...{len(lowercase )} tweets downloaded so far''' ) # transform the tweepy tweets into a 2D array that will populate the csv _UpperCAmelCase = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets] # write the csv with open(f'''new_{screen_name}_tweets.csv''' ,"""w""" ) as f: _UpperCAmelCase = csv.writer(lowercase ) writer.writerow(["""id""", """created_at""", """text"""] ) writer.writerows(lowercase ) if __name__ == "__main__": # pass in the username of the account you want to download get_all_tweets("""FirePing32""")
30
1
"""simple docstring""" from ..utils import DummyObject, requires_backends class a ( metaclass=lowerCAmelCase_ ): _snake_case : int = ['flax'] def __init__( self : Dict , *__lowerCAmelCase : Union[str, Any] , **__lowerCAmelCase : List[Any] ): requires_backends(self , ["""flax"""] ) @classmethod def lowerCAmelCase_ ( cls : Any , *__lowerCAmelCase : int , **__lowerCAmelCase : Optional[Any] ): requires_backends(cls , ["""flax"""] ) @classmethod def lowerCAmelCase_ ( cls : Tuple , *__lowerCAmelCase : Optional[Any] , **__lowerCAmelCase : Union[str, Any] ): requires_backends(cls , ["""flax"""] ) class a ( metaclass=lowerCAmelCase_ ): _snake_case : Dict = ['flax'] def __init__( self : Any , *__lowerCAmelCase : str , **__lowerCAmelCase : Optional[int] ): requires_backends(self , ["""flax"""] ) @classmethod def lowerCAmelCase_ ( cls : Optional[int] , *__lowerCAmelCase : Tuple , **__lowerCAmelCase : Any ): requires_backends(cls , ["""flax"""] ) @classmethod def lowerCAmelCase_ ( cls : Dict , *__lowerCAmelCase : str , **__lowerCAmelCase : str ): requires_backends(cls , ["""flax"""] ) class a ( metaclass=lowerCAmelCase_ ): _snake_case : Optional[Any] = ['flax'] def __init__( self : Optional[Any] , *__lowerCAmelCase : Optional[Any] , **__lowerCAmelCase : Any ): requires_backends(self , ["""flax"""] ) @classmethod def lowerCAmelCase_ ( cls : Optional[Any] , *__lowerCAmelCase : Dict , **__lowerCAmelCase : Tuple ): requires_backends(cls , ["""flax"""] ) @classmethod def lowerCAmelCase_ ( cls : Tuple , *__lowerCAmelCase : Dict , **__lowerCAmelCase : List[str] ): requires_backends(cls , ["""flax"""] ) class a ( metaclass=lowerCAmelCase_ ): _snake_case : Optional[Any] = ['flax'] def __init__( self : str , *__lowerCAmelCase : str , **__lowerCAmelCase : Optional[Any] ): requires_backends(self , ["""flax"""] ) @classmethod def lowerCAmelCase_ ( cls : int , *__lowerCAmelCase : Dict , **__lowerCAmelCase : Optional[int] ): requires_backends(cls , ["""flax"""] ) @classmethod def lowerCAmelCase_ ( cls : int , *__lowerCAmelCase : Optional[Any] , **__lowerCAmelCase : List[Any] ): requires_backends(cls , ["""flax"""] ) class a ( metaclass=lowerCAmelCase_ ): _snake_case : Dict = ['flax'] def __init__( self : Tuple , *__lowerCAmelCase : Union[str, Any] , **__lowerCAmelCase : Dict ): requires_backends(self , ["""flax"""] ) @classmethod def lowerCAmelCase_ ( cls : Tuple , *__lowerCAmelCase : Optional[int] , **__lowerCAmelCase : List[str] ): requires_backends(cls , ["""flax"""] ) @classmethod def lowerCAmelCase_ ( cls : Union[str, Any] , *__lowerCAmelCase : Optional[Any] , **__lowerCAmelCase : List[Any] ): requires_backends(cls , ["""flax"""] ) class a ( metaclass=lowerCAmelCase_ ): _snake_case : Any = ['flax'] def __init__( self : str , *__lowerCAmelCase : Dict , **__lowerCAmelCase : Union[str, Any] ): requires_backends(self , ["""flax"""] ) @classmethod def lowerCAmelCase_ ( cls : Dict , *__lowerCAmelCase : int , **__lowerCAmelCase : Tuple ): requires_backends(cls , ["""flax"""] ) @classmethod def lowerCAmelCase_ ( cls : Dict , *__lowerCAmelCase : Tuple , **__lowerCAmelCase : Dict ): requires_backends(cls , ["""flax"""] ) class a ( metaclass=lowerCAmelCase_ ): _snake_case : Tuple = ['flax'] def __init__( self : Tuple , *__lowerCAmelCase : Dict , **__lowerCAmelCase : List[str] ): requires_backends(self , ["""flax"""] ) @classmethod def lowerCAmelCase_ ( cls : Tuple , *__lowerCAmelCase : Union[str, Any] , **__lowerCAmelCase : List[str] ): requires_backends(cls , ["""flax"""] ) @classmethod def lowerCAmelCase_ ( cls : 
Union[str, Any] , *__lowerCAmelCase : List[Any] , **__lowerCAmelCase : Any ): requires_backends(cls , ["""flax"""] ) class a ( metaclass=lowerCAmelCase_ ): _snake_case : Optional[Any] = ['flax'] def __init__( self : Any , *__lowerCAmelCase : Any , **__lowerCAmelCase : Optional[int] ): requires_backends(self , ["""flax"""] ) @classmethod def lowerCAmelCase_ ( cls : List[Any] , *__lowerCAmelCase : int , **__lowerCAmelCase : Optional[int] ): requires_backends(cls , ["""flax"""] ) @classmethod def lowerCAmelCase_ ( cls : Tuple , *__lowerCAmelCase : Optional[int] , **__lowerCAmelCase : Optional[int] ): requires_backends(cls , ["""flax"""] ) class a ( metaclass=lowerCAmelCase_ ): _snake_case : Any = ['flax'] def __init__( self : Dict , *__lowerCAmelCase : str , **__lowerCAmelCase : Optional[Any] ): requires_backends(self , ["""flax"""] ) @classmethod def lowerCAmelCase_ ( cls : Tuple , *__lowerCAmelCase : str , **__lowerCAmelCase : Any ): requires_backends(cls , ["""flax"""] ) @classmethod def lowerCAmelCase_ ( cls : Union[str, Any] , *__lowerCAmelCase : Optional[Any] , **__lowerCAmelCase : List[str] ): requires_backends(cls , ["""flax"""] ) class a ( metaclass=lowerCAmelCase_ ): _snake_case : List[Any] = ['flax'] def __init__( self : Tuple , *__lowerCAmelCase : Optional[Any] , **__lowerCAmelCase : Any ): requires_backends(self , ["""flax"""] ) @classmethod def lowerCAmelCase_ ( cls : Dict , *__lowerCAmelCase : Optional[int] , **__lowerCAmelCase : List[str] ): requires_backends(cls , ["""flax"""] ) @classmethod def lowerCAmelCase_ ( cls : Tuple , *__lowerCAmelCase : List[Any] , **__lowerCAmelCase : Optional[int] ): requires_backends(cls , ["""flax"""] ) class a ( metaclass=lowerCAmelCase_ ): _snake_case : List[str] = ['flax'] def __init__( self : Optional[int] , *__lowerCAmelCase : List[str] , **__lowerCAmelCase : Any ): requires_backends(self , ["""flax"""] ) @classmethod def lowerCAmelCase_ ( cls : Dict , *__lowerCAmelCase : Optional[int] , **__lowerCAmelCase : Optional[Any] ): requires_backends(cls , ["""flax"""] ) @classmethod def lowerCAmelCase_ ( cls : Tuple , *__lowerCAmelCase : Union[str, Any] , **__lowerCAmelCase : Any ): requires_backends(cls , ["""flax"""] ) class a ( metaclass=lowerCAmelCase_ ): _snake_case : Optional[Any] = ['flax'] def __init__( self : Any , *__lowerCAmelCase : Dict , **__lowerCAmelCase : Optional[int] ): requires_backends(self , ["""flax"""] ) @classmethod def lowerCAmelCase_ ( cls : Tuple , *__lowerCAmelCase : List[Any] , **__lowerCAmelCase : str ): requires_backends(cls , ["""flax"""] ) @classmethod def lowerCAmelCase_ ( cls : int , *__lowerCAmelCase : str , **__lowerCAmelCase : int ): requires_backends(cls , ["""flax"""] ) class a ( metaclass=lowerCAmelCase_ ): _snake_case : List[str] = ['flax'] def __init__( self : Optional[Any] , *__lowerCAmelCase : List[str] , **__lowerCAmelCase : Dict ): requires_backends(self , ["""flax"""] ) @classmethod def lowerCAmelCase_ ( cls : Tuple , *__lowerCAmelCase : int , **__lowerCAmelCase : int ): requires_backends(cls , ["""flax"""] ) @classmethod def lowerCAmelCase_ ( cls : Union[str, Any] , *__lowerCAmelCase : Dict , **__lowerCAmelCase : Union[str, Any] ): requires_backends(cls , ["""flax"""] )
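These placeholder classes exist so that importing the package without `flax` installed only fails when a flax-backed object is actually used: `DummyObject` is a metaclass that routes class-attribute access through `requires_backends`. A simplified sketch of the mechanism follows; it is illustrative, not the actual diffusers implementation.

# Simplified sketch of the dummy-object pattern used above (illustrative only).
class DummyMeta(type):
    def __getattr__(cls, name):
        # Triggered by class-level access such as FlaxOnlyThing.from_pretrained.
        raise ImportError(f"{cls.__name__} requires the flax library, which is not installed.")


class FlaxOnlyThing(metaclass=DummyMeta):
    def __init__(self, *args, **kwargs):
        # Triggered by instantiation: FlaxOnlyThing().
        raise ImportError(f"{type(self).__name__} requires the flax library, which is not installed.")

# FlaxOnlyThing()               -> ImportError at construction time
# FlaxOnlyThing.from_pretrained -> ImportError at attribute access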
30
"""simple docstring""" import multiprocessing import time from arguments import PretokenizationArguments from datasets import load_dataset from transformers import AutoTokenizer, HfArgumentParser def __UpperCAmelCase ( lowercase ): """simple docstring""" _UpperCAmelCase = {} _UpperCAmelCase = tokenizer(example["""content"""] ,truncation=lowercase )["""input_ids"""] _UpperCAmelCase = len(example["""content"""] ) / len(output["""input_ids"""] ) return output UpperCAmelCase__ = HfArgumentParser(PretokenizationArguments) UpperCAmelCase__ = parser.parse_args() if args.num_workers is None: UpperCAmelCase__ = multiprocessing.cpu_count() UpperCAmelCase__ = AutoTokenizer.from_pretrained(args.tokenizer_dir) UpperCAmelCase__ = time.time() UpperCAmelCase__ = load_dataset(args.dataset_name, split="""train""") print(F'''Dataset loaded in {time.time()-t_start:.2f}s''') UpperCAmelCase__ = time.time() UpperCAmelCase__ = ds.map( tokenize, num_proc=args.num_workers, remove_columns=[ """repo_name""", """path""", """copies""", """size""", """content""", """license""", """hash""", """line_mean""", """line_max""", """alpha_frac""", """autogenerated""", ], ) print(F'''Dataset tokenized in {time.time()-t_start:.2f}s''') UpperCAmelCase__ = time.time() ds.push_to_hub(args.tokenized_data_repo) print(F'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
30
1
"""simple docstring""" def __UpperCAmelCase ( lowercase ,lowercase ): """simple docstring""" _UpperCAmelCase = len(lowercase ) print("""The following activities are selected:""" ) # The first activity is always selected _UpperCAmelCase = 0 print(lowercase ,end=""",""" ) # Consider rest of the activities for j in range(lowercase ): # If this activity has start time greater than # or equal to the finish time of previously # selected activity, then select it if start[j] >= finish[i]: print(lowercase ,end=""",""" ) _UpperCAmelCase = j if __name__ == "__main__": import doctest doctest.testmod() UpperCAmelCase__ = [1, 3, 0, 5, 8, 5] UpperCAmelCase__ = [2, 4, 6, 7, 9, 9] print_max_activities(start, finish)
30
"""simple docstring""" from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...onnx.utils import compute_effective_axis_dimension from ...utils import logging if TYPE_CHECKING: from ...processing_utils import ProcessorMixin from ...utils import TensorType UpperCAmelCase__ = logging.get_logger(__name__) UpperCAmelCase__ = { """microsoft/layoutlmv3-base""": """https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json""", } class a ( lowerCAmelCase_ ): _snake_case : Any = 'layoutlmv3' def __init__( self : Optional[Any] , __lowerCAmelCase : Tuple=5_0265 , __lowerCAmelCase : Union[str, Any]=768 , __lowerCAmelCase : str=12 , __lowerCAmelCase : int=12 , __lowerCAmelCase : Any=3072 , __lowerCAmelCase : Any="gelu" , __lowerCAmelCase : Any=0.1 , __lowerCAmelCase : Tuple=0.1 , __lowerCAmelCase : Any=512 , __lowerCAmelCase : List[str]=2 , __lowerCAmelCase : Optional[Any]=0.02 , __lowerCAmelCase : Optional[int]=1e-5 , __lowerCAmelCase : int=1 , __lowerCAmelCase : Optional[Any]=0 , __lowerCAmelCase : Optional[Any]=2 , __lowerCAmelCase : List[str]=1024 , __lowerCAmelCase : Any=128 , __lowerCAmelCase : int=128 , __lowerCAmelCase : Tuple=True , __lowerCAmelCase : List[Any]=32 , __lowerCAmelCase : Any=128 , __lowerCAmelCase : int=64 , __lowerCAmelCase : List[str]=256 , __lowerCAmelCase : Union[str, Any]=True , __lowerCAmelCase : Any=True , __lowerCAmelCase : List[str]=True , __lowerCAmelCase : Optional[Any]=224 , __lowerCAmelCase : List[Any]=3 , __lowerCAmelCase : int=16 , __lowerCAmelCase : Optional[Any]=None , **__lowerCAmelCase : Union[str, Any] , ): super().__init__( vocab_size=__lowerCAmelCase , hidden_size=__lowerCAmelCase , num_hidden_layers=__lowerCAmelCase , num_attention_heads=__lowerCAmelCase , intermediate_size=__lowerCAmelCase , hidden_act=__lowerCAmelCase , hidden_dropout_prob=__lowerCAmelCase , attention_probs_dropout_prob=__lowerCAmelCase , max_position_embeddings=__lowerCAmelCase , type_vocab_size=__lowerCAmelCase , initializer_range=__lowerCAmelCase , layer_norm_eps=__lowerCAmelCase , pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , **__lowerCAmelCase , ) _UpperCAmelCase = max_ad_position_embeddings _UpperCAmelCase = coordinate_size _UpperCAmelCase = shape_size _UpperCAmelCase = has_relative_attention_bias _UpperCAmelCase = rel_pos_bins _UpperCAmelCase = max_rel_pos _UpperCAmelCase = has_spatial_attention_bias _UpperCAmelCase = rel_ad_pos_bins _UpperCAmelCase = max_rel_ad_pos _UpperCAmelCase = text_embed _UpperCAmelCase = visual_embed _UpperCAmelCase = input_size _UpperCAmelCase = num_channels _UpperCAmelCase = patch_size _UpperCAmelCase = classifier_dropout class a ( lowerCAmelCase_ ): _snake_case : str = version.parse('1.12' ) @property def lowerCAmelCase_ ( self : Dict ): # The order of inputs is different for question answering and sequence classification if self.task in ["question-answering", "sequence-classification"]: return OrderedDict( [ ("""input_ids""", {0: """batch""", 1: """sequence"""}), ("""attention_mask""", {0: """batch""", 1: """sequence"""}), ("""bbox""", {0: """batch""", 1: """sequence"""}), ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) else: return OrderedDict( [ ("""input_ids""", {0: """batch""", 1: """sequence"""}), ("""bbox""", {0: """batch""", 1: """sequence"""}), ("""attention_mask""", {0: 
"""batch""", 1: """sequence"""}), ("""pixel_values""", {0: """batch""", 1: """num_channels"""}), ] ) @property def lowerCAmelCase_ ( self : List[Any] ): return 1e-5 @property def lowerCAmelCase_ ( self : List[str] ): return 12 def lowerCAmelCase_ ( self : str , __lowerCAmelCase : "ProcessorMixin" , __lowerCAmelCase : int = -1 , __lowerCAmelCase : int = -1 , __lowerCAmelCase : bool = False , __lowerCAmelCase : Optional["TensorType"] = None , __lowerCAmelCase : int = 3 , __lowerCAmelCase : int = 40 , __lowerCAmelCase : int = 40 , ): setattr(processor.image_processor , """apply_ocr""" , __lowerCAmelCase ) # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX _UpperCAmelCase = compute_effective_axis_dimension( __lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX _UpperCAmelCase = processor.tokenizer.num_special_tokens_to_add(__lowerCAmelCase ) _UpperCAmelCase = compute_effective_axis_dimension( __lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__lowerCAmelCase ) # Generate dummy inputs according to compute batch and sequence _UpperCAmelCase = [[""" """.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size # Generate dummy bounding boxes _UpperCAmelCase = [[[48, 84, 73, 128]]] * batch_size # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX # batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch) _UpperCAmelCase = self._generate_dummy_images(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase = dict( processor( __lowerCAmelCase , text=__lowerCAmelCase , boxes=__lowerCAmelCase , return_tensors=__lowerCAmelCase , ) ) return inputs
30
1
"""simple docstring""" import math def __UpperCAmelCase ( lowercase ): """simple docstring""" _UpperCAmelCase = [True] * n _UpperCAmelCase = False _UpperCAmelCase = False _UpperCAmelCase = True for i in range(3 ,int(n**0.5 + 1 ) ,2 ): _UpperCAmelCase = i * 2 while index < n: _UpperCAmelCase = False _UpperCAmelCase = index + i _UpperCAmelCase = [2] for i in range(3 ,lowercase ,2 ): if is_prime[i]: primes.append(lowercase ) return primes def __UpperCAmelCase ( lowercase = 99_99_66_66_33_33 ): """simple docstring""" _UpperCAmelCase = math.floor(math.sqrt(lowercase ) ) + 1_00 _UpperCAmelCase = prime_sieve(lowercase ) _UpperCAmelCase = 0 _UpperCAmelCase = 0 _UpperCAmelCase = primes[prime_index] while (last_prime**2) <= limit: _UpperCAmelCase = primes[prime_index + 1] _UpperCAmelCase = last_prime**2 _UpperCAmelCase = next_prime**2 # Get numbers divisible by lps(current) _UpperCAmelCase = lower_bound + last_prime while upper_bound > current <= limit: matches_sum += current current += last_prime # Reset the upper_bound while (upper_bound - next_prime) > limit: upper_bound -= next_prime # Add the numbers divisible by ups(current) _UpperCAmelCase = upper_bound - next_prime while current > lower_bound: matches_sum += current current -= next_prime # Remove the numbers divisible by both ups and lps _UpperCAmelCase = 0 while upper_bound > current <= limit: if current <= lower_bound: # Increment the current number current += last_prime * next_prime continue if current > limit: break # Remove twice since it was added by both ups and lps matches_sum -= current * 2 # Increment the current number current += last_prime * next_prime # Setup for next pair _UpperCAmelCase = next_prime prime_index += 1 return matches_sum if __name__ == "__main__": print(solution())
30
"""simple docstring""" import csv from collections import defaultdict from dataclasses import dataclass, field from typing import List, Optional import matplotlib.pyplot as plt import numpy as np from matplotlib.ticker import ScalarFormatter from transformers import HfArgumentParser def __UpperCAmelCase ( lowercase=None ,lowercase=None ): """simple docstring""" return field(default_factory=lambda: default ,metadata=lowercase ) @dataclass class a : _snake_case : str = field( metadata={'help': 'The csv file to plot.'} , ) _snake_case : bool = field( default=lowerCAmelCase_ , metadata={'help': 'Whether to plot along batch size or sequence length. Defaults to sequence length.'} , ) _snake_case : bool = field( default=lowerCAmelCase_ , metadata={'help': 'Whether the csv file has time results or memory results. Defaults to memory results.'} , ) _snake_case : bool = field( default=lowerCAmelCase_ , metadata={'help': 'Disable logarithmic scale when plotting'} , ) _snake_case : bool = field( default=lowerCAmelCase_ , metadata={ 'help': 'Whether the csv file has training results or inference results. Defaults to inference results.' } , ) _snake_case : Optional[str] = field( default=lowerCAmelCase_ , metadata={'help': 'Filename under which the plot will be saved. If unused no plot is saved.'} , ) _snake_case : Optional[List[str]] = list_field( default=lowerCAmelCase_ , metadata={'help': 'List of model names that are used instead of the ones in the csv file.'} ) def __UpperCAmelCase ( lowercase ): """simple docstring""" try: int(lowercase ) return True except ValueError: return False def __UpperCAmelCase ( lowercase ): """simple docstring""" try: float(lowercase ) return True except ValueError: return False class a : def __init__( self : int , __lowerCAmelCase : Union[str, Any] ): _UpperCAmelCase = args _UpperCAmelCase = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}} ) with open(self.args.csv_file , newline="""""" ) as csv_file: _UpperCAmelCase = csv.DictReader(__lowerCAmelCase ) for row in reader: _UpperCAmelCase = row["""model"""] self.result_dict[model_name]["bsz"].append(int(row["""batch_size"""] ) ) self.result_dict[model_name]["seq_len"].append(int(row["""sequence_length"""] ) ) if can_convert_to_int(row["""result"""] ): # value is not None _UpperCAmelCase = int(row["""result"""] ) elif can_convert_to_float(row["""result"""] ): # value is not None _UpperCAmelCase = float(row["""result"""] ) def lowerCAmelCase_ ( self : Optional[Any] ): _UpperCAmelCase , _UpperCAmelCase = plt.subplots() _UpperCAmelCase = """Time usage""" if self.args.is_time else """Memory usage""" _UpperCAmelCase = title_str + """ for training""" if self.args.is_train else title_str + """ for inference""" if not self.args.no_log_scale: # set logarithm scales ax.set_xscale("""log""" ) ax.set_yscale("""log""" ) for axis in [ax.xaxis, ax.yaxis]: axis.set_major_formatter(ScalarFormatter() ) for model_name_idx, model_name in enumerate(self.result_dict.keys() ): _UpperCAmelCase = sorted(set(self.result_dict[model_name]["""bsz"""] ) ) _UpperCAmelCase = sorted(set(self.result_dict[model_name]["""seq_len"""] ) ) _UpperCAmelCase = self.result_dict[model_name]["""result"""] ((_UpperCAmelCase) , (_UpperCAmelCase)) = ( (batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes) ) _UpperCAmelCase = ( model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx] ) for inner_loop_value in inner_loop_array: if self.args.plot_along_batch: _UpperCAmelCase = 
np.asarray( [results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results] , dtype=__lowerCAmelCase , ) else: _UpperCAmelCase = np.asarray( [results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results] , dtype=np.floataa , ) ((_UpperCAmelCase) , (_UpperCAmelCase)) = ( ("""batch_size""", """len""") if self.args.plot_along_batch else ("""in #tokens""", """bsz""") ) _UpperCAmelCase = np.asarray(__lowerCAmelCase , __lowerCAmelCase )[: len(__lowerCAmelCase )] plt.scatter( __lowerCAmelCase , __lowerCAmelCase , label=f'''{label_model_name} - {inner_loop_label}: {inner_loop_value}''' ) plt.plot(__lowerCAmelCase , __lowerCAmelCase , """--""" ) title_str += f''' {label_model_name} vs.''' _UpperCAmelCase = title_str[:-4] _UpperCAmelCase = """Time in s""" if self.args.is_time else """Memory in MB""" # plot plt.title(__lowerCAmelCase ) plt.xlabel(__lowerCAmelCase ) plt.ylabel(__lowerCAmelCase ) plt.legend() if self.args.figure_png_file is not None: plt.savefig(self.args.figure_png_file ) else: plt.show() def __UpperCAmelCase ( ): """simple docstring""" _UpperCAmelCase = HfArgumentParser(lowercase ) _UpperCAmelCase = parser.parse_args_into_dataclasses()[0] _UpperCAmelCase = Plot(args=lowercase ) plot.plot() if __name__ == "__main__": main()
30
1
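A readable sketch of the odd-only sieve of Eratosthenes that the snippet above builds its prime list from; `prime_sieve` is a stand-in name and the function assumes n >= 3:

import math

def prime_sieve(n: int) -> list[int]:
    # Boolean table over [0, n); only odd indices are ever read after setup.
    is_prime = [False, False] + [True] * (n - 2)
    for i in range(3, math.isqrt(n) + 1, 2):
        if is_prime[i]:
            for multiple in range(i * i, n, 2 * i):  # odd multiples of i only
                is_prime[multiple] = False
    return [2] + [i for i in range(3, n, 2) if is_prime[i]]

assert prime_sieve(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]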
"""simple docstring""" import math from typing import Optional import numpy as np from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase__ = logging.get_logger(__name__) UpperCAmelCase__ = { """facebook/encodec_24khz""": """https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json""", """facebook/encodec_48khz""": """https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json""", } class a ( lowerCAmelCase_ ): _snake_case : int = 'encodec' def __init__( self : int , __lowerCAmelCase : str=[1.5, 3.0, 6.0, 12.0, 24.0] , __lowerCAmelCase : Union[str, Any]=2_4000 , __lowerCAmelCase : Union[str, Any]=1 , __lowerCAmelCase : List[str]=False , __lowerCAmelCase : Optional[int]=None , __lowerCAmelCase : List[str]=None , __lowerCAmelCase : int=128 , __lowerCAmelCase : List[str]=32 , __lowerCAmelCase : int=1 , __lowerCAmelCase : List[Any]=[8, 5, 4, 2] , __lowerCAmelCase : List[str]="weight_norm" , __lowerCAmelCase : str=7 , __lowerCAmelCase : Optional[Any]=7 , __lowerCAmelCase : Union[str, Any]=3 , __lowerCAmelCase : Dict=2 , __lowerCAmelCase : str=True , __lowerCAmelCase : List[str]="reflect" , __lowerCAmelCase : int=2 , __lowerCAmelCase : Any=2 , __lowerCAmelCase : Optional[Any]=1.0 , __lowerCAmelCase : List[str]=1024 , __lowerCAmelCase : str=None , __lowerCAmelCase : Union[str, Any]=True , **__lowerCAmelCase : Optional[Any] , ): _UpperCAmelCase = target_bandwidths _UpperCAmelCase = sampling_rate _UpperCAmelCase = audio_channels _UpperCAmelCase = normalize _UpperCAmelCase = chunk_length_s _UpperCAmelCase = overlap _UpperCAmelCase = hidden_size _UpperCAmelCase = num_filters _UpperCAmelCase = num_residual_layers _UpperCAmelCase = upsampling_ratios _UpperCAmelCase = norm_type _UpperCAmelCase = kernel_size _UpperCAmelCase = last_kernel_size _UpperCAmelCase = residual_kernel_size _UpperCAmelCase = dilation_growth_rate _UpperCAmelCase = use_causal_conv _UpperCAmelCase = pad_mode _UpperCAmelCase = compress _UpperCAmelCase = num_lstm_layers _UpperCAmelCase = trim_right_ratio _UpperCAmelCase = codebook_size _UpperCAmelCase = codebook_dim if codebook_dim is not None else hidden_size _UpperCAmelCase = use_conv_shortcut if self.norm_type not in ["weight_norm", "time_group_norm"]: raise ValueError( f'''self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`), got {self.norm_type}''' ) super().__init__(**__lowerCAmelCase ) @property def lowerCAmelCase_ ( self : Optional[int] ): if self.chunk_length_s is None: return None else: return int(self.chunk_length_s * self.sampling_rate ) @property def lowerCAmelCase_ ( self : Tuple ): if self.chunk_length_s is None or self.overlap is None: return None else: return max(1 , int((1.0 - self.overlap) * self.chunk_length ) ) @property def lowerCAmelCase_ ( self : List[Any] ): _UpperCAmelCase = np.prod(self.upsampling_ratios ) return math.ceil(self.sampling_rate / hop_length ) @property def lowerCAmelCase_ ( self : List[str] ): return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10) )
30
"""simple docstring""" import os import pytest from attr import dataclass UpperCAmelCase__ = """us-east-1""" # defaults region @dataclass class a : _snake_case : str _snake_case : Tuple = 'arn:aws:iam::558105141721:role/sagemaker_execution_role' _snake_case : List[Any] = { 'task_name': 'mnli', 'per_device_train_batch_size': 16, 'per_device_eval_batch_size': 16, 'do_train': True, 'do_eval': True, 'do_predict': True, 'output_dir': '/opt/ml/model', 'overwrite_output_dir': True, 'max_steps': 5_00, 'save_steps': 55_00, } _snake_case : Optional[Any] = {**hyperparameters, 'max_steps': 10_00} @property def lowerCAmelCase_ ( self : Optional[Any] ): if self.framework == "pytorch": return [ {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"}, {"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"}, {"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"}, ] else: return [ {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"}, {"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"}, {"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"}, ] @property def lowerCAmelCase_ ( self : Dict ): return f'''{self.framework}-transfromers-test''' @property def lowerCAmelCase_ ( self : Union[str, Any] ): return f'''./tests/sagemaker/scripts/{self.framework}''' @property def lowerCAmelCase_ ( self : Dict ): if self.framework == "pytorch": return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04" else: return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04" @pytest.fixture(scope="""class""" ) def __UpperCAmelCase ( lowercase ): """simple docstring""" _UpperCAmelCase = SageMakerTestEnvironment(framework=request.cls.framework )
30
1
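The `chunk_length` and `chunk_stride` properties in the codec config above reduce to two lines of arithmetic. A sketch with assumed example values (24 kHz audio, 1 s chunks, 1 % overlap):

sampling_rate = 24_000  # Hz
chunk_length_s = 1.0    # seconds of audio per chunk
overlap = 0.01          # fraction of a chunk shared with its neighbor

chunk_length = int(chunk_length_s * sampling_rate)          # 24000 samples
chunk_stride = max(1, int((1.0 - overlap) * chunk_length))  # 23760 samples
print(chunk_length, chunk_stride)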
"""simple docstring""" def __UpperCAmelCase ( lowercase ): """simple docstring""" for i in range(len(lowercase ) - 1 ,0 ,-1 ): _UpperCAmelCase = False for j in range(lowercase ,0 ,-1 ): if unsorted[j] < unsorted[j - 1]: _UpperCAmelCase , _UpperCAmelCase = unsorted[j - 1], unsorted[j] _UpperCAmelCase = True for j in range(lowercase ): if unsorted[j] > unsorted[j + 1]: _UpperCAmelCase , _UpperCAmelCase = unsorted[j + 1], unsorted[j] _UpperCAmelCase = True if not swapped: break return unsorted if __name__ == "__main__": import doctest doctest.testmod() UpperCAmelCase__ = input("""Enter numbers separated by a comma:\n""").strip() UpperCAmelCase__ = [int(item) for item in user_input.split(""",""")] print(F'''{cocktail_shaker_sort(unsorted) = }''')
30
"""simple docstring""" import string from math import logaa def __UpperCAmelCase ( lowercase ,lowercase ): """simple docstring""" _UpperCAmelCase = document.translate( str.maketrans("""""" ,"""""" ,string.punctuation ) ).replace("""\n""" ,"""""" ) _UpperCAmelCase = document_without_punctuation.split(""" """ ) # word tokenization return len([word for word in tokenize_document if word.lower() == term.lower()] ) def __UpperCAmelCase ( lowercase ,lowercase ): """simple docstring""" _UpperCAmelCase = corpus.lower().translate( str.maketrans("""""" ,"""""" ,string.punctuation ) ) # strip all punctuation and replace it with '' _UpperCAmelCase = corpus_without_punctuation.split("""\n""" ) _UpperCAmelCase = term.lower() return (len([doc for doc in docs if term in doc] ), len(lowercase )) def __UpperCAmelCase ( lowercase ,lowercase ,lowercase=False ): """simple docstring""" if smoothing: if n == 0: raise ValueError("""log10(0) is undefined.""" ) return round(1 + logaa(n / (1 + df) ) ,3 ) if df == 0: raise ZeroDivisionError("""df must be > 0""" ) elif n == 0: raise ValueError("""log10(0) is undefined.""" ) return round(logaa(n / df ) ,3 ) def __UpperCAmelCase ( lowercase ,lowercase ): """simple docstring""" return round(tf * idf ,3 )
30
1
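A readable sketch of the bidirectional bubble ("cocktail shaker") sort from the snippet above: a backward pass bubbles small items left, a forward pass bubbles large items right, and sorting stops early once a full cycle makes no swap:

def cocktail_shaker_sort(items: list) -> list:
    for i in range(len(items) - 1, 0, -1):
        swapped = False
        for j in range(i, 0, -1):  # backward pass
            if items[j] < items[j - 1]:
                items[j], items[j - 1] = items[j - 1], items[j]
                swapped = True
        for j in range(i):  # forward pass
            if items[j] > items[j + 1]:
                items[j], items[j + 1] = items[j + 1], items[j]
                swapped = True
        if not swapped:
            break
    return items

assert cocktail_shaker_sort([4, 5, 2, 1, 2]) == [1, 2, 2, 4, 5]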
"""simple docstring""" import unittest import numpy as np import torch from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class a ( unittest.TestCase ): @property def lowerCAmelCase_ ( self : str ): torch.manual_seed(0 ) _UpperCAmelCase = UNetaDModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , ) return model def lowerCAmelCase_ ( self : Optional[Any] ): _UpperCAmelCase = self.dummy_uncond_unet _UpperCAmelCase = KarrasVeScheduler() _UpperCAmelCase = KarrasVePipeline(unet=__lowerCAmelCase , scheduler=__lowerCAmelCase ) pipe.to(__lowerCAmelCase ) pipe.set_progress_bar_config(disable=__lowerCAmelCase ) _UpperCAmelCase = torch.manual_seed(0 ) _UpperCAmelCase = pipe(num_inference_steps=2 , generator=__lowerCAmelCase , output_type="""numpy""" ).images _UpperCAmelCase = torch.manual_seed(0 ) _UpperCAmelCase = pipe(num_inference_steps=2 , generator=__lowerCAmelCase , output_type="""numpy""" , return_dict=__lowerCAmelCase )[0] _UpperCAmelCase = image[0, -3:, -3:, -1] _UpperCAmelCase = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) _UpperCAmelCase = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 @slow @require_torch class a ( unittest.TestCase ): def lowerCAmelCase_ ( self : Any ): _UpperCAmelCase = """google/ncsnpp-celebahq-256""" _UpperCAmelCase = UNetaDModel.from_pretrained(__lowerCAmelCase ) _UpperCAmelCase = KarrasVeScheduler() _UpperCAmelCase = KarrasVePipeline(unet=__lowerCAmelCase , scheduler=__lowerCAmelCase ) pipe.to(__lowerCAmelCase ) pipe.set_progress_bar_config(disable=__lowerCAmelCase ) _UpperCAmelCase = torch.manual_seed(0 ) _UpperCAmelCase = pipe(num_inference_steps=20 , generator=__lowerCAmelCase , output_type="""numpy""" ).images _UpperCAmelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 256, 256, 3) _UpperCAmelCase = np.array([0.578, 0.5_811, 0.5_924, 0.5_809, 0.587, 0.5_886, 0.5_861, 0.5_802, 0.586] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
30
"""simple docstring""" import argparse from torch import nn # transformers_old should correspond to branch `save_old_prophetnet_model_structure` here # original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively from transformers_old.modeling_prophetnet import ( ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld, ) from transformers_old.modeling_xlm_prophetnet import ( XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld, ) from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging UpperCAmelCase__ = logging.get_logger(__name__) logging.set_verbosity_info() def __UpperCAmelCase ( lowercase ,lowercase ): """simple docstring""" if "xprophetnet" in prophetnet_checkpoint_path: _UpperCAmelCase = XLMProphetNetForConditionalGenerationOld.from_pretrained(lowercase ) _UpperCAmelCase , _UpperCAmelCase = XLMProphetNetForConditionalGeneration.from_pretrained( lowercase ,output_loading_info=lowercase ) else: _UpperCAmelCase = ProphetNetForConditionalGenerationOld.from_pretrained(lowercase ) _UpperCAmelCase , _UpperCAmelCase = ProphetNetForConditionalGeneration.from_pretrained( lowercase ,output_loading_info=lowercase ) _UpperCAmelCase = ["""key_proj""", """value_proj""", """query_proj"""] _UpperCAmelCase = { """self_attn""": """ngram_self_attn""", """cross_attn""": """encoder_attn""", """cross_attn_layer_norm""": """encoder_attn_layer_norm""", """feed_forward_layer_norm""": """final_layer_norm""", """feed_forward""": """""", """intermediate""": """fc1""", """output""": """fc2""", """key_proj""": """k_proj""", """query_proj""": """q_proj""", """value_proj""": """v_proj""", """word_embeddings""": """embed_tokens""", """embeddings_layer_norm""": """emb_layer_norm""", """relative_pos_embeddings""": """relative_linear""", """ngram_embeddings""": """ngram_input_embed""", """position_embeddings""": """embed_positions""", } for key in loading_info["missing_keys"]: _UpperCAmelCase = key.split(""".""" ) if attributes[0] == "lm_head": _UpperCAmelCase = prophet _UpperCAmelCase = prophet_old else: _UpperCAmelCase = prophet.prophetnet _UpperCAmelCase = prophet_old.model _UpperCAmelCase = False for attribute in attributes: if attribute in mapping: _UpperCAmelCase = mapping[attribute] if not hasattr(lowercase ,lowercase ) and len(lowercase ) > 0: _UpperCAmelCase = attribute elif hasattr(lowercase ,lowercase ): _UpperCAmelCase = attribute if attribute == "weight": assert old_model.weight.shape == model.weight.shape, "Shapes have to match!" _UpperCAmelCase = old_model.weight logger.info(f'''{attribute} is initialized.''' ) _UpperCAmelCase = True break elif attribute == "bias": assert old_model.bias.shape == model.bias.shape, "Shapes have to match!" 
_UpperCAmelCase = old_model.bias logger.info(f'''{attribute} is initialized''' ) _UpperCAmelCase = True break elif attribute in special_keys and hasattr(lowercase ,"""in_proj_weight""" ): _UpperCAmelCase = old_model.in_proj_weight.shape[0] // 3 _UpperCAmelCase = getattr(lowercase ,lowercase ) assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match" assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match" if attribute == "query_proj": _UpperCAmelCase = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] ) _UpperCAmelCase = nn.Parameter(old_model.in_proj_bias[:embed_dim] ) elif attribute == "key_proj": _UpperCAmelCase = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] ) _UpperCAmelCase = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] ) elif attribute == "value_proj": _UpperCAmelCase = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] ) _UpperCAmelCase = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] ) _UpperCAmelCase = True break elif attribute == "position_embeddings": assert ( model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1] ), "Hidden size has to match" assert model.position_embeddings.weight.shape[0] == 5_12, "We want 512 position_embeddings." _UpperCAmelCase = nn.Parameter(old_model.embed_positions.weight[:5_12, :] ) _UpperCAmelCase = True break if attribute.isdigit(): _UpperCAmelCase = model[int(lowercase )] _UpperCAmelCase = old_model[int(lowercase )] else: _UpperCAmelCase = getattr(lowercase ,lowercase ) if old_attribute == "": _UpperCAmelCase = old_model else: if not hasattr(lowercase ,lowercase ): raise ValueError(f'''{old_model} does not have {old_attribute}''' ) _UpperCAmelCase = getattr(lowercase ,lowercase ) if not is_key_init: raise ValueError(f'''{key} was not correctly initialized!''' ) print(f'''Saving model to {pytorch_dump_folder_path}''' ) prophet.save_pretrained(lowercase ) if __name__ == "__main__": UpperCAmelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--prophetnet_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump.""" ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) UpperCAmelCase__ = parser.parse_args() convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
30
1
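The attention conversion above slices one fused `in_proj` matrix into separate query/key/value projections by rows. A minimal torch sketch of that split, on made-up sizes:

import torch

embed_dim = 4
in_proj_weight = torch.randn(3 * embed_dim, embed_dim)  # fused [q; k; v] rows
in_proj_bias = torch.randn(3 * embed_dim)

# Row-wise thirds correspond to the query, key and value projections.
q_w, k_w, v_w = in_proj_weight.chunk(3, dim=0)
q_b, k_b, v_b = in_proj_bias.chunk(3, dim=0)

assert torch.equal(q_w, in_proj_weight[:embed_dim, :])
assert torch.equal(k_w, in_proj_weight[embed_dim : 2 * embed_dim, :])
assert torch.equal(v_w, in_proj_weight[2 * embed_dim :, :])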
"""simple docstring""" def __UpperCAmelCase ( lowercase = 1_00 ): """simple docstring""" _UpperCAmelCase = (n * (n + 1) // 2) ** 2 _UpperCAmelCase = n * (n + 1) * (2 * n + 1) // 6 return sum_cubes - sum_squares if __name__ == "__main__": print(F'''{solution() = }''')
30
"""simple docstring""" import mpmath # for roots of unity import numpy as np class a : def __init__( self : Tuple , __lowerCAmelCase : Dict=None , __lowerCAmelCase : Union[str, Any]=None ): # Input as list _UpperCAmelCase = list(poly_a or [0] )[:] _UpperCAmelCase = list(poly_b or [0] )[:] # Remove leading zero coefficients while self.polyA[-1] == 0: self.polyA.pop() _UpperCAmelCase = len(self.polyA ) while self.polyB[-1] == 0: self.polyB.pop() _UpperCAmelCase = len(self.polyB ) # Add 0 to make lengths equal a power of 2 _UpperCAmelCase = int( 2 ** np.ceil(np.loga(len(self.polyA ) + len(self.polyB ) - 1 ) ) ) while len(self.polyA ) < self.c_max_length: self.polyA.append(0 ) while len(self.polyB ) < self.c_max_length: self.polyB.append(0 ) # A complex root used for the fourier transform _UpperCAmelCase = complex(mpmath.root(x=1 , n=self.c_max_length , k=1 ) ) # The product _UpperCAmelCase = self.__multiply() def lowerCAmelCase_ ( self : Optional[int] , __lowerCAmelCase : str ): _UpperCAmelCase = [[x] for x in self.polyA] if which == """A""" else [[x] for x in self.polyB] # Corner case if len(__lowerCAmelCase ) <= 1: return dft[0] # _UpperCAmelCase = self.c_max_length // 2 while next_ncol > 0: _UpperCAmelCase = [[] for i in range(__lowerCAmelCase )] _UpperCAmelCase = self.root**next_ncol # First half of next step _UpperCAmelCase = 1 for j in range(self.c_max_length // (next_ncol * 2) ): for i in range(__lowerCAmelCase ): new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j] ) current_root *= root # Second half of next step _UpperCAmelCase = 1 for j in range(self.c_max_length // (next_ncol * 2) ): for i in range(__lowerCAmelCase ): new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j] ) current_root *= root # Update _UpperCAmelCase = new_dft _UpperCAmelCase = next_ncol // 2 return dft[0] def lowerCAmelCase_ ( self : Tuple ): _UpperCAmelCase = self.__dft("""A""" ) _UpperCAmelCase = self.__dft("""B""" ) _UpperCAmelCase = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length )]] del dft_a del dft_b # Corner Case if len(inverce_c[0] ) <= 1: return inverce_c[0] # Inverse DFT _UpperCAmelCase = 2 while next_ncol <= self.c_max_length: _UpperCAmelCase = [[] for i in range(__lowerCAmelCase )] _UpperCAmelCase = self.root ** (next_ncol // 2) _UpperCAmelCase = 1 # First half of next step for j in range(self.c_max_length // next_ncol ): for i in range(next_ncol // 2 ): # Even positions new_inverse_c[i].append( ( inverce_c[i][j] + inverce_c[i][j + self.c_max_length // next_ncol] ) / 2 ) # Odd positions new_inverse_c[i + next_ncol // 2].append( ( inverce_c[i][j] - inverce_c[i][j + self.c_max_length // next_ncol] ) / (2 * current_root) ) current_root *= root # Update _UpperCAmelCase = new_inverse_c next_ncol *= 2 # Unpack _UpperCAmelCase = [round(x[0].real , 8 ) + round(x[0].imag , 8 ) * 1j for x in inverce_c] # Remove leading 0's while inverce_c[-1] == 0: inverce_c.pop() return inverce_c def __str__( self : Dict ): _UpperCAmelCase = """A = """ + """ + """.join( f'''{coef}*x^{i}''' for coef, i in enumerate(self.polyA[: self.len_A] ) ) _UpperCAmelCase = """B = """ + """ + """.join( f'''{coef}*x^{i}''' for coef, i in enumerate(self.polyB[: self.len_B] ) ) _UpperCAmelCase = """A*B = """ + """ + """.join( f'''{coef}*x^{i}''' for coef, i in enumerate(self.product ) ) return f'''{a}\n{b}\n{c}''' # Unit tests if __name__ == "__main__": import doctest doctest.testmod()
30
1
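The sum-square-difference solution above relies on the closed forms sum(k) = n(n+1)/2 and sum(k^2) = n(n+1)(2n+1)/6. A readable version with a brute-force cross-check:

def sum_square_difference(n: int = 100) -> int:
    square_of_sum = (n * (n + 1) // 2) ** 2
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    return square_of_sum - sum_of_squares

# Brute-force cross-check of the closed forms for a small n.
assert sum_square_difference(10) == sum(range(1, 11)) ** 2 - sum(k * k for k in range(1, 11))
assert sum_square_difference(100) == 25164150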
"""simple docstring""" import inspect import unittest from transformers import ViTMSNConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ViTMSNForImageClassification, ViTMSNModel from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class a : def __init__( self : Dict , __lowerCAmelCase : List[str] , __lowerCAmelCase : Optional[int]=13 , __lowerCAmelCase : List[str]=30 , __lowerCAmelCase : int=2 , __lowerCAmelCase : Tuple=3 , __lowerCAmelCase : Optional[int]=True , __lowerCAmelCase : Optional[int]=True , __lowerCAmelCase : Optional[Any]=32 , __lowerCAmelCase : Tuple=5 , __lowerCAmelCase : Tuple=4 , __lowerCAmelCase : Tuple=37 , __lowerCAmelCase : Optional[int]="gelu" , __lowerCAmelCase : Optional[int]=0.1 , __lowerCAmelCase : Optional[int]=0.1 , __lowerCAmelCase : Tuple=10 , __lowerCAmelCase : List[str]=0.02 , __lowerCAmelCase : Union[str, Any]=None , ): _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = image_size _UpperCAmelCase = patch_size _UpperCAmelCase = num_channels _UpperCAmelCase = is_training _UpperCAmelCase = use_labels _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_act _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = type_sequence_label_size _UpperCAmelCase = initializer_range _UpperCAmelCase = scope # in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) _UpperCAmelCase = (image_size // patch_size) ** 2 _UpperCAmelCase = num_patches + 1 def lowerCAmelCase_ ( self : Any ): _UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _UpperCAmelCase = None if self.use_labels: _UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _UpperCAmelCase = self.get_config() return config, pixel_values, labels def lowerCAmelCase_ ( self : Optional[int] ): return ViTMSNConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , ) def lowerCAmelCase_ ( self : int , __lowerCAmelCase : int , __lowerCAmelCase : Any , __lowerCAmelCase : List[Any] ): _UpperCAmelCase = ViTMSNModel(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _UpperCAmelCase = model(__lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCAmelCase_ ( self : Optional[int] , __lowerCAmelCase : str , __lowerCAmelCase : str , __lowerCAmelCase : List[Any] ): _UpperCAmelCase = self.type_sequence_label_size _UpperCAmelCase = 
ViTMSNForImageClassification(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _UpperCAmelCase = model(__lowerCAmelCase , labels=__lowerCAmelCase ) print("""Pixel and labels shape: {pixel_values.shape}, {labels.shape}""" ) print("""Labels: {labels}""" ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images _UpperCAmelCase = 1 _UpperCAmelCase = ViTMSNForImageClassification(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _UpperCAmelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) _UpperCAmelCase = model(__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def lowerCAmelCase_ ( self : Optional[Any] ): _UpperCAmelCase = self.prepare_config_and_inputs() _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = config_and_inputs _UpperCAmelCase = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class a ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): _snake_case : Optional[int] = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else () _snake_case : int = ( {'feature-extraction': ViTMSNModel, 'image-classification': ViTMSNForImageClassification} if is_torch_available() else {} ) _snake_case : Optional[Any] = False _snake_case : int = False _snake_case : List[str] = False _snake_case : int = False def lowerCAmelCase_ ( self : str ): _UpperCAmelCase = ViTMSNModelTester(self ) _UpperCAmelCase = ConfigTester(self , config_class=__lowerCAmelCase , has_text_modality=__lowerCAmelCase , hidden_size=37 ) def lowerCAmelCase_ ( self : Any ): self.config_tester.run_common_tests() @unittest.skip(reason="""ViTMSN does not use inputs_embeds""" ) def lowerCAmelCase_ ( self : List[Any] ): pass def lowerCAmelCase_ ( self : str ): _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCAmelCase = model_class(__lowerCAmelCase ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) _UpperCAmelCase = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__lowerCAmelCase , nn.Linear ) ) def lowerCAmelCase_ ( self : List[str] ): _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCAmelCase = model_class(__lowerCAmelCase ) _UpperCAmelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _UpperCAmelCase = [*signature.parameters.keys()] _UpperCAmelCase = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , __lowerCAmelCase ) def lowerCAmelCase_ ( self : Optional[Any] ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : int ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__lowerCAmelCase ) @slow def lowerCAmelCase_ ( self : Optional[Any] ): for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _UpperCAmelCase = ViTMSNModel.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) def __UpperCAmelCase ( ): """simple docstring""" _UpperCAmelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision class a ( unittest.TestCase ): @cached_property def 
lowerCAmelCase_ ( self : str ): return ViTImageProcessor.from_pretrained("""facebook/vit-msn-small""" ) if is_vision_available() else None @slow def lowerCAmelCase_ ( self : str ): torch.manual_seed(2 ) _UpperCAmelCase = ViTMSNForImageClassification.from_pretrained("""facebook/vit-msn-small""" ).to(__lowerCAmelCase ) _UpperCAmelCase = self.default_image_processor _UpperCAmelCase = prepare_img() _UpperCAmelCase = image_processor(images=__lowerCAmelCase , return_tensors="""pt""" ).to(__lowerCAmelCase ) # forward pass with torch.no_grad(): _UpperCAmelCase = model(**__lowerCAmelCase ) # verify the logits _UpperCAmelCase = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , __lowerCAmelCase ) _UpperCAmelCase = torch.tensor([-0.0_803, -0.4_454, -0.2_375] ).to(__lowerCAmelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCAmelCase , atol=1e-4 ) )
30
"""simple docstring""" import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto.configuration_auto import CONFIG_MAPPING UpperCAmelCase__ = logging.get_logger(__name__) class a ( lowerCAmelCase_ ): _snake_case : List[str] = 'upernet' def __init__( self : Tuple , __lowerCAmelCase : int=None , __lowerCAmelCase : Tuple=512 , __lowerCAmelCase : Union[str, Any]=0.02 , __lowerCAmelCase : Tuple=[1, 2, 3, 6] , __lowerCAmelCase : Any=True , __lowerCAmelCase : Any=0.4 , __lowerCAmelCase : Union[str, Any]=384 , __lowerCAmelCase : Optional[int]=256 , __lowerCAmelCase : List[str]=1 , __lowerCAmelCase : Optional[int]=False , __lowerCAmelCase : Optional[int]=255 , **__lowerCAmelCase : Union[str, Any] , ): super().__init__(**__lowerCAmelCase ) if backbone_config is None: logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" ) _UpperCAmelCase = CONFIG_MAPPING["""resnet"""](out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] ) elif isinstance(__lowerCAmelCase , __lowerCAmelCase ): _UpperCAmelCase = backbone_config.get("""model_type""" ) _UpperCAmelCase = CONFIG_MAPPING[backbone_model_type] _UpperCAmelCase = config_class.from_dict(__lowerCAmelCase ) _UpperCAmelCase = backbone_config _UpperCAmelCase = hidden_size _UpperCAmelCase = initializer_range _UpperCAmelCase = pool_scales _UpperCAmelCase = use_auxiliary_head _UpperCAmelCase = auxiliary_loss_weight _UpperCAmelCase = auxiliary_in_channels _UpperCAmelCase = auxiliary_channels _UpperCAmelCase = auxiliary_num_convs _UpperCAmelCase = auxiliary_concat_input _UpperCAmelCase = loss_ignore_index def lowerCAmelCase_ ( self : List[Any] ): _UpperCAmelCase = copy.deepcopy(self.__dict__ ) _UpperCAmelCase = self.backbone_config.to_dict() _UpperCAmelCase = self.__class__.model_type return output
30
1
"""simple docstring""" from itertools import product def __UpperCAmelCase ( lowercase ,lowercase ): """simple docstring""" _UpperCAmelCase = sides_number _UpperCAmelCase = max_face_number * dice_number _UpperCAmelCase = [0] * (max_total + 1) _UpperCAmelCase = 1 _UpperCAmelCase = range(lowercase ,max_face_number + 1 ) for dice_numbers in product(lowercase ,repeat=lowercase ): _UpperCAmelCase = sum(lowercase ) totals_frequencies[total] += 1 return totals_frequencies def __UpperCAmelCase ( ): """simple docstring""" _UpperCAmelCase = total_frequency_distribution( sides_number=4 ,dice_number=9 ) _UpperCAmelCase = total_frequency_distribution( sides_number=6 ,dice_number=6 ) _UpperCAmelCase = 0 _UpperCAmelCase = 9 _UpperCAmelCase = 4 * 9 _UpperCAmelCase = 6 for peter_total in range(lowercase ,max_peter_total + 1 ): peter_wins_count += peter_totals_frequencies[peter_total] * sum( colin_totals_frequencies[min_colin_total:peter_total] ) _UpperCAmelCase = (4**9) * (6**6) _UpperCAmelCase = peter_wins_count / total_games_number _UpperCAmelCase = round(lowercase ,ndigits=7 ) return rounded_peter_win_probability if __name__ == "__main__": print(F'''{solution() = }''')
30
"""simple docstring""" from itertools import product def __UpperCAmelCase ( lowercase ,lowercase ): """simple docstring""" _UpperCAmelCase = sides_number _UpperCAmelCase = max_face_number * dice_number _UpperCAmelCase = [0] * (max_total + 1) _UpperCAmelCase = 1 _UpperCAmelCase = range(lowercase ,max_face_number + 1 ) for dice_numbers in product(lowercase ,repeat=lowercase ): _UpperCAmelCase = sum(lowercase ) totals_frequencies[total] += 1 return totals_frequencies def __UpperCAmelCase ( ): """simple docstring""" _UpperCAmelCase = total_frequency_distribution( sides_number=4 ,dice_number=9 ) _UpperCAmelCase = total_frequency_distribution( sides_number=6 ,dice_number=6 ) _UpperCAmelCase = 0 _UpperCAmelCase = 9 _UpperCAmelCase = 4 * 9 _UpperCAmelCase = 6 for peter_total in range(lowercase ,max_peter_total + 1 ): peter_wins_count += peter_totals_frequencies[peter_total] * sum( colin_totals_frequencies[min_colin_total:peter_total] ) _UpperCAmelCase = (4**9) * (6**6) _UpperCAmelCase = peter_wins_count / total_games_number _UpperCAmelCase = round(lowercase ,ndigits=7 ) return rounded_peter_win_probability if __name__ == "__main__": print(F'''{solution() = }''')
30
1
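A sketch of the dice-total frequency table that the two identical snippets above build with itertools.product, checked on two four-sided dice:

from itertools import product

def total_frequencies(sides: int, dice: int) -> dict[int, int]:
    freq: dict[int, int] = {}
    for roll in product(range(1, sides + 1), repeat=dice):
        total = sum(roll)
        freq[total] = freq.get(total, 0) + 1
    return freq

# Two d4s: totals 2..8 occur with frequencies 1, 2, 3, 4, 3, 2, 1.
assert total_frequencies(4, 2) == {2: 1, 3: 2, 4: 3, 5: 4, 6: 3, 7: 2, 8: 1}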
"""simple docstring""" import argparse import json import re from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( MobileNetVaConfig, MobileNetVaForImageClassification, MobileNetVaImageProcessor, load_tf_weights_in_mobilenet_va, ) from transformers.utils import logging logging.set_verbosity_info() UpperCAmelCase__ = logging.get_logger(__name__) def __UpperCAmelCase ( lowercase ): """simple docstring""" _UpperCAmelCase = MobileNetVaConfig(layer_norm_eps=0.0_01 ) if "_quant" in model_name: raise ValueError("""Quantized models are not supported.""" ) _UpperCAmelCase = re.match(R"""^mobilenet_v1_([^_]*)_([^_]*)$""" ,lowercase ) if matches: _UpperCAmelCase = float(matches[1] ) _UpperCAmelCase = int(matches[2] ) # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of # the usual 1000. The first class (index 0) is "background". _UpperCAmelCase = 10_01 _UpperCAmelCase = """imagenet-1k-id2label.json""" _UpperCAmelCase = """huggingface/label-files""" _UpperCAmelCase = json.load(open(hf_hub_download(lowercase ,lowercase ,repo_type="""dataset""" ) ,"""r""" ) ) _UpperCAmelCase = {int(lowercase ) + 1: v for k, v in idalabel.items()} _UpperCAmelCase = """background""" _UpperCAmelCase = idalabel _UpperCAmelCase = {v: k for k, v in idalabel.items()} return config def __UpperCAmelCase ( ): """simple docstring""" _UpperCAmelCase = """http://images.cocodataset.org/val2017/000000039769.jpg""" _UpperCAmelCase = Image.open(requests.get(lowercase ,stream=lowercase ).raw ) return im @torch.no_grad() def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ,lowercase=False ): """simple docstring""" _UpperCAmelCase = get_mobilenet_va_config(lowercase ) # Load 🤗 model _UpperCAmelCase = MobileNetVaForImageClassification(lowercase ).eval() # Load weights from TensorFlow checkpoint load_tf_weights_in_mobilenet_va(lowercase ,lowercase ,lowercase ) # Check outputs on an image, prepared by MobileNetV1ImageProcessor _UpperCAmelCase = MobileNetVaImageProcessor( crop_size={"""width""": config.image_size, """height""": config.image_size} ,size={"""shortest_edge""": config.image_size + 32} ,) _UpperCAmelCase = image_processor(images=prepare_img() ,return_tensors="""pt""" ) _UpperCAmelCase = model(**lowercase ) _UpperCAmelCase = outputs.logits assert logits.shape == (1, 10_01) if model_name == "mobilenet_v1_1.0_224": _UpperCAmelCase = torch.tensor([-4.17_39, -1.12_33, 3.12_05] ) elif model_name == "mobilenet_v1_0.75_192": _UpperCAmelCase = torch.tensor([-3.94_40, -2.31_41, -0.33_33] ) else: _UpperCAmelCase = None if expected_logits is not None: assert torch.allclose(logits[0, :3] ,lowercase ,atol=1E-4 ) Path(lowercase ).mkdir(exist_ok=lowercase ) print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(lowercase ) print(f'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(lowercase ) if push_to_hub: print("""Pushing to the hub...""" ) _UpperCAmelCase = """google/""" + model_name image_processor.push_to_hub(lowercase ) model.push_to_hub(lowercase ) if __name__ == "__main__": UpperCAmelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""mobilenet_v1_1.0_224""", type=str, help="""Name of the MobileNetV1 model you'd like to convert. 
Should be in the form 'mobilenet_v1_<depth>_<size>'.""", ) parser.add_argument( """--checkpoint_path""", required=True, type=str, help="""Path to the original TensorFlow checkpoint (.ckpt file).""" ) parser.add_argument( """--pytorch_dump_folder_path""", required=True, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) UpperCAmelCase__ = parser.parse_args() convert_movilevit_checkpoint( args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub )
30
"""simple docstring""" import copy from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto.configuration_auto import AutoConfig if TYPE_CHECKING: from ... import PreTrainedTokenizerBase, TensorType UpperCAmelCase__ = logging.get_logger(__name__) class a ( lowerCAmelCase_ ): _snake_case : List[Any] = 'vision-encoder-decoder' _snake_case : Optional[int] = True def __init__( self : int , **__lowerCAmelCase : Any ): super().__init__(**__lowerCAmelCase ) if "encoder" not in kwargs or "decoder" not in kwargs: raise ValueError( f'''A configuraton of type {self.model_type} cannot be instantiated because ''' f'''not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}''' ) _UpperCAmelCase = kwargs.pop("""encoder""" ) _UpperCAmelCase = encoder_config.pop("""model_type""" ) _UpperCAmelCase = kwargs.pop("""decoder""" ) _UpperCAmelCase = decoder_config.pop("""model_type""" ) _UpperCAmelCase = AutoConfig.for_model(__lowerCAmelCase , **__lowerCAmelCase ) _UpperCAmelCase = AutoConfig.for_model(__lowerCAmelCase , **__lowerCAmelCase ) _UpperCAmelCase = True @classmethod def lowerCAmelCase_ ( cls : int , __lowerCAmelCase : PretrainedConfig , __lowerCAmelCase : PretrainedConfig , **__lowerCAmelCase : str ): logger.info("""Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config""" ) _UpperCAmelCase = True _UpperCAmelCase = True return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **__lowerCAmelCase ) def lowerCAmelCase_ ( self : int ): _UpperCAmelCase = copy.deepcopy(self.__dict__ ) _UpperCAmelCase = self.encoder.to_dict() _UpperCAmelCase = self.decoder.to_dict() _UpperCAmelCase = self.__class__.model_type return output class a ( lowerCAmelCase_ ): _snake_case : Union[str, Any] = version.parse('1.11' ) @property def lowerCAmelCase_ ( self : int ): return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) @property def lowerCAmelCase_ ( self : Tuple ): return 1e-4 @property def lowerCAmelCase_ ( self : Dict ): return OrderedDict({"""last_hidden_state""": {0: """batch""", 1: """encoder_sequence"""}} ) class a ( lowerCAmelCase_ ): @property def lowerCAmelCase_ ( self : Any ): _UpperCAmelCase = OrderedDict() _UpperCAmelCase = {0: """batch""", 1: """past_decoder_sequence + sequence"""} _UpperCAmelCase = {0: """batch""", 1: """past_decoder_sequence + sequence"""} _UpperCAmelCase = {0: """batch""", 1: """encoder_sequence"""} return common_inputs def lowerCAmelCase_ ( self : Union[str, Any] , __lowerCAmelCase : "PreTrainedTokenizerBase" , __lowerCAmelCase : int = -1 , __lowerCAmelCase : int = -1 , __lowerCAmelCase : bool = False , __lowerCAmelCase : Optional["TensorType"] = None , ): import torch _UpperCAmelCase = OrderedDict() _UpperCAmelCase = super().generate_dummy_inputs( __lowerCAmelCase , batch_size=__lowerCAmelCase , seq_length=__lowerCAmelCase , is_pair=__lowerCAmelCase , framework=__lowerCAmelCase ) _UpperCAmelCase , _UpperCAmelCase = dummy_input["""input_ids"""].shape _UpperCAmelCase = (batch, encoder_sequence, self._config.encoder_hidden_size) _UpperCAmelCase = dummy_input.pop("""input_ids""" ) _UpperCAmelCase = dummy_input.pop("""attention_mask""" ) _UpperCAmelCase = torch.zeros(__lowerCAmelCase ) return common_inputs class a ( lowerCAmelCase_ ): @property def lowerCAmelCase_ ( self : Tuple ): 
pass def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : PretrainedConfig ): return VisionEncoderDecoderEncoderOnnxConfig(__lowerCAmelCase ) def lowerCAmelCase_ ( self : int , __lowerCAmelCase : PretrainedConfig , __lowerCAmelCase : PretrainedConfig , __lowerCAmelCase : str = "default" ): _UpperCAmelCase = encoder_config.hidden_size return VisionEncoderDecoderDecoderOnnxConfig(__lowerCAmelCase , __lowerCAmelCase )
30
1
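The label remapping in the MobileNet conversion above shifts every ImageNet id up by one and reserves index 0 for a background class, since the TF checkpoint predicts 1001 classes. A tiny sketch on an illustrative two-entry mapping:

raw_labels = {"0": "tench", "1": "goldfish"}  # as loaded from the label file
id2label = {int(k) + 1: v for k, v in raw_labels.items()}
id2label[0] = "background"  # TF MobileNetV1 reserves class 0

assert id2label == {0: "background", 1: "tench", 2: "goldfish"}
label2id = {v: k for k, v in id2label.items()}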
"""simple docstring""" import gc import random import unittest import numpy as np import torch from transformers import ( CLIPImageProcessor, CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer, CLIPVisionConfig, CLIPVisionModelWithProjection, ) from diffusers import ( DiffusionPipeline, UnCLIPImageVariationPipeline, UnCLIPScheduler, UNetaDConditionModel, UNetaDModel, ) from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel from diffusers.utils import floats_tensor, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class a ( lowerCAmelCase_ , unittest.TestCase ): _snake_case : Union[str, Any] = UnCLIPImageVariationPipeline _snake_case : Optional[int] = IMAGE_VARIATION_PARAMS - {'height', 'width', 'guidance_scale'} _snake_case : int = IMAGE_VARIATION_BATCH_PARAMS _snake_case : Dict = [ 'generator', 'return_dict', 'decoder_num_inference_steps', 'super_res_num_inference_steps', ] _snake_case : Dict = False @property def lowerCAmelCase_ ( self : Optional[int] ): return 32 @property def lowerCAmelCase_ ( self : Tuple ): return 32 @property def lowerCAmelCase_ ( self : str ): return self.time_input_dim @property def lowerCAmelCase_ ( self : Dict ): return self.time_input_dim * 4 @property def lowerCAmelCase_ ( self : Optional[int] ): return 100 @property def lowerCAmelCase_ ( self : List[str] ): _UpperCAmelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) return tokenizer @property def lowerCAmelCase_ ( self : Dict ): torch.manual_seed(0 ) _UpperCAmelCase = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) return CLIPTextModelWithProjection(__lowerCAmelCase ) @property def lowerCAmelCase_ ( self : Dict ): torch.manual_seed(0 ) _UpperCAmelCase = CLIPVisionConfig( hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ) return CLIPVisionModelWithProjection(__lowerCAmelCase ) @property def lowerCAmelCase_ ( self : Optional[int] ): torch.manual_seed(0 ) _UpperCAmelCase = { """clip_embeddings_dim""": self.text_embedder_hidden_size, """time_embed_dim""": self.time_embed_dim, """cross_attention_dim""": self.cross_attention_dim, } _UpperCAmelCase = UnCLIPTextProjModel(**__lowerCAmelCase ) return model @property def lowerCAmelCase_ ( self : Optional[int] ): torch.manual_seed(0 ) _UpperCAmelCase = { """sample_size""": 32, # RGB in channels """in_channels""": 3, # Out channels is double in channels because predicts mean and variance """out_channels""": 6, """down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""), """up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""), """mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""", """block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2), """layers_per_block""": 1, """cross_attention_dim""": self.cross_attention_dim, """attention_head_dim""": 4, """resnet_time_scale_shift""": 
"""scale_shift""", """class_embed_type""": """identity""", } _UpperCAmelCase = UNetaDConditionModel(**__lowerCAmelCase ) return model @property def lowerCAmelCase_ ( self : List[Any] ): return { "sample_size": 64, "layers_per_block": 1, "down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"), "up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"), "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2), "in_channels": 6, "out_channels": 3, } @property def lowerCAmelCase_ ( self : Any ): torch.manual_seed(0 ) _UpperCAmelCase = UNetaDModel(**self.dummy_super_res_kwargs ) return model @property def lowerCAmelCase_ ( self : int ): # seeded differently to get different unet than `self.dummy_super_res_first` torch.manual_seed(1 ) _UpperCAmelCase = UNetaDModel(**self.dummy_super_res_kwargs ) return model def lowerCAmelCase_ ( self : Dict ): _UpperCAmelCase = self.dummy_decoder _UpperCAmelCase = self.dummy_text_proj _UpperCAmelCase = self.dummy_text_encoder _UpperCAmelCase = self.dummy_tokenizer _UpperCAmelCase = self.dummy_super_res_first _UpperCAmelCase = self.dummy_super_res_last _UpperCAmelCase = UnCLIPScheduler( variance_type="""learned_range""" , prediction_type="""epsilon""" , num_train_timesteps=1000 , ) _UpperCAmelCase = UnCLIPScheduler( variance_type="""fixed_small_log""" , prediction_type="""epsilon""" , num_train_timesteps=1000 , ) _UpperCAmelCase = CLIPImageProcessor(crop_size=32 , size=32 ) _UpperCAmelCase = self.dummy_image_encoder return { "decoder": decoder, "text_encoder": text_encoder, "tokenizer": tokenizer, "text_proj": text_proj, "feature_extractor": feature_extractor, "image_encoder": image_encoder, "super_res_first": super_res_first, "super_res_last": super_res_last, "decoder_scheduler": decoder_scheduler, "super_res_scheduler": super_res_scheduler, } def lowerCAmelCase_ ( self : int , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[int]=0 , __lowerCAmelCase : str=True ): _UpperCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(__lowerCAmelCase ) ).to(__lowerCAmelCase ) if str(__lowerCAmelCase ).startswith("""mps""" ): _UpperCAmelCase = torch.manual_seed(__lowerCAmelCase ) else: _UpperCAmelCase = torch.Generator(device=__lowerCAmelCase ).manual_seed(__lowerCAmelCase ) if pil_image: _UpperCAmelCase = input_image * 0.5 + 0.5 _UpperCAmelCase = input_image.clamp(0 , 1 ) _UpperCAmelCase = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() _UpperCAmelCase = DiffusionPipeline.numpy_to_pil(__lowerCAmelCase )[0] return { "image": input_image, "generator": generator, "decoder_num_inference_steps": 2, "super_res_num_inference_steps": 2, "output_type": "np", } def lowerCAmelCase_ ( self : int ): _UpperCAmelCase = """cpu""" _UpperCAmelCase = self.get_dummy_components() _UpperCAmelCase = self.pipeline_class(**__lowerCAmelCase ) _UpperCAmelCase = pipe.to(__lowerCAmelCase ) pipe.set_progress_bar_config(disable=__lowerCAmelCase ) _UpperCAmelCase = self.get_dummy_inputs(__lowerCAmelCase , pil_image=__lowerCAmelCase ) _UpperCAmelCase = pipe(**__lowerCAmelCase ) _UpperCAmelCase = output.images _UpperCAmelCase = self.get_dummy_inputs(__lowerCAmelCase , pil_image=__lowerCAmelCase ) _UpperCAmelCase = pipe( **__lowerCAmelCase , return_dict=__lowerCAmelCase , )[0] _UpperCAmelCase = image[0, -3:, -3:, -1] _UpperCAmelCase = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) _UpperCAmelCase = np.array( [ 0.9_997, 0.0_002, 0.9_997, 0.9_997, 0.9_969, 0.0_023, 0.9_997, 0.9_969, 0.9_970, ] ) 
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 def lowerCAmelCase_ ( self : str ): _UpperCAmelCase = """cpu""" _UpperCAmelCase = self.get_dummy_components() _UpperCAmelCase = self.pipeline_class(**__lowerCAmelCase ) _UpperCAmelCase = pipe.to(__lowerCAmelCase ) pipe.set_progress_bar_config(disable=__lowerCAmelCase ) _UpperCAmelCase = self.get_dummy_inputs(__lowerCAmelCase , pil_image=__lowerCAmelCase ) _UpperCAmelCase = pipe(**__lowerCAmelCase ) _UpperCAmelCase = output.images _UpperCAmelCase = self.get_dummy_inputs(__lowerCAmelCase , pil_image=__lowerCAmelCase ) _UpperCAmelCase = pipe( **__lowerCAmelCase , return_dict=__lowerCAmelCase , )[0] _UpperCAmelCase = image[0, -3:, -3:, -1] _UpperCAmelCase = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) _UpperCAmelCase = np.array([0.9_997, 0.0_003, 0.9_997, 0.9_997, 0.9_970, 0.0_024, 0.9_997, 0.9_971, 0.9_971] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 def lowerCAmelCase_ ( self : Union[str, Any] ): _UpperCAmelCase = """cpu""" _UpperCAmelCase = self.get_dummy_components() _UpperCAmelCase = self.pipeline_class(**__lowerCAmelCase ) _UpperCAmelCase = pipe.to(__lowerCAmelCase ) pipe.set_progress_bar_config(disable=__lowerCAmelCase ) _UpperCAmelCase = self.get_dummy_inputs(__lowerCAmelCase , pil_image=__lowerCAmelCase ) _UpperCAmelCase = [ pipeline_inputs["""image"""], pipeline_inputs["""image"""], ] _UpperCAmelCase = pipe(**__lowerCAmelCase ) _UpperCAmelCase = output.images _UpperCAmelCase = self.get_dummy_inputs(__lowerCAmelCase , pil_image=__lowerCAmelCase ) _UpperCAmelCase = [ tuple_pipeline_inputs["""image"""], tuple_pipeline_inputs["""image"""], ] _UpperCAmelCase = pipe( **__lowerCAmelCase , return_dict=__lowerCAmelCase , )[0] _UpperCAmelCase = image[0, -3:, -3:, -1] _UpperCAmelCase = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (2, 64, 64, 3) _UpperCAmelCase = np.array( [ 0.9_997, 0.9_989, 0.0_008, 0.0_021, 0.9_960, 0.0_018, 0.0_014, 0.0_002, 0.9_933, ] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 def lowerCAmelCase_ ( self : List[str] ): _UpperCAmelCase = torch.device("""cpu""" ) class a : _snake_case : Union[str, Any] = 1 _UpperCAmelCase = self.get_dummy_components() _UpperCAmelCase = self.pipeline_class(**__lowerCAmelCase ) _UpperCAmelCase = pipe.to(__lowerCAmelCase ) pipe.set_progress_bar_config(disable=__lowerCAmelCase ) _UpperCAmelCase = torch.Generator(device=__lowerCAmelCase ).manual_seed(0 ) _UpperCAmelCase = pipe.decoder.dtype _UpperCAmelCase = 1 _UpperCAmelCase = ( batch_size, pipe.decoder.config.in_channels, pipe.decoder.config.sample_size, pipe.decoder.config.sample_size, ) _UpperCAmelCase = pipe.prepare_latents( __lowerCAmelCase , dtype=__lowerCAmelCase , device=__lowerCAmelCase , generator=__lowerCAmelCase , latents=__lowerCAmelCase , scheduler=DummyScheduler() ) _UpperCAmelCase = ( batch_size, pipe.super_res_first.config.in_channels // 2, pipe.super_res_first.config.sample_size, pipe.super_res_first.config.sample_size, ) _UpperCAmelCase = pipe.prepare_latents( __lowerCAmelCase , dtype=__lowerCAmelCase , device=__lowerCAmelCase , generator=__lowerCAmelCase , latents=__lowerCAmelCase , scheduler=DummyScheduler() ) _UpperCAmelCase = self.get_dummy_inputs(__lowerCAmelCase , 
pil_image=__lowerCAmelCase ) _UpperCAmelCase = pipe( **__lowerCAmelCase , decoder_latents=__lowerCAmelCase , super_res_latents=__lowerCAmelCase ).images _UpperCAmelCase = self.get_dummy_inputs(__lowerCAmelCase , pil_image=__lowerCAmelCase ) # Don't pass image, instead pass embedding _UpperCAmelCase = pipeline_inputs.pop("""image""" ) _UpperCAmelCase = pipe.image_encoder(__lowerCAmelCase ).image_embeds _UpperCAmelCase = pipe( **__lowerCAmelCase , decoder_latents=__lowerCAmelCase , super_res_latents=__lowerCAmelCase , image_embeddings=__lowerCAmelCase , ).images # make sure passing text embeddings manually is identical assert np.abs(img_out_a - img_out_a ).max() < 1e-4 @skip_mps def lowerCAmelCase_ ( self : List[str] ): _UpperCAmelCase = torch_device == """cpu""" # Check is relaxed because there is not a torch 2.0 sliced attention added kv processor _UpperCAmelCase = 1e-2 self._test_attention_slicing_forward_pass( test_max_difference=__lowerCAmelCase , expected_max_diff=__lowerCAmelCase ) @skip_mps def lowerCAmelCase_ ( self : int ): _UpperCAmelCase = torch_device == """cpu""" _UpperCAmelCase = True _UpperCAmelCase = [ """decoder_num_inference_steps""", """super_res_num_inference_steps""", ] self._test_inference_batch_single_identical( test_max_difference=__lowerCAmelCase , relax_max_difference=__lowerCAmelCase , additional_params_copy_to_batched_inputs=__lowerCAmelCase , ) def lowerCAmelCase_ ( self : Dict ): _UpperCAmelCase = [ """decoder_num_inference_steps""", """super_res_num_inference_steps""", ] if torch_device == "mps": # TODO: MPS errors with larger batch sizes _UpperCAmelCase = [2, 3] self._test_inference_batch_consistent( batch_sizes=__lowerCAmelCase , additional_params_copy_to_batched_inputs=__lowerCAmelCase , ) else: self._test_inference_batch_consistent( additional_params_copy_to_batched_inputs=__lowerCAmelCase ) @skip_mps def lowerCAmelCase_ ( self : Any ): return super().test_dict_tuple_outputs_equivalent() @skip_mps def lowerCAmelCase_ ( self : Dict ): return super().test_save_load_local() @skip_mps def lowerCAmelCase_ ( self : List[Any] ): return super().test_save_load_optional_components() @slow @require_torch_gpu class a ( unittest.TestCase ): def lowerCAmelCase_ ( self : Any ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCAmelCase_ ( self : Tuple ): _UpperCAmelCase = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png""" ) _UpperCAmelCase = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/unclip/karlo_v1_alpha_cat_variation_fp16.npy""" ) _UpperCAmelCase = UnCLIPImageVariationPipeline.from_pretrained( """kakaobrain/karlo-v1-alpha-image-variations""" , torch_dtype=torch.floataa ) _UpperCAmelCase = pipeline.to(__lowerCAmelCase ) pipeline.set_progress_bar_config(disable=__lowerCAmelCase ) _UpperCAmelCase = torch.Generator(device="""cpu""" ).manual_seed(0 ) _UpperCAmelCase = pipeline( __lowerCAmelCase , generator=__lowerCAmelCase , output_type="""np""" , ) _UpperCAmelCase = output.images[0] assert image.shape == (256, 256, 3) assert_mean_pixel_difference(__lowerCAmelCase , __lowerCAmelCase , 15 )
30
"""simple docstring""" import argparse from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline if __name__ == "__main__": UpperCAmelCase__ = argparse.ArgumentParser() parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""") parser.add_argument( """--txt2img_unclip""", default="""kakaobrain/karlo-v1-alpha""", type=str, required=False, help="""The pretrained txt2img unclip.""", ) UpperCAmelCase__ = parser.parse_args() UpperCAmelCase__ = UnCLIPPipeline.from_pretrained(args.txtaimg_unclip) UpperCAmelCase__ = CLIPImageProcessor() UpperCAmelCase__ = CLIPVisionModelWithProjection.from_pretrained("""openai/clip-vit-large-patch14""") UpperCAmelCase__ = UnCLIPImageVariationPipeline( decoder=txtaimg.decoder, text_encoder=txtaimg.text_encoder, tokenizer=txtaimg.tokenizer, text_proj=txtaimg.text_proj, feature_extractor=feature_extractor, image_encoder=image_encoder, super_res_first=txtaimg.super_res_first, super_res_last=txtaimg.super_res_last, decoder_scheduler=txtaimg.decoder_scheduler, super_res_scheduler=txtaimg.super_res_scheduler, ) imgaimg.save_pretrained(args.dump_path)
30
1
"""simple docstring""" import argparse import collections import os import re import tempfile import pandas as pd from datasets import Dataset from huggingface_hub import hf_hub_download, upload_folder from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/update_metadata.py UpperCAmelCase__ = """src/transformers""" # This is to make sure the transformers module imported is the one in the repo. UpperCAmelCase__ = direct_transformers_import(TRANSFORMERS_PATH) # Regexes that match TF/Flax/PT model names. UpperCAmelCase__ = re.compile(r"""TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""") UpperCAmelCase__ = re.compile(r"""Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""") # Will match any TF or Flax model too so need to be in an else branch afterthe two previous regexes. UpperCAmelCase__ = re.compile(r"""(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""") # Fill this with tuples (pipeline_tag, model_mapping, auto_model) UpperCAmelCase__ = [ ("""pretraining""", """MODEL_FOR_PRETRAINING_MAPPING_NAMES""", """AutoModelForPreTraining"""), ("""feature-extraction""", """MODEL_MAPPING_NAMES""", """AutoModel"""), ("""audio-classification""", """MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES""", """AutoModelForAudioClassification"""), ("""text-generation""", """MODEL_FOR_CAUSAL_LM_MAPPING_NAMES""", """AutoModelForCausalLM"""), ("""automatic-speech-recognition""", """MODEL_FOR_CTC_MAPPING_NAMES""", """AutoModelForCTC"""), ("""image-classification""", """MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES""", """AutoModelForImageClassification"""), ("""image-segmentation""", """MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES""", """AutoModelForImageSegmentation"""), ("""fill-mask""", """MODEL_FOR_MASKED_LM_MAPPING_NAMES""", """AutoModelForMaskedLM"""), ("""object-detection""", """MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES""", """AutoModelForObjectDetection"""), ( """zero-shot-object-detection""", """MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES""", """AutoModelForZeroShotObjectDetection""", ), ("""question-answering""", """MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES""", """AutoModelForQuestionAnswering"""), ("""text2text-generation""", """MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES""", """AutoModelForSeq2SeqLM"""), ("""text-classification""", """MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES""", """AutoModelForSequenceClassification"""), ("""automatic-speech-recognition""", """MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES""", """AutoModelForSpeechSeq2Seq"""), ( """table-question-answering""", """MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES""", """AutoModelForTableQuestionAnswering""", ), ("""token-classification""", """MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES""", """AutoModelForTokenClassification"""), ("""multiple-choice""", """MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES""", """AutoModelForMultipleChoice"""), ( """next-sentence-prediction""", """MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES""", """AutoModelForNextSentencePrediction""", ), ( """audio-frame-classification""", """MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES""", """AutoModelForAudioFrameClassification""", ), ("""audio-xvector""", """MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES""", """AutoModelForAudioXVector"""), ( """document-question-answering""", """MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES""", """AutoModelForDocumentQuestionAnswering""", ), ( """visual-question-answering""", 
"""MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES""", """AutoModelForVisualQuestionAnswering""", ), ("""image-to-text""", """MODEL_FOR_FOR_VISION_2_SEQ_MAPPING_NAMES""", """AutoModelForVision2Seq"""), ( """zero-shot-image-classification""", """MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES""", """AutoModelForZeroShotImageClassification""", ), ("""depth-estimation""", """MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES""", """AutoModelForDepthEstimation"""), ("""video-classification""", """MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES""", """AutoModelForVideoClassification"""), ("""mask-generation""", """MODEL_FOR_MASK_GENERATION_MAPPING_NAMES""", """AutoModelForMaskGeneration"""), ] def __UpperCAmelCase ( lowercase ): """simple docstring""" _UpperCAmelCase = re.finditer(""".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)""" ,lowercase ) return [m.group(0 ) for m in matches] def __UpperCAmelCase ( ): """simple docstring""" _UpperCAmelCase = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES _UpperCAmelCase = { config.replace("""Config""" ,"""""" ): model_type for model_type, config in config_maping_names.items() } # Dictionaries flagging if each model prefix has a backend in PT/TF/Flax. _UpperCAmelCase = collections.defaultdict(lowercase ) _UpperCAmelCase = collections.defaultdict(lowercase ) _UpperCAmelCase = collections.defaultdict(lowercase ) # Let's lookup through all transformers object (once) and find if models are supported by a given backend. for attr_name in dir(lowercase ): _UpperCAmelCase = None if _re_tf_models.match(lowercase ) is not None: _UpperCAmelCase = tf_models _UpperCAmelCase = _re_tf_models.match(lowercase ).groups()[0] elif _re_flax_models.match(lowercase ) is not None: _UpperCAmelCase = flax_models _UpperCAmelCase = _re_flax_models.match(lowercase ).groups()[0] elif _re_pt_models.match(lowercase ) is not None: _UpperCAmelCase = pt_models _UpperCAmelCase = _re_pt_models.match(lowercase ).groups()[0] if lookup_dict is not None: while len(lowercase ) > 0: if attr_name in model_prefix_to_model_type: _UpperCAmelCase = True break # Try again after removing the last word in the name _UpperCAmelCase = """""".join(camel_case_split(lowercase )[:-1] ) _UpperCAmelCase = set(list(pt_models.keys() ) + list(tf_models.keys() ) + list(flax_models.keys() ) ) _UpperCAmelCase = list(lowercase ) all_models.sort() _UpperCAmelCase = {"""model_type""": all_models} _UpperCAmelCase = [pt_models[t] for t in all_models] _UpperCAmelCase = [tf_models[t] for t in all_models] _UpperCAmelCase = [flax_models[t] for t in all_models] # Now let's use the auto-mapping names to make sure _UpperCAmelCase = {} for t in all_models: if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES: _UpperCAmelCase = """AutoProcessor""" elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES: _UpperCAmelCase = """AutoTokenizer""" elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES: _UpperCAmelCase = """AutoFeatureExtractor""" else: # Default to AutoTokenizer if a model has nothing, for backward compatibility. 
_UpperCAmelCase = """AutoTokenizer""" _UpperCAmelCase = [processors[t] for t in all_models] return pd.DataFrame(lowercase ) def __UpperCAmelCase ( lowercase ): """simple docstring""" _UpperCAmelCase = [ transformers_module.models.auto.modeling_auto, transformers_module.models.auto.modeling_tf_auto, transformers_module.models.auto.modeling_flax_auto, ] for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS: _UpperCAmelCase = [model_mapping, f'''TF_{model_mapping}''', f'''FLAX_{model_mapping}'''] _UpperCAmelCase = [auto_class, f'''TF_{auto_class}''', f'''Flax_{auto_class}'''] # Loop through all three frameworks for module, cls, mapping in zip(lowercase ,lowercase ,lowercase ): # The type of pipeline may not exist in this framework if not hasattr(lowercase ,lowercase ): continue # First extract all model_names _UpperCAmelCase = [] for name in getattr(lowercase ,lowercase ).values(): if isinstance(lowercase ,lowercase ): model_names.append(lowercase ) else: model_names.extend(list(lowercase ) ) # Add pipeline tag and auto model class for those models table.update({model_name: (pipeline_tag, cls) for model_name in model_names} ) return table def __UpperCAmelCase ( lowercase ,lowercase ): """simple docstring""" _UpperCAmelCase = get_frameworks_table() _UpperCAmelCase = Dataset.from_pandas(lowercase ) _UpperCAmelCase = hf_hub_download( """huggingface/transformers-metadata""" ,"""pipeline_tags.json""" ,repo_type="""dataset""" ,token=lowercase ) _UpperCAmelCase = Dataset.from_json(lowercase ) _UpperCAmelCase = { tags_dataset[i]["""model_class"""]: (tags_dataset[i]["""pipeline_tag"""], tags_dataset[i]["""auto_class"""]) for i in range(len(lowercase ) ) } _UpperCAmelCase = update_pipeline_and_auto_class_table(lowercase ) # Sort the model classes to avoid some nondeterministic updates to create false update commits. _UpperCAmelCase = sorted(table.keys() ) _UpperCAmelCase = pd.DataFrame( { """model_class""": model_classes, """pipeline_tag""": [table[m][0] for m in model_classes], """auto_class""": [table[m][1] for m in model_classes], } ) _UpperCAmelCase = Dataset.from_pandas(lowercase ) with tempfile.TemporaryDirectory() as tmp_dir: frameworks_dataset.to_json(os.path.join(lowercase ,"""frameworks.json""" ) ) tags_dataset.to_json(os.path.join(lowercase ,"""pipeline_tags.json""" ) ) if commit_sha is not None: _UpperCAmelCase = ( f'''Update with commit {commit_sha}\n\nSee: ''' f'''https://github.com/huggingface/transformers/commit/{commit_sha}''' ) else: _UpperCAmelCase = """Update""" upload_folder( repo_id="""huggingface/transformers-metadata""" ,folder_path=lowercase ,repo_type="""dataset""" ,token=lowercase ,commit_message=lowercase ,) def __UpperCAmelCase ( ): """simple docstring""" _UpperCAmelCase = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS} _UpperCAmelCase = transformers_module.pipelines.SUPPORTED_TASKS _UpperCAmelCase = [] for key in pipeline_tasks: if key not in in_table: _UpperCAmelCase = pipeline_tasks[key]["""pt"""] if isinstance(lowercase ,(list, tuple) ): _UpperCAmelCase = model[0] _UpperCAmelCase = model.__name__ if model not in in_table.values(): missing.append(lowercase ) if len(lowercase ) > 0: _UpperCAmelCase = """, """.join(lowercase ) raise ValueError( """The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside """ f'''`utils/update_metadata.py`: {msg}. 
Please add them!''' ) if __name__ == "__main__": UpperCAmelCase__ = argparse.ArgumentParser() parser.add_argument("""--token""", type=str, help="""The token to use to push to the transformers-metadata dataset.""") parser.add_argument("""--commit_sha""", type=str, help="""The sha of the commit going with this update.""") parser.add_argument("""--check-only""", action="""store_true""", help="""Activate to just check all pipelines are present.""") UpperCAmelCase__ = parser.parse_args() if args.check_only: check_pipeline_tags() else: update_metadata(args.token, args.commit_sha)
30
"""simple docstring""" import functools import gc import inspect import torch from .imports import is_npu_available, is_xpu_available def __UpperCAmelCase ( *lowercase ): """simple docstring""" if not isinstance(lowercase ,lowercase ): _UpperCAmelCase = list(lowercase ) for i in range(len(lowercase ) ): _UpperCAmelCase = None gc.collect() if is_xpu_available(): torch.xpu.empty_cache() elif is_npu_available(): torch.npu.empty_cache() else: torch.cuda.empty_cache() return objects def __UpperCAmelCase ( lowercase ): """simple docstring""" _UpperCAmelCase = [ """CUDA out of memory.""", # CUDA OOM """cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.""", # CUDNN SNAFU """DefaultCPUAllocator: can't allocate memory""", # CPU OOM ] if isinstance(lowercase ,lowercase ) and len(exception.args ) == 1: return any(err in exception.args[0] for err in _statements ) return False def __UpperCAmelCase ( lowercase = None ,lowercase = 1_28 ): """simple docstring""" if function is None: return functools.partial(lowercase ,starting_batch_size=lowercase ) _UpperCAmelCase = starting_batch_size def decorator(*lowercase ,**lowercase ): nonlocal batch_size gc.collect() if is_xpu_available(): torch.xpu.empty_cache() elif is_npu_available(): torch.npu.empty_cache() else: torch.cuda.empty_cache() _UpperCAmelCase = list(inspect.signature(lowercase ).parameters.keys() ) # Guard against user error if len(lowercase ) < (len(lowercase ) + 1): _UpperCAmelCase = """, """.join([f'''{arg}={value}''' for arg, value in zip(params[1:] ,args[1:] )] ) raise TypeError( f'''Batch size was passed into `{function.__name__}` as the first argument when called.''' f'''Remove this as the decorator already does so: `{function.__name__}({arg_str})`''' ) while True: if batch_size == 0: raise RuntimeError("""No executable batch size found, reached zero.""" ) try: return function(lowercase ,*lowercase ,**lowercase ) except Exception as e: if should_reduce_batch_size(lowercase ): gc.collect() if is_xpu_available(): torch.xpu.empty_cache() elif is_npu_available(): torch.npu.empty_cache() else: torch.cuda.empty_cache() batch_size //= 2 else: raise return decorator
30
1
"""simple docstring""" from ...utils import logging from ..ta.modeling_tf_ta import TFTaEncoderModel, TFTaForConditionalGeneration, TFTaModel from .configuration_mta import MTaConfig UpperCAmelCase__ = logging.get_logger(__name__) UpperCAmelCase__ = """T5Config""" class a ( lowerCAmelCase_ ): _snake_case : Optional[int] = 'mt5' _snake_case : Any = MTaConfig class a ( lowerCAmelCase_ ): _snake_case : Optional[int] = 'mt5' _snake_case : int = MTaConfig class a ( lowerCAmelCase_ ): _snake_case : Tuple = 'mt5' _snake_case : Union[str, Any] = MTaConfig
30
"""simple docstring""" from __future__ import annotations import unittest from transformers import MobileBertConfig, is_tf_available from transformers.models.auto import get_values from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_MODEL_FOR_PRETRAINING_MAPPING, TFMobileBertForMaskedLM, TFMobileBertForMultipleChoice, TFMobileBertForNextSentencePrediction, TFMobileBertForPreTraining, TFMobileBertForQuestionAnswering, TFMobileBertForSequenceClassification, TFMobileBertForTokenClassification, TFMobileBertModel, ) @require_tf class a ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): _snake_case : str = ( ( TFMobileBertModel, TFMobileBertForMaskedLM, TFMobileBertForNextSentencePrediction, TFMobileBertForPreTraining, TFMobileBertForQuestionAnswering, TFMobileBertForSequenceClassification, TFMobileBertForTokenClassification, TFMobileBertForMultipleChoice, ) if is_tf_available() else () ) _snake_case : Dict = ( { 'feature-extraction': TFMobileBertModel, 'fill-mask': TFMobileBertForMaskedLM, 'question-answering': TFMobileBertForQuestionAnswering, 'text-classification': TFMobileBertForSequenceClassification, 'token-classification': TFMobileBertForTokenClassification, 'zero-shot': TFMobileBertForSequenceClassification, } if is_tf_available() else {} ) _snake_case : Dict = False _snake_case : List[str] = False def lowerCAmelCase_ ( self : Dict , __lowerCAmelCase : Dict , __lowerCAmelCase : List[Any] , __lowerCAmelCase : int=False ): _UpperCAmelCase = super()._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase ) if return_labels: if model_class in get_values(__lowerCAmelCase ): _UpperCAmelCase = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) return inputs_dict class a ( lowerCAmelCase_ ): def __init__( self : Optional[int] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : str=13 , __lowerCAmelCase : Any=7 , __lowerCAmelCase : Optional[int]=True , __lowerCAmelCase : str=True , __lowerCAmelCase : List[str]=True , __lowerCAmelCase : Tuple=True , __lowerCAmelCase : List[str]=99 , __lowerCAmelCase : Optional[int]=32 , __lowerCAmelCase : str=32 , __lowerCAmelCase : Optional[Any]=2 , __lowerCAmelCase : int=4 , __lowerCAmelCase : Tuple=37 , __lowerCAmelCase : Union[str, Any]="gelu" , __lowerCAmelCase : int=0.1 , __lowerCAmelCase : Optional[Any]=0.1 , __lowerCAmelCase : int=512 , __lowerCAmelCase : List[Any]=16 , __lowerCAmelCase : Optional[Any]=2 , __lowerCAmelCase : Optional[int]=0.02 , __lowerCAmelCase : Optional[int]=3 , __lowerCAmelCase : Dict=4 , __lowerCAmelCase : str=None , ): _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = seq_length _UpperCAmelCase = is_training _UpperCAmelCase = use_input_mask _UpperCAmelCase = use_token_type_ids _UpperCAmelCase = use_labels _UpperCAmelCase = vocab_size _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_act _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = max_position_embeddings _UpperCAmelCase = type_vocab_size _UpperCAmelCase = type_sequence_label_size _UpperCAmelCase = initializer_range _UpperCAmelCase = num_labels _UpperCAmelCase = 
num_choices _UpperCAmelCase = scope _UpperCAmelCase = embedding_size def lowerCAmelCase_ ( self : int ): _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _UpperCAmelCase = None if self.use_input_mask: _UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] ) _UpperCAmelCase = None if self.use_token_type_ids: _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None if self.use_labels: _UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices ) _UpperCAmelCase = MobileBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCAmelCase_ ( self : str , __lowerCAmelCase : List[str] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any ): _UpperCAmelCase = TFMobileBertModel(config=__lowerCAmelCase ) _UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} _UpperCAmelCase = model(__lowerCAmelCase ) _UpperCAmelCase = [input_ids, input_mask] _UpperCAmelCase = model(__lowerCAmelCase ) _UpperCAmelCase = model(__lowerCAmelCase ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def lowerCAmelCase_ ( self : Any , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Any , __lowerCAmelCase : List[str] , __lowerCAmelCase : Any , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[str] ): _UpperCAmelCase = TFMobileBertForMaskedLM(config=__lowerCAmelCase ) _UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} _UpperCAmelCase = model(__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCAmelCase_ ( self : int , __lowerCAmelCase : List[str] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : int , __lowerCAmelCase : Dict , __lowerCAmelCase : Any , __lowerCAmelCase : Dict ): _UpperCAmelCase = TFMobileBertForNextSentencePrediction(config=__lowerCAmelCase ) _UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} _UpperCAmelCase = model(__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) ) def lowerCAmelCase_ ( self : Dict , __lowerCAmelCase : int , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[Any] , 
__lowerCAmelCase : List[str] , __lowerCAmelCase : Tuple ): _UpperCAmelCase = TFMobileBertForPreTraining(config=__lowerCAmelCase ) _UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} _UpperCAmelCase = model(__lowerCAmelCase ) self.parent.assertEqual( result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) ) def lowerCAmelCase_ ( self : int , __lowerCAmelCase : Tuple , __lowerCAmelCase : int , __lowerCAmelCase : Any , __lowerCAmelCase : str , __lowerCAmelCase : Any , __lowerCAmelCase : int , __lowerCAmelCase : Tuple ): _UpperCAmelCase = self.num_labels _UpperCAmelCase = TFMobileBertForSequenceClassification(config=__lowerCAmelCase ) _UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} _UpperCAmelCase = model(__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Dict , __lowerCAmelCase : Any , __lowerCAmelCase : Any , __lowerCAmelCase : int , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Union[str, Any] ): _UpperCAmelCase = self.num_choices _UpperCAmelCase = TFMobileBertForMultipleChoice(config=__lowerCAmelCase ) _UpperCAmelCase = tf.tile(tf.expand_dims(__lowerCAmelCase , 1 ) , (1, self.num_choices, 1) ) _UpperCAmelCase = tf.tile(tf.expand_dims(__lowerCAmelCase , 1 ) , (1, self.num_choices, 1) ) _UpperCAmelCase = tf.tile(tf.expand_dims(__lowerCAmelCase , 1 ) , (1, self.num_choices, 1) ) _UpperCAmelCase = { """input_ids""": multiple_choice_inputs_ids, """attention_mask""": multiple_choice_input_mask, """token_type_ids""": multiple_choice_token_type_ids, } _UpperCAmelCase = model(__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Any , __lowerCAmelCase : Dict , __lowerCAmelCase : List[str] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any ): _UpperCAmelCase = self.num_labels _UpperCAmelCase = TFMobileBertForTokenClassification(config=__lowerCAmelCase ) _UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} _UpperCAmelCase = model(__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCAmelCase_ ( self : Dict , __lowerCAmelCase : str , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : int , __lowerCAmelCase : Any , __lowerCAmelCase : str , __lowerCAmelCase : List[Any] ): _UpperCAmelCase = TFMobileBertForQuestionAnswering(config=__lowerCAmelCase ) _UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} _UpperCAmelCase = model(__lowerCAmelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCAmelCase_ ( self : Tuple ): _UpperCAmelCase = self.prepare_config_and_inputs() ( ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ) = config_and_inputs _UpperCAmelCase = 
{"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask} return config, inputs_dict def lowerCAmelCase_ ( self : str ): _UpperCAmelCase = TFMobileBertModelTest.TFMobileBertModelTester(self ) _UpperCAmelCase = ConfigTester(self , config_class=__lowerCAmelCase , hidden_size=37 ) def lowerCAmelCase_ ( self : Any ): self.config_tester.run_common_tests() def lowerCAmelCase_ ( self : Any ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_model(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : List[Any] ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_masked_lm(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : Union[str, Any] ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_multiple_choice(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : Any ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : Optional[int] ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_pretraining(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : str ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_question_answering(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : str ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_sequence_classification(*__lowerCAmelCase ) def lowerCAmelCase_ ( self : str ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_token_classification(*__lowerCAmelCase ) @slow def lowerCAmelCase_ ( self : int ): # for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: for model_name in ["google/mobilebert-uncased"]: _UpperCAmelCase = TFMobileBertModel.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) @require_tf class a ( unittest.TestCase ): @slow def lowerCAmelCase_ ( self : List[Any] ): _UpperCAmelCase = TFMobileBertForPreTraining.from_pretrained("""google/mobilebert-uncased""" ) _UpperCAmelCase = tf.constant([[0, 1, 2, 3, 4, 5]] ) _UpperCAmelCase = model(__lowerCAmelCase )[0] _UpperCAmelCase = [1, 6, 3_0522] self.assertEqual(output.shape , __lowerCAmelCase ) _UpperCAmelCase = tf.constant( [ [ [-4.5_919_547, -9.248_295, -9.645_256], [-6.7_306_175, -6.440_284, -6.6_052_837], [-7.2_743_506, -6.7_847_915, -6.024_673], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , __lowerCAmelCase , atol=1e-4 )
30
1
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase__ = logging.get_logger(__name__) UpperCAmelCase__ = { """facebook/timesformer""": """https://huggingface.co/facebook/timesformer/resolve/main/config.json""", } class a ( lowerCAmelCase_ ): _snake_case : Union[str, Any] = 'timesformer' def __init__( self : Optional[Any] , __lowerCAmelCase : List[Any]=224 , __lowerCAmelCase : Union[str, Any]=16 , __lowerCAmelCase : str=3 , __lowerCAmelCase : int=8 , __lowerCAmelCase : Union[str, Any]=768 , __lowerCAmelCase : List[str]=12 , __lowerCAmelCase : Optional[Any]=12 , __lowerCAmelCase : int=3072 , __lowerCAmelCase : Any="gelu" , __lowerCAmelCase : Dict=0.0 , __lowerCAmelCase : Dict=0.0 , __lowerCAmelCase : List[str]=0.02 , __lowerCAmelCase : Union[str, Any]=1e-6 , __lowerCAmelCase : int=True , __lowerCAmelCase : Optional[int]="divided_space_time" , __lowerCAmelCase : Optional[int]=0 , **__lowerCAmelCase : Optional[int] , ): super().__init__(**__lowerCAmelCase ) _UpperCAmelCase = image_size _UpperCAmelCase = patch_size _UpperCAmelCase = num_channels _UpperCAmelCase = num_frames _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_act _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = initializer_range _UpperCAmelCase = layer_norm_eps _UpperCAmelCase = qkv_bias _UpperCAmelCase = attention_type _UpperCAmelCase = drop_path_rate
30
"""simple docstring""" from ....configuration_utils import PretrainedConfig from ....utils import logging UpperCAmelCase__ = logging.get_logger(__name__) UpperCAmelCase__ = { """Visual-Attention-Network/van-base""": ( """https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json""" ), } class a ( lowerCAmelCase_ ): _snake_case : int = 'van' def __init__( self : Any , __lowerCAmelCase : Tuple=224 , __lowerCAmelCase : List[Any]=3 , __lowerCAmelCase : Tuple=[7, 3, 3, 3] , __lowerCAmelCase : Dict=[4, 2, 2, 2] , __lowerCAmelCase : Optional[Any]=[64, 128, 320, 512] , __lowerCAmelCase : Optional[int]=[3, 3, 12, 3] , __lowerCAmelCase : Dict=[8, 8, 4, 4] , __lowerCAmelCase : int="gelu" , __lowerCAmelCase : Optional[int]=0.02 , __lowerCAmelCase : List[str]=1e-6 , __lowerCAmelCase : Optional[int]=1e-2 , __lowerCAmelCase : Any=0.0 , __lowerCAmelCase : List[str]=0.0 , **__lowerCAmelCase : Any , ): super().__init__(**__lowerCAmelCase ) _UpperCAmelCase = image_size _UpperCAmelCase = num_channels _UpperCAmelCase = patch_sizes _UpperCAmelCase = strides _UpperCAmelCase = hidden_sizes _UpperCAmelCase = depths _UpperCAmelCase = mlp_ratios _UpperCAmelCase = hidden_act _UpperCAmelCase = initializer_range _UpperCAmelCase = layer_norm_eps _UpperCAmelCase = layer_scale_init_value _UpperCAmelCase = drop_path_rate _UpperCAmelCase = dropout_rate
30
1
"""simple docstring""" import os import shutil import tempfile import unittest import numpy as np from transformers import AutoTokenizer, BarkProcessor from transformers.testing_utils import require_torch, slow @require_torch class a ( unittest.TestCase ): def lowerCAmelCase_ ( self : str ): _UpperCAmelCase = """ylacombe/bark-small""" _UpperCAmelCase = tempfile.mkdtemp() _UpperCAmelCase = """en_speaker_1""" _UpperCAmelCase = """This is a test string""" _UpperCAmelCase = """speaker_embeddings_path.json""" _UpperCAmelCase = """speaker_embeddings""" def lowerCAmelCase_ ( self : Optional[int] , **__lowerCAmelCase : Dict ): return AutoTokenizer.from_pretrained(self.checkpoint , **__lowerCAmelCase ) def lowerCAmelCase_ ( self : Any ): shutil.rmtree(self.tmpdirname ) def lowerCAmelCase_ ( self : int ): _UpperCAmelCase = self.get_tokenizer() _UpperCAmelCase = BarkProcessor(tokenizer=__lowerCAmelCase ) processor.save_pretrained(self.tmpdirname ) _UpperCAmelCase = BarkProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) @slow def lowerCAmelCase_ ( self : List[Any] ): _UpperCAmelCase = BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , ) processor.save_pretrained( self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , ) _UpperCAmelCase = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" ) _UpperCAmelCase = BarkProcessor.from_pretrained( self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="""(BOS)""" , eos_token="""(EOS)""" , ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) def lowerCAmelCase_ ( self : str ): _UpperCAmelCase = BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , ) _UpperCAmelCase = 35 _UpperCAmelCase = 2 _UpperCAmelCase = 8 _UpperCAmelCase = { """semantic_prompt""": np.ones(__lowerCAmelCase ), """coarse_prompt""": np.ones((nb_codebooks_coarse, seq_len) ), """fine_prompt""": np.ones((nb_codebooks_total, seq_len) ), } # test providing already loaded voice_preset _UpperCAmelCase = processor(text=self.input_string , voice_preset=__lowerCAmelCase ) _UpperCAmelCase = inputs["""history_prompt"""] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(__lowerCAmelCase , np.array([] ) ).tolist() ) # test loading voice preset from npz file _UpperCAmelCase = os.path.join(self.tmpdirname , """file.npz""" ) np.savez(__lowerCAmelCase , **__lowerCAmelCase ) _UpperCAmelCase = processor(text=self.input_string , voice_preset=__lowerCAmelCase ) _UpperCAmelCase = inputs["""history_prompt"""] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(__lowerCAmelCase , np.array([] ) ).tolist() ) # test loading voice preset from the hub _UpperCAmelCase = processor(text=self.input_string , voice_preset=self.voice_preset ) def lowerCAmelCase_ ( self : List[str] ): _UpperCAmelCase = self.get_tokenizer() _UpperCAmelCase = BarkProcessor(tokenizer=__lowerCAmelCase ) _UpperCAmelCase = processor(text=self.input_string ) _UpperCAmelCase = tokenizer( self.input_string , padding="""max_length""" , max_length=256 , add_special_tokens=__lowerCAmelCase , return_attention_mask=__lowerCAmelCase , 
return_token_type_ids=__lowerCAmelCase , ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
30
"""simple docstring""" def __UpperCAmelCase ( lowercase = 10_00 ): """simple docstring""" _UpperCAmelCase = 2**power _UpperCAmelCase = 0 while n: _UpperCAmelCase , _UpperCAmelCase = r + n % 10, n // 10 return r if __name__ == "__main__": print(solution(int(str(input()).strip())))
30
1
"""simple docstring""" from typing import Dict, List, Optional, Tuple, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, flip_channel_order, get_resize_output_image_size, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging if is_vision_available(): import PIL if is_torch_available(): import torch UpperCAmelCase__ = logging.get_logger(__name__) class a ( lowerCAmelCase_ ): _snake_case : Any = ['pixel_values'] def __init__( self : Optional[int] , __lowerCAmelCase : bool = True , __lowerCAmelCase : Dict[str, int] = None , __lowerCAmelCase : PILImageResampling = PILImageResampling.BILINEAR , __lowerCAmelCase : bool = True , __lowerCAmelCase : Union[int, float] = 1 / 255 , __lowerCAmelCase : bool = True , __lowerCAmelCase : Dict[str, int] = None , __lowerCAmelCase : bool = True , **__lowerCAmelCase : Tuple , ): super().__init__(**__lowerCAmelCase ) _UpperCAmelCase = size if size is not None else {"""shortest_edge""": 224} _UpperCAmelCase = get_size_dict(__lowerCAmelCase , default_to_square=__lowerCAmelCase ) _UpperCAmelCase = crop_size if crop_size is not None else {"""height""": 256, """width""": 256} _UpperCAmelCase = get_size_dict(__lowerCAmelCase , param_name="""crop_size""" ) _UpperCAmelCase = do_resize _UpperCAmelCase = size _UpperCAmelCase = resample _UpperCAmelCase = do_rescale _UpperCAmelCase = rescale_factor _UpperCAmelCase = do_center_crop _UpperCAmelCase = crop_size _UpperCAmelCase = do_flip_channel_order def lowerCAmelCase_ ( self : Optional[int] , __lowerCAmelCase : np.ndarray , __lowerCAmelCase : Dict[str, int] , __lowerCAmelCase : PILImageResampling = PIL.Image.BILINEAR , __lowerCAmelCase : Optional[Union[str, ChannelDimension]] = None , **__lowerCAmelCase : Optional[Any] , ): _UpperCAmelCase = get_size_dict(__lowerCAmelCase , default_to_square=__lowerCAmelCase ) if "shortest_edge" not in size: raise ValueError(f'''The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}''' ) _UpperCAmelCase = get_resize_output_image_size(__lowerCAmelCase , size=size["""shortest_edge"""] , default_to_square=__lowerCAmelCase ) return resize(__lowerCAmelCase , size=__lowerCAmelCase , resample=__lowerCAmelCase , data_format=__lowerCAmelCase , **__lowerCAmelCase ) def lowerCAmelCase_ ( self : List[str] , __lowerCAmelCase : np.ndarray , __lowerCAmelCase : Dict[str, int] , __lowerCAmelCase : Optional[Union[str, ChannelDimension]] = None , **__lowerCAmelCase : int , ): _UpperCAmelCase = get_size_dict(__lowerCAmelCase ) if "height" not in size or "width" not in size: raise ValueError(f'''The `size` dictionary must contain the keys `height` and `width`. 
Got {size.keys()}''' ) return center_crop(__lowerCAmelCase , size=(size["""height"""], size["""width"""]) , data_format=__lowerCAmelCase , **__lowerCAmelCase ) def lowerCAmelCase_ ( self : Union[str, Any] , __lowerCAmelCase : np.ndarray , __lowerCAmelCase : Union[int, float] , __lowerCAmelCase : Optional[Union[str, ChannelDimension]] = None , **__lowerCAmelCase : str , ): return rescale(__lowerCAmelCase , scale=__lowerCAmelCase , data_format=__lowerCAmelCase , **__lowerCAmelCase ) def lowerCAmelCase_ ( self : Union[str, Any] , __lowerCAmelCase : np.ndarray , __lowerCAmelCase : Optional[Union[str, ChannelDimension]] = None ): return flip_channel_order(__lowerCAmelCase , data_format=__lowerCAmelCase ) def lowerCAmelCase_ ( self : Union[str, Any] , __lowerCAmelCase : ImageInput , __lowerCAmelCase : bool = None , __lowerCAmelCase : Dict[str, int] = None , __lowerCAmelCase : PILImageResampling = None , __lowerCAmelCase : bool = None , __lowerCAmelCase : float = None , __lowerCAmelCase : bool = None , __lowerCAmelCase : Dict[str, int] = None , __lowerCAmelCase : bool = None , __lowerCAmelCase : Optional[Union[str, TensorType]] = None , __lowerCAmelCase : ChannelDimension = ChannelDimension.FIRST , **__lowerCAmelCase : Optional[Any] , ): _UpperCAmelCase = do_resize if do_resize is not None else self.do_resize _UpperCAmelCase = resample if resample is not None else self.resample _UpperCAmelCase = do_rescale if do_rescale is not None else self.do_rescale _UpperCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor _UpperCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop _UpperCAmelCase = ( do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order ) _UpperCAmelCase = size if size is not None else self.size _UpperCAmelCase = get_size_dict(__lowerCAmelCase , default_to_square=__lowerCAmelCase ) _UpperCAmelCase = crop_size if crop_size is not None else self.crop_size _UpperCAmelCase = get_size_dict(__lowerCAmelCase , param_name="""crop_size""" ) _UpperCAmelCase = make_list_of_images(__lowerCAmelCase ) if not valid_images(__lowerCAmelCase ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None: raise ValueError("""Size must be specified if do_resize is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_center_crop and crop_size is None: raise ValueError("""Crop size must be specified if do_center_crop is True.""" ) # All transformations expect numpy arrays. 
_UpperCAmelCase = [to_numpy_array(__lowerCAmelCase ) for image in images] if do_resize: _UpperCAmelCase = [self.resize(image=__lowerCAmelCase , size=__lowerCAmelCase , resample=__lowerCAmelCase ) for image in images] if do_center_crop: _UpperCAmelCase = [self.center_crop(image=__lowerCAmelCase , size=__lowerCAmelCase ) for image in images] if do_rescale: _UpperCAmelCase = [self.rescale(image=__lowerCAmelCase , scale=__lowerCAmelCase ) for image in images] # the pretrained checkpoints assume images are BGR, not RGB if do_flip_channel_order: _UpperCAmelCase = [self.flip_channel_order(image=__lowerCAmelCase ) for image in images] _UpperCAmelCase = [to_channel_dimension_format(__lowerCAmelCase , __lowerCAmelCase ) for image in images] _UpperCAmelCase = {"""pixel_values""": images} return BatchFeature(data=__lowerCAmelCase , tensor_type=__lowerCAmelCase ) def lowerCAmelCase_ ( self : Dict , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[Tuple] = None ): _UpperCAmelCase = outputs.logits # Resize logits and compute semantic segmentation maps if target_sizes is not None: if len(__lowerCAmelCase ) != len(__lowerCAmelCase ): raise ValueError( """Make sure that you pass in as many target sizes as the batch dimension of the logits""" ) if is_torch_tensor(__lowerCAmelCase ): _UpperCAmelCase = target_sizes.numpy() _UpperCAmelCase = [] for idx in range(len(__lowerCAmelCase ) ): _UpperCAmelCase = torch.nn.functional.interpolate( logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="""bilinear""" , align_corners=__lowerCAmelCase ) _UpperCAmelCase = resized_logits[0].argmax(dim=0 ) semantic_segmentation.append(__lowerCAmelCase ) else: _UpperCAmelCase = logits.argmax(dim=1 ) _UpperCAmelCase = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )] return semantic_segmentation
30
"""simple docstring""" from typing import Any, Dict, List, Optional, Tuple, Union import torch from torch import nn from torch.utils.data import DistributedSampler, RandomSampler from transformers import PreTrainedModel, Trainer, logging from transformers.integrations import is_fairscale_available from transformers.models.fsmt.configuration_fsmt import FSMTConfig from transformers.optimization import ( Adafactor, AdamW, get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) from transformers.trainer_pt_utils import get_tpu_sampler from transformers.training_args import ParallelMode from transformers.utils import is_torch_tpu_available if is_fairscale_available(): from fairscale.optim import OSS UpperCAmelCase__ = logging.get_logger(__name__) UpperCAmelCase__ = { """linear""": get_linear_schedule_with_warmup, """cosine""": get_cosine_schedule_with_warmup, """cosine_w_restarts""": get_cosine_with_hard_restarts_schedule_with_warmup, """polynomial""": get_polynomial_decay_schedule_with_warmup, """constant""": get_constant_schedule, """constant_w_warmup""": get_constant_schedule_with_warmup, } class a ( lowerCAmelCase_ ): def __init__( self : Optional[int] , __lowerCAmelCase : Any=None , __lowerCAmelCase : Any=None , *__lowerCAmelCase : Union[str, Any] , **__lowerCAmelCase : Optional[int] ): super().__init__(*__lowerCAmelCase , **__lowerCAmelCase ) if config is None: assert isinstance(self.model , __lowerCAmelCase ), ( "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is" f''' {self.model.__class__}''' ) _UpperCAmelCase = self.model.config else: _UpperCAmelCase = config _UpperCAmelCase = data_args _UpperCAmelCase = self.config.tgt_vocab_size if isinstance(self.config , __lowerCAmelCase ) else self.config.vocab_size if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss): assert self.config.pad_token_id is not None, ( "Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss" " calculation or doing label smoothing." ) if self.config.pad_token_id is None and self.config.eos_token_id is not None: logger.warning( f'''The `config.pad_token_id` is `None`. 
Using `config.eos_token_id` = {self.config.eos_token_id} for''' """ padding..""" ) if self.args.label_smoothing == 0: _UpperCAmelCase = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id ) else: # dynamically import label_smoothed_nll_loss from utils import label_smoothed_nll_loss _UpperCAmelCase = label_smoothed_nll_loss def lowerCAmelCase_ ( self : Dict , __lowerCAmelCase : int ): if self.optimizer is None: _UpperCAmelCase = ["""bias""", """LayerNorm.weight"""] _UpperCAmelCase = [ { """params""": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )], """weight_decay""": self.args.weight_decay, }, { """params""": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )], """weight_decay""": 0.0, }, ] _UpperCAmelCase = Adafactor if self.args.adafactor else AdamW if self.args.adafactor: _UpperCAmelCase = Adafactor _UpperCAmelCase = {"""scale_parameter""": False, """relative_step""": False} else: _UpperCAmelCase = AdamW _UpperCAmelCase = { """betas""": (self.args.adam_betaa, self.args.adam_betaa), """eps""": self.args.adam_epsilon, } _UpperCAmelCase = self.args.learning_rate if self.sharded_ddp: _UpperCAmelCase = OSS( params=__lowerCAmelCase , optim=__lowerCAmelCase , **__lowerCAmelCase , ) else: _UpperCAmelCase = optimizer_cls(__lowerCAmelCase , **__lowerCAmelCase ) if self.lr_scheduler is None: _UpperCAmelCase = self._get_lr_scheduler(__lowerCAmelCase ) else: # ignoring --lr_scheduler logger.warning("""scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.""" ) def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : List[str] ): _UpperCAmelCase = arg_to_scheduler[self.args.lr_scheduler] if self.args.lr_scheduler == "constant": _UpperCAmelCase = schedule_func(self.optimizer ) elif self.args.lr_scheduler == "constant_w_warmup": _UpperCAmelCase = schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps ) else: _UpperCAmelCase = schedule_func( self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=__lowerCAmelCase ) return scheduler def lowerCAmelCase_ ( self : Optional[int] ): if isinstance(self.train_dataset , torch.utils.data.IterableDataset ): return None elif is_torch_tpu_available(): return get_tpu_sampler(self.train_dataset ) else: if self.args.sortish_sampler: self.train_dataset.make_sortish_sampler( self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , ) return ( RandomSampler(self.train_dataset ) if self.args.local_rank == -1 else DistributedSampler(self.train_dataset ) ) def lowerCAmelCase_ ( self : Optional[int] , __lowerCAmelCase : str , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Tuple ): if self.args.label_smoothing == 0: if self.data_args is not None and self.data_args.ignore_pad_token_for_loss: # force training to ignore pad token _UpperCAmelCase = model(**__lowerCAmelCase , use_cache=__lowerCAmelCase )[0] _UpperCAmelCase = self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) ) else: # compute usual loss via models _UpperCAmelCase , _UpperCAmelCase = model(**__lowerCAmelCase , labels=__lowerCAmelCase , use_cache=__lowerCAmelCase )[:2] else: # compute label smoothed loss _UpperCAmelCase = model(**__lowerCAmelCase , use_cache=__lowerCAmelCase )[0] _UpperCAmelCase = torch.nn.functional.log_softmax(__lowerCAmelCase , dim=-1 ) _UpperCAmelCase , _UpperCAmelCase = self.loss_fn(__lowerCAmelCase , __lowerCAmelCase , self.args.label_smoothing , 
ignore_index=self.config.pad_token_id ) return loss, logits def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : int ): _UpperCAmelCase = inputs.pop("""labels""" ) _UpperCAmelCase , _UpperCAmelCase = self._compute_loss(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) return loss def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : nn.Module , __lowerCAmelCase : Dict[str, Union[torch.Tensor, Any]] , __lowerCAmelCase : bool , __lowerCAmelCase : Optional[List[str]] = None , ): _UpperCAmelCase = self._prepare_inputs(__lowerCAmelCase ) _UpperCAmelCase = { """max_length""": self.data_args.val_max_target_length if self.data_args is not None else self.config.max_length, """num_beams""": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams, } if self.args.predict_with_generate and not self.args.prediction_loss_only: _UpperCAmelCase = self.model.generate( inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , **__lowerCAmelCase , ) # in case the batch is shorter than max length, the output should be padded if generated_tokens.shape[-1] < gen_kwargs["max_length"]: _UpperCAmelCase = self._pad_tensors_to_max_len(__lowerCAmelCase , gen_kwargs["""max_length"""] ) _UpperCAmelCase = inputs.pop("""labels""" ) with torch.no_grad(): # compute loss on predict data _UpperCAmelCase , _UpperCAmelCase = self._compute_loss(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase = loss.mean().detach() if self.args.prediction_loss_only: return (loss, None, None) _UpperCAmelCase = generated_tokens if self.args.predict_with_generate else logits if labels.shape[-1] < gen_kwargs["max_length"]: _UpperCAmelCase = self._pad_tensors_to_max_len(__lowerCAmelCase , gen_kwargs["""max_length"""] ) return (loss, logits, labels) def lowerCAmelCase_ ( self : Union[str, Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[str] ): # If PAD token is not defined at least EOS token has to be defined _UpperCAmelCase = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id if pad_token_id is None: raise ValueError( """Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be""" f''' padded to `max_length`={max_length}''' ) _UpperCAmelCase = pad_token_id * torch.ones( (tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device ) _UpperCAmelCase = tensor return padded_tensor
30
1
"""simple docstring""" import os import zipfile import pytest from datasets.utils.extract import ( BzipaExtractor, Extractor, GzipExtractor, LzaExtractor, SevenZipExtractor, TarExtractor, XzExtractor, ZipExtractor, ZstdExtractor, ) from .utils import require_lza, require_pyazr, require_zstandard @pytest.mark.parametrize( """compression_format, is_archive""" ,[ ("""7z""", True), ("""bz2""", False), ("""gzip""", False), ("""lz4""", False), ("""tar""", True), ("""xz""", False), ("""zip""", True), ("""zstd""", False), ] ,) def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ,lowercase ,lowercase ,lowercase ,lowercase ,lowercase ,lowercase ,lowercase ,lowercase ,lowercase ,): """simple docstring""" _UpperCAmelCase = { """7z""": (seven_zip_file, SevenZipExtractor), """bz2""": (bza_file, BzipaExtractor), """gzip""": (gz_file, GzipExtractor), """lz4""": (lza_file, LzaExtractor), """tar""": (tar_file, TarExtractor), """xz""": (xz_file, XzExtractor), """zip""": (zip_file, ZipExtractor), """zstd""": (zstd_file, ZstdExtractor), } _UpperCAmelCase , _UpperCAmelCase = input_paths_and_base_extractors[compression_format] if input_path is None: _UpperCAmelCase = f'''for \'{compression_format}\' compression_format, ''' if compression_format == "7z": reason += require_pyazr.kwargs["reason"] elif compression_format == "lz4": reason += require_lza.kwargs["reason"] elif compression_format == "zstd": reason += require_zstandard.kwargs["reason"] pytest.skip(lowercase ) assert base_extractor.is_extractable(lowercase ) _UpperCAmelCase = tmp_path / ("""extracted""" if is_archive else """extracted.txt""") base_extractor.extract(lowercase ,lowercase ) if is_archive: assert output_path.is_dir() for file_path in output_path.iterdir(): assert file_path.name == text_file.name _UpperCAmelCase = file_path.read_text(encoding="""utf-8""" ) else: _UpperCAmelCase = output_path.read_text(encoding="""utf-8""" ) _UpperCAmelCase = text_file.read_text(encoding="""utf-8""" ) assert extracted_file_content == expected_file_content @pytest.mark.parametrize( """compression_format, is_archive""" ,[ ("""7z""", True), ("""bz2""", False), ("""gzip""", False), ("""lz4""", False), ("""tar""", True), ("""xz""", False), ("""zip""", True), ("""zstd""", False), ] ,) def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ,lowercase ,lowercase ,lowercase ,lowercase ,lowercase ,lowercase ,lowercase ,lowercase ,lowercase ,): """simple docstring""" _UpperCAmelCase = { """7z""": seven_zip_file, """bz2""": bza_file, """gzip""": gz_file, """lz4""": lza_file, """tar""": tar_file, """xz""": xz_file, """zip""": zip_file, """zstd""": zstd_file, } _UpperCAmelCase = input_paths[compression_format] if input_path is None: _UpperCAmelCase = f'''for \'{compression_format}\' compression_format, ''' if compression_format == "7z": reason += require_pyazr.kwargs["reason"] elif compression_format == "lz4": reason += require_lza.kwargs["reason"] elif compression_format == "zstd": reason += require_zstandard.kwargs["reason"] pytest.skip(lowercase ) _UpperCAmelCase = Extractor.infer_extractor_format(lowercase ) assert extractor_format is not None _UpperCAmelCase = tmp_path / ("""extracted""" if is_archive else """extracted.txt""") Extractor.extract(lowercase ,lowercase ,lowercase ) if is_archive: assert output_path.is_dir() for file_path in output_path.iterdir(): assert file_path.name == text_file.name _UpperCAmelCase = file_path.read_text(encoding="""utf-8""" ) else: _UpperCAmelCase = output_path.read_text(encoding="""utf-8""" ) _UpperCAmelCase = 
text_file.read_text(encoding="""utf-8""" ) assert extracted_file_content == expected_file_content @pytest.fixture def __UpperCAmelCase ( lowercase ,lowercase ): """simple docstring""" import tarfile _UpperCAmelCase = tmp_path / """data_dot_dot""" directory.mkdir() _UpperCAmelCase = directory / """tar_file_with_dot_dot.tar""" with tarfile.TarFile(lowercase ,"""w""" ) as f: f.add(lowercase ,arcname=os.path.join("""..""" ,text_file.name ) ) return path @pytest.fixture def __UpperCAmelCase ( lowercase ): """simple docstring""" import tarfile _UpperCAmelCase = tmp_path / """data_sym_link""" directory.mkdir() _UpperCAmelCase = directory / """tar_file_with_sym_link.tar""" os.symlink("""..""" ,directory / """subdir""" ,target_is_directory=lowercase ) with tarfile.TarFile(lowercase ,"""w""" ) as f: f.add(str(directory / """subdir""" ) ,arcname="""subdir""" ) # str required by os.readlink on Windows and Python < 3.8 return path @pytest.mark.parametrize( """insecure_tar_file, error_log""" ,[("""tar_file_with_dot_dot""", """illegal path"""), ("""tar_file_with_sym_link""", """Symlink""")] ,) def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ,lowercase ,lowercase ,lowercase ): """simple docstring""" _UpperCAmelCase = { """tar_file_with_dot_dot""": tar_file_with_dot_dot, """tar_file_with_sym_link""": tar_file_with_sym_link, } _UpperCAmelCase = insecure_tar_files[insecure_tar_file] _UpperCAmelCase = tmp_path / """extracted""" TarExtractor.extract(lowercase ,lowercase ) assert caplog.text for record in caplog.records: assert record.levelname == "ERROR" assert error_log in record.msg def __UpperCAmelCase ( lowercase ): """simple docstring""" # We should have less false positives than zipfile.is_zipfile # We do that by checking only the magic number _UpperCAmelCase = tmpdir / """not_a_zip_file""" # From: https://github.com/python/cpython/pull/5053 _UpperCAmelCase = ( B"""\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00""" B"""\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6'\x00\x00\x00\x15I""" B"""DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07""" B"""\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82""" ) with not_a_zip_file.open("""wb""" ) as f: f.write(lowercase ) assert zipfile.is_zipfile(str(lowercase ) ) # is a false positive for `zipfile` assert not ZipExtractor.is_extractable(lowercase ) # but we're right
30
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available UpperCAmelCase__ = { """configuration_git""": ["""GIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GitConfig""", """GitVisionConfig"""], """processing_git""": ["""GitProcessor"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ = [ """GIT_PRETRAINED_MODEL_ARCHIVE_LIST""", """GitForCausalLM""", """GitModel""", """GitPreTrainedModel""", """GitVisionModel""", ] if TYPE_CHECKING: from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig from .processing_git import GitProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_git import ( GIT_PRETRAINED_MODEL_ARCHIVE_LIST, GitForCausalLM, GitModel, GitPreTrainedModel, GitVisionModel, ) else: import sys UpperCAmelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
30
1
"""simple docstring""" class a : # Public class to implement a graph def __init__( self : Union[str, Any] , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : list[list[bool]] ): _UpperCAmelCase = row _UpperCAmelCase = col _UpperCAmelCase = graph def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : list[list[bool]] ): return ( 0 <= i < self.ROW and 0 <= j < self.COL and not visited[i][j] and self.graph[i][j] ) def lowerCAmelCase_ ( self : str , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : list[list[bool]] ): # Checking all 8 elements surrounding nth element _UpperCAmelCase = [-1, -1, -1, 0, 0, 1, 1, 1] # Coordinate order _UpperCAmelCase = [-1, 0, 1, -1, 1, -1, 0, 1] _UpperCAmelCase = True # Make those cells visited for k in range(8 ): if self.is_safe(i + row_nbr[k] , j + col_nbr[k] , __lowerCAmelCase ): self.diffs(i + row_nbr[k] , j + col_nbr[k] , __lowerCAmelCase ) def lowerCAmelCase_ ( self : Optional[int] ): # And finally, count all islands. _UpperCAmelCase = [[False for j in range(self.COL )] for i in range(self.ROW )] _UpperCAmelCase = 0 for i in range(self.ROW ): for j in range(self.COL ): if visited[i][j] is False and self.graph[i][j] == 1: self.diffs(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) count += 1 return count
30
"""simple docstring""" import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from timm.data import resolve_data_config from timm.data.transforms_factory import create_transform from transformers import ( BitConfig, ViTHybridConfig, ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel, ) from transformers.image_utils import PILImageResampling from transformers.utils import logging logging.set_verbosity_info() UpperCAmelCase__ = logging.get_logger(__name__) def __UpperCAmelCase ( lowercase ,lowercase=False ): """simple docstring""" _UpperCAmelCase = [] # fmt: off # stem: rename_keys.append(("""cls_token""", """vit.embeddings.cls_token""") ) rename_keys.append(("""pos_embed""", """vit.embeddings.position_embeddings""") ) rename_keys.append(("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight""") ) rename_keys.append(("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias""") ) # backbone rename_keys.append(("""patch_embed.backbone.stem.conv.weight""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight""") ) rename_keys.append(("""patch_embed.backbone.stem.norm.weight""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight""") ) rename_keys.append(("""patch_embed.backbone.stem.norm.bias""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias""") ) for stage_idx in range(len(config.backbone_config.depths ) ): for layer_idx in range(config.backbone_config.depths[stage_idx] ): rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight''') ) rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight''') ) rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias''') ) rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight''') ) rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight''') ) rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias''') ) rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight''') ) rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight''') ) rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias''') ) 
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight''') ) rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight''') ) rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias''') ) # transformer encoder for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''vit.encoder.layer.{i}.layernorm_before.weight''') ) rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''vit.encoder.layer.{i}.layernorm_before.bias''') ) rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''vit.encoder.layer.{i}.attention.output.dense.weight''') ) rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''vit.encoder.layer.{i}.attention.output.dense.bias''') ) rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''vit.encoder.layer.{i}.layernorm_after.weight''') ) rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''vit.encoder.layer.{i}.layernorm_after.bias''') ) rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''vit.encoder.layer.{i}.intermediate.dense.weight''') ) rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''vit.encoder.layer.{i}.intermediate.dense.bias''') ) rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''vit.encoder.layer.{i}.output.dense.weight''') ) rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''vit.encoder.layer.{i}.output.dense.bias''') ) if base_model: # layernorm + pooler rename_keys.extend( [ ("""norm.weight""", """layernorm.weight"""), ("""norm.bias""", """layernorm.bias"""), ("""pre_logits.fc.weight""", """pooler.dense.weight"""), ("""pre_logits.fc.bias""", """pooler.dense.bias"""), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" _UpperCAmelCase = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ("""norm.weight""", """vit.layernorm.weight"""), ("""norm.bias""", """vit.layernorm.bias"""), ("""head.weight""", """classifier.weight"""), ("""head.bias""", """classifier.bias"""), ] ) # fmt: on return rename_keys def __UpperCAmelCase ( lowercase ,lowercase ,lowercase=False ): """simple docstring""" for i in range(config.num_hidden_layers ): if base_model: _UpperCAmelCase = """""" else: _UpperCAmelCase = """vit.""" # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) _UpperCAmelCase = state_dict.pop(f'''blocks.{i}.attn.qkv.weight''' ) _UpperCAmelCase = state_dict.pop(f'''blocks.{i}.attn.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict _UpperCAmelCase = in_proj_weight[ : config.hidden_size, : ] _UpperCAmelCase = in_proj_bias[: config.hidden_size] _UpperCAmelCase = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] _UpperCAmelCase = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] _UpperCAmelCase = in_proj_weight[ -config.hidden_size :, : ] _UpperCAmelCase = in_proj_bias[-config.hidden_size :] def __UpperCAmelCase ( lowercase ): """simple docstring""" 
_UpperCAmelCase = ["""head.weight""", """head.bias"""] for k in ignore_keys: state_dict.pop(lowercase ,lowercase ) def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ): """simple docstring""" _UpperCAmelCase = dct.pop(lowercase ) _UpperCAmelCase = val def __UpperCAmelCase ( ): """simple docstring""" _UpperCAmelCase = """http://images.cocodataset.org/val2017/000000039769.jpg""" _UpperCAmelCase = Image.open(requests.get(lowercase ,stream=lowercase ).raw ) return im @torch.no_grad() def __UpperCAmelCase ( lowercase ,lowercase ,lowercase=False ): """simple docstring""" _UpperCAmelCase = BitConfig( global_padding="""same""" ,layer_type="""bottleneck""" ,depths=(3, 4, 9) ,out_features=["""stage3"""] ,embedding_dynamic_padding=lowercase ,) _UpperCAmelCase = ViTHybridConfig(backbone_config=lowercase ,image_size=3_84 ,num_labels=10_00 ) _UpperCAmelCase = False # load original model from timm _UpperCAmelCase = timm.create_model(lowercase ,pretrained=lowercase ) timm_model.eval() # load state_dict of original model, remove and rename some keys _UpperCAmelCase = timm_model.state_dict() if base_model: remove_classification_head_(lowercase ) _UpperCAmelCase = create_rename_keys(lowercase ,lowercase ) for src, dest in rename_keys: rename_key(lowercase ,lowercase ,lowercase ) read_in_q_k_v(lowercase ,lowercase ,lowercase ) _UpperCAmelCase = """huggingface/label-files""" _UpperCAmelCase = """imagenet-1k-id2label.json""" _UpperCAmelCase = json.load(open(hf_hub_download(lowercase ,lowercase ,repo_type="""dataset""" ) ,"""r""" ) ) _UpperCAmelCase = {int(lowercase ): v for k, v in idalabel.items()} _UpperCAmelCase = idalabel _UpperCAmelCase = {v: k for k, v in idalabel.items()} # load HuggingFace model if vit_name[-5:] == "in21k": _UpperCAmelCase = ViTHybridModel(lowercase ).eval() else: _UpperCAmelCase = ViTHybridForImageClassification(lowercase ).eval() model.load_state_dict(lowercase ) # create image processor _UpperCAmelCase = create_transform(**resolve_data_config({} ,model=lowercase ) ) _UpperCAmelCase = transform.transforms _UpperCAmelCase = { """bilinear""": PILImageResampling.BILINEAR, """bicubic""": PILImageResampling.BICUBIC, """nearest""": PILImageResampling.NEAREST, } _UpperCAmelCase = ViTHybridImageProcessor( do_resize=lowercase ,size={"""shortest_edge""": timm_transforms[0].size} ,resample=pillow_resamplings[timm_transforms[0].interpolation.value] ,do_center_crop=lowercase ,crop_size={"""height""": timm_transforms[1].size[0], """width""": timm_transforms[1].size[1]} ,do_normalize=lowercase ,image_mean=timm_transforms[-1].mean.tolist() ,image_std=timm_transforms[-1].std.tolist() ,) _UpperCAmelCase = prepare_img() _UpperCAmelCase = transform(lowercase ).unsqueeze(0 ) _UpperCAmelCase = processor(lowercase ,return_tensors="""pt""" ).pixel_values # verify pixel values assert torch.allclose(lowercase ,lowercase ) # verify logits with torch.no_grad(): _UpperCAmelCase = model(lowercase ) _UpperCAmelCase = outputs.logits print("""Predicted class:""" ,logits.argmax(-1 ).item() ) if base_model: _UpperCAmelCase = timm_model.forward_features(lowercase ) assert timm_pooled_output.shape == outputs.pooler_output.shape assert torch.allclose(lowercase ,outputs.pooler_output ,atol=1E-3 ) else: _UpperCAmelCase = timm_model(lowercase ) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(lowercase ,outputs.logits ,atol=1E-3 ) print("""Looks ok!""" ) if pytorch_dump_folder_path is not None: Path(lowercase ).mkdir(exist_ok=lowercase ) print(f'''Saving model {vit_name} to 
{pytorch_dump_folder_path}''' ) model.save_pretrained(lowercase ) print(f'''Saving processor to {pytorch_dump_folder_path}''' ) processor.save_pretrained(lowercase ) if push_to_hub: print(f'''Pushing model and processor to the hub {vit_name}''' ) model.push_to_hub(f'''ybelkada/{vit_name}''' ) processor.push_to_hub(f'''ybelkada/{vit_name}''' ) if __name__ == "__main__": UpperCAmelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--vit_name""", default="""vit_base_r50_s16_384""", type=str, help="""Name of the hybrid ViT timm model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub.""" ) UpperCAmelCase__ = parser.parse_args() convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
30
1
"""simple docstring""" from __future__ import annotations import collections import pprint from pathlib import Path def __UpperCAmelCase ( lowercase ): """simple docstring""" return "".join(sorted(lowercase ) ) def __UpperCAmelCase ( lowercase ): """simple docstring""" return word_by_signature[signature(lowercase )] UpperCAmelCase__ = Path(__file__).parent.joinpath("""words.txt""").read_text(encoding="""utf-8""") UpperCAmelCase__ = sorted({word.strip().lower() for word in data.splitlines()}) UpperCAmelCase__ = collections.defaultdict(list) for word in word_list: word_by_signature[signature(word)].append(word) if __name__ == "__main__": UpperCAmelCase__ = {word: anagram(word) for word in word_list if len(anagram(word)) > 1} with open("""anagrams.txt""", """w""") as file: file.write("""all_anagrams = \n """) file.write(pprint.pformat(all_anagrams))
30
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_speech_available, is_torch_available, ) UpperCAmelCase__ = { """configuration_trocr""": ["""TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TrOCRConfig"""], """processing_trocr""": ["""TrOCRProcessor"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ = [ """TROCR_PRETRAINED_MODEL_ARCHIVE_LIST""", """TrOCRForCausalLM""", """TrOCRPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig from .processing_trocr import TrOCRProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel else: import sys UpperCAmelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
30
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available, ) UpperCAmelCase__ = { """configuration_perceiver""": ["""PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """PerceiverConfig""", """PerceiverOnnxConfig"""], """tokenization_perceiver""": ["""PerceiverTokenizer"""], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ = ["""PerceiverFeatureExtractor"""] UpperCAmelCase__ = ["""PerceiverImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ = [ """PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST""", """PerceiverForImageClassificationConvProcessing""", """PerceiverForImageClassificationFourier""", """PerceiverForImageClassificationLearned""", """PerceiverForMaskedLM""", """PerceiverForMultimodalAutoencoding""", """PerceiverForOpticalFlow""", """PerceiverForSequenceClassification""", """PerceiverLayer""", """PerceiverModel""", """PerceiverPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig from .tokenization_perceiver import PerceiverTokenizer try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_perceiver import PerceiverFeatureExtractor from .image_processing_perceiver import PerceiverImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_perceiver import ( PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST, PerceiverForImageClassificationConvProcessing, PerceiverForImageClassificationFourier, PerceiverForImageClassificationLearned, PerceiverForMaskedLM, PerceiverForMultimodalAutoencoding, PerceiverForOpticalFlow, PerceiverForSequenceClassification, PerceiverLayer, PerceiverModel, PerceiverPreTrainedModel, ) else: import sys UpperCAmelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
30
"""simple docstring""" # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from argparse import ArgumentParser from accelerate.commands.config import get_config_parser from accelerate.commands.env import env_command_parser from accelerate.commands.launch import launch_command_parser from accelerate.commands.test import test_command_parser from accelerate.commands.tpu import tpu_command_parser def __UpperCAmelCase ( ): """simple docstring""" _UpperCAmelCase = ArgumentParser("""Accelerate CLI tool""" ,usage="""accelerate <command> [<args>]""" ,allow_abbrev=lowercase ) _UpperCAmelCase = parser.add_subparsers(help="""accelerate command helpers""" ) # Register commands get_config_parser(subparsers=lowercase ) env_command_parser(subparsers=lowercase ) launch_command_parser(subparsers=lowercase ) tpu_command_parser(subparsers=lowercase ) test_command_parser(subparsers=lowercase ) # Let's go _UpperCAmelCase = parser.parse_args() if not hasattr(lowercase ,"""func""" ): parser.print_help() exit(1 ) # Run args.func(lowercase ) if __name__ == "__main__": main()
30
1
"""simple docstring""" from .data_collator import ( DataCollatorForLanguageModeling, DataCollatorForPermutationLanguageModeling, DataCollatorForSeqaSeq, DataCollatorForSOP, DataCollatorForTokenClassification, DataCollatorForWholeWordMask, DataCollatorWithPadding, DefaultDataCollator, default_data_collator, ) from .metrics import glue_compute_metrics, xnli_compute_metrics from .processors import ( DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor, SquadExample, SquadFeatures, SquadVaProcessor, SquadVaProcessor, glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels, squad_convert_examples_to_features, xnli_output_modes, xnli_processors, xnli_tasks_num_labels, )
30
"""simple docstring""" def __UpperCAmelCase ( lowercase ): """simple docstring""" return " ".join(input_str.split()[::-1] ) if __name__ == "__main__": import doctest doctest.testmod()
30
1
"""simple docstring""" import numpy as np import torch from torch.utils.data import Dataset, IterableDataset from ..utils.generic import ModelOutput class a ( lowerCAmelCase_ ): def __init__( self : Tuple , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Tuple ): _UpperCAmelCase = dataset _UpperCAmelCase = process _UpperCAmelCase = params def __len__( self : str ): return len(self.dataset ) def __getitem__( self : Union[str, Any] , __lowerCAmelCase : Optional[Any] ): _UpperCAmelCase = self.dataset[i] _UpperCAmelCase = self.process(__lowerCAmelCase , **self.params ) return processed class a ( lowerCAmelCase_ ): def __init__( self : List[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Tuple=None ): _UpperCAmelCase = loader _UpperCAmelCase = infer _UpperCAmelCase = params if loader_batch_size == 1: # Let's spare some time by deactivating altogether _UpperCAmelCase = None _UpperCAmelCase = loader_batch_size # Internal bookkeeping _UpperCAmelCase = None _UpperCAmelCase = None def __len__( self : Dict ): return len(self.loader ) def __iter__( self : List[Any] ): _UpperCAmelCase = iter(self.loader ) return self def lowerCAmelCase_ ( self : Union[str, Any] ): if isinstance(self._loader_batch_data , torch.Tensor ): # Batch data is simple tensor, just fetch the slice _UpperCAmelCase = self._loader_batch_data[self._loader_batch_index] else: # Batch data is assumed to be BaseModelOutput (or dict) _UpperCAmelCase = {} for k, element in self._loader_batch_data.items(): if isinstance(__lowerCAmelCase , __lowerCAmelCase ): # Convert ModelOutput to tuple first _UpperCAmelCase = element.to_tuple() if isinstance(element[0] , torch.Tensor ): _UpperCAmelCase = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element ) elif isinstance(element[0] , np.ndarray ): _UpperCAmelCase = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element ) continue if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(__lowerCAmelCase , __lowerCAmelCase ): # Those are stored as lists of tensors so need specific unbatching. if isinstance(element[0] , torch.Tensor ): _UpperCAmelCase = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element ) elif isinstance(element[0] , np.ndarray ): _UpperCAmelCase = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element ) continue if element is None: # This can happen for optional data that get passed around _UpperCAmelCase = None elif isinstance(element[self._loader_batch_index] , torch.Tensor ): # Take correct batch data, but make it looked like batch_size=1 # For compatibility with other methods within transformers _UpperCAmelCase = element[self._loader_batch_index].unsqueeze(0 ) elif isinstance(element[self._loader_batch_index] , np.ndarray ): # Take correct batch data, but make it looked like batch_size=1 # For compatibility with other methods within transformers _UpperCAmelCase = np.expand_dims(element[self._loader_batch_index] , 0 ) else: # This is typically a list, so no need to `unsqueeze`. 
_UpperCAmelCase = element[self._loader_batch_index] # Recreate the element by reusing the original class to make it look # batch_size=1 _UpperCAmelCase = self._loader_batch_data.__class__(__lowerCAmelCase ) self._loader_batch_index += 1 return result def lowerCAmelCase_ ( self : str ): if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size: # We are currently unrolling a batch so we just need to return # the current item within a batch return self.loader_batch_item() # We're out of items within a batch _UpperCAmelCase = next(self.iterator ) _UpperCAmelCase = self.infer(__lowerCAmelCase , **self.params ) # We now have a batch of "inferred things". if self.loader_batch_size is not None: # Try to infer the size of the batch if isinstance(__lowerCAmelCase , torch.Tensor ): _UpperCAmelCase = processed else: _UpperCAmelCase = list(processed.keys() )[0] _UpperCAmelCase = processed[key] if isinstance(__lowerCAmelCase , __lowerCAmelCase ): _UpperCAmelCase = len(__lowerCAmelCase ) else: _UpperCAmelCase = first_tensor.shape[0] if 0 < observed_batch_size < self.loader_batch_size: # could be last batch so we can't unroll as many # elements. _UpperCAmelCase = observed_batch_size # Setting internal index to unwrap the batch _UpperCAmelCase = processed _UpperCAmelCase = 0 return self.loader_batch_item() else: # We're not unrolling batches return processed class a ( lowerCAmelCase_ ): def __init__( self : Tuple , __lowerCAmelCase : Dict , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : str , __lowerCAmelCase : List[Any]=None ): super().__init__(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) def __iter__( self : str ): _UpperCAmelCase = iter(self.loader ) _UpperCAmelCase = None return self def lowerCAmelCase_ ( self : int ): if self.subiterator is None: _UpperCAmelCase = self.infer(next(self.iterator ) , **self.params ) try: # Try to return next item _UpperCAmelCase = next(self.subiterator ) except StopIteration: # When a preprocess iterator ends, we can start lookig at the next item # ChunkIterator will keep feeding until ALL elements of iterator # all have created their subiterator and have been iterating against. # # Another way to look at it, is we're basically flattening lists of lists # into a single list, but with generators _UpperCAmelCase = self.infer(next(self.iterator ) , **self.params ) _UpperCAmelCase = next(self.subiterator ) return processed class a ( lowerCAmelCase_ ): def __iter__( self : Union[str, Any] ): _UpperCAmelCase = iter(self.loader ) return self def lowerCAmelCase_ ( self : List[str] ): # Extremely similar to PipelineIterator in its unpacking mechanism # BUT, we have an extra required item which is the presence of `is_last` # That is because everything is flattened by `PipelineChunkIterator` we # need to keep track of how to regroup here in the original `process` # boundaries so that `process` and `postprocess` see the same data. # This iterator accumulates items (possibly while unbatching) until it # its a `is_last` and then just passes it on to the caller. 
_UpperCAmelCase = False _UpperCAmelCase = [] if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size: while self._loader_batch_index < self.loader_batch_size: _UpperCAmelCase = self.loader_batch_item() _UpperCAmelCase = item.pop("""is_last""" ) accumulator.append(__lowerCAmelCase ) if is_last: return accumulator while not is_last: _UpperCAmelCase = self.infer(next(self.iterator ) , **self.params ) if self.loader_batch_size is not None: if isinstance(__lowerCAmelCase , torch.Tensor ): _UpperCAmelCase = processed else: _UpperCAmelCase = list(processed.keys() )[0] _UpperCAmelCase = processed[key] if isinstance(__lowerCAmelCase , __lowerCAmelCase ): _UpperCAmelCase = len(__lowerCAmelCase ) else: _UpperCAmelCase = first_tensor.shape[0] if 0 < observed_batch_size < self.loader_batch_size: # could be last batch so we can't unroll as many # elements. _UpperCAmelCase = observed_batch_size _UpperCAmelCase = processed _UpperCAmelCase = 0 while self._loader_batch_index < self.loader_batch_size: _UpperCAmelCase = self.loader_batch_item() _UpperCAmelCase = item.pop("""is_last""" ) accumulator.append(__lowerCAmelCase ) if is_last: return accumulator else: _UpperCAmelCase = processed _UpperCAmelCase = item.pop("""is_last""" ) accumulator.append(__lowerCAmelCase ) return accumulator class a ( lowerCAmelCase_ ): def __init__( self : Any , __lowerCAmelCase : Dataset , __lowerCAmelCase : str ): _UpperCAmelCase = dataset _UpperCAmelCase = key def __len__( self : List[str] ): return len(self.dataset ) def __getitem__( self : Optional[Any] , __lowerCAmelCase : str ): return self.dataset[i][self.key] class a ( lowerCAmelCase_ ): def __init__( self : List[Any] , __lowerCAmelCase : Dataset , __lowerCAmelCase : str , __lowerCAmelCase : str ): _UpperCAmelCase = dataset _UpperCAmelCase = keya _UpperCAmelCase = keya def __len__( self : Optional[Any] ): return len(self.dataset ) def __getitem__( self : List[Any] , __lowerCAmelCase : str ): return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keya]}
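A tiny usage sketch for `KeyDataset` and `PipelineDataset` as defined above, with a toy process function and invented records (no model or DataLoader involved).

# Wrap plain records so each item is extracted and transformed lazily on access.
records = [{"text": "hello"}, {"text": "world"}]
texts = KeyDataset(records, "text")  # yields "hello", "world"
upper = PipelineDataset(texts, lambda s, **kwargs: s.upper(), {})
print([upper[i] for i in range(len(upper))])  # ['HELLO', 'WORLD']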
30
"""simple docstring""" import csv import tweepy # Twitter API credentials UpperCAmelCase__ = """""" UpperCAmelCase__ = """""" UpperCAmelCase__ = """""" UpperCAmelCase__ = """""" def __UpperCAmelCase ( lowercase ): """simple docstring""" # authorize twitter, initialize tweepy _UpperCAmelCase = tweepy.OAuthHandler(lowercase ,lowercase ) auth.set_access_token(lowercase ,lowercase ) _UpperCAmelCase = tweepy.API(lowercase ) # initialize a list to hold all the tweepy Tweets _UpperCAmelCase = [] # make initial request for most recent tweets (200 is the maximum allowed count) _UpperCAmelCase = api.user_timeline(screen_name=lowercase ,count=2_00 ) # save most recent tweets alltweets.extend(lowercase ) # save the id of the oldest tweet less one _UpperCAmelCase = alltweets[-1].id - 1 # keep grabbing tweets until there are no tweets left to grab while len(lowercase ) > 0: print(f'''getting tweets before {oldest}''' ) # all subsequent requests use the max_id param to prevent duplicates _UpperCAmelCase = api.user_timeline( screen_name=lowercase ,count=2_00 ,max_id=lowercase ) # save most recent tweets alltweets.extend(lowercase ) # update the id of the oldest tweet less one _UpperCAmelCase = alltweets[-1].id - 1 print(f'''...{len(lowercase )} tweets downloaded so far''' ) # transform the tweepy tweets into a 2D array that will populate the csv _UpperCAmelCase = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets] # write the csv with open(f'''new_{screen_name}_tweets.csv''' ,"""w""" ) as f: _UpperCAmelCase = csv.writer(lowercase ) writer.writerow(["""id""", """created_at""", """text"""] ) writer.writerows(lowercase ) if __name__ == "__main__": # pass in the username of the account you want to download get_all_tweets("""FirePing32""")
30
1
"""simple docstring""" import math from collections.abc import Iterator from itertools import takewhile def __UpperCAmelCase ( lowercase ): """simple docstring""" if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 ,int(math.sqrt(lowercase ) + 1 ) ,6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def __UpperCAmelCase ( ): """simple docstring""" _UpperCAmelCase = 2 while True: if is_prime(lowercase ): yield num num += 1 def __UpperCAmelCase ( lowercase = 2_00_00_00 ): """simple docstring""" return sum(takewhile(lambda lowercase : x < n ,prime_generator() ) ) if __name__ == "__main__": print(F'''{solution() = }''')
30
"""simple docstring""" import multiprocessing import time from arguments import PretokenizationArguments from datasets import load_dataset from transformers import AutoTokenizer, HfArgumentParser def __UpperCAmelCase ( lowercase ): """simple docstring""" _UpperCAmelCase = {} _UpperCAmelCase = tokenizer(example["""content"""] ,truncation=lowercase )["""input_ids"""] _UpperCAmelCase = len(example["""content"""] ) / len(output["""input_ids"""] ) return output UpperCAmelCase__ = HfArgumentParser(PretokenizationArguments) UpperCAmelCase__ = parser.parse_args() if args.num_workers is None: UpperCAmelCase__ = multiprocessing.cpu_count() UpperCAmelCase__ = AutoTokenizer.from_pretrained(args.tokenizer_dir) UpperCAmelCase__ = time.time() UpperCAmelCase__ = load_dataset(args.dataset_name, split="""train""") print(F'''Dataset loaded in {time.time()-t_start:.2f}s''') UpperCAmelCase__ = time.time() UpperCAmelCase__ = ds.map( tokenize, num_proc=args.num_workers, remove_columns=[ """repo_name""", """path""", """copies""", """size""", """content""", """license""", """hash""", """line_mean""", """line_max""", """alpha_frac""", """autogenerated""", ], ) print(F'''Dataset tokenized in {time.time()-t_start:.2f}s''') UpperCAmelCase__ = time.time() ds.push_to_hub(args.tokenized_data_repo) print(F'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
30
1
"""simple docstring""" import csv import tweepy # Twitter API credentials UpperCAmelCase__ = """""" UpperCAmelCase__ = """""" UpperCAmelCase__ = """""" UpperCAmelCase__ = """""" def __UpperCAmelCase ( lowercase ): """simple docstring""" # authorize twitter, initialize tweepy _UpperCAmelCase = tweepy.OAuthHandler(lowercase ,lowercase ) auth.set_access_token(lowercase ,lowercase ) _UpperCAmelCase = tweepy.API(lowercase ) # initialize a list to hold all the tweepy Tweets _UpperCAmelCase = [] # make initial request for most recent tweets (200 is the maximum allowed count) _UpperCAmelCase = api.user_timeline(screen_name=lowercase ,count=2_00 ) # save most recent tweets alltweets.extend(lowercase ) # save the id of the oldest tweet less one _UpperCAmelCase = alltweets[-1].id - 1 # keep grabbing tweets until there are no tweets left to grab while len(lowercase ) > 0: print(f'''getting tweets before {oldest}''' ) # all subsequent requests use the max_id param to prevent duplicates _UpperCAmelCase = api.user_timeline( screen_name=lowercase ,count=2_00 ,max_id=lowercase ) # save most recent tweets alltweets.extend(lowercase ) # update the id of the oldest tweet less one _UpperCAmelCase = alltweets[-1].id - 1 print(f'''...{len(lowercase )} tweets downloaded so far''' ) # transform the tweepy tweets into a 2D array that will populate the csv _UpperCAmelCase = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets] # write the csv with open(f'''new_{screen_name}_tweets.csv''' ,"""w""" ) as f: _UpperCAmelCase = csv.writer(lowercase ) writer.writerow(["""id""", """created_at""", """text"""] ) writer.writerows(lowercase ) if __name__ == "__main__": # pass in the username of the account you want to download get_all_tweets("""FirePing32""")
30
"""simple docstring""" from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...onnx.utils import compute_effective_axis_dimension from ...utils import logging if TYPE_CHECKING: from ...processing_utils import ProcessorMixin from ...utils import TensorType UpperCAmelCase__ = logging.get_logger(__name__) UpperCAmelCase__ = { """microsoft/layoutlmv3-base""": """https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json""", } class a ( lowerCAmelCase_ ): _snake_case : Any = 'layoutlmv3' def __init__( self : Optional[Any] , __lowerCAmelCase : Tuple=5_0265 , __lowerCAmelCase : Union[str, Any]=768 , __lowerCAmelCase : str=12 , __lowerCAmelCase : int=12 , __lowerCAmelCase : Any=3072 , __lowerCAmelCase : Any="gelu" , __lowerCAmelCase : Any=0.1 , __lowerCAmelCase : Tuple=0.1 , __lowerCAmelCase : Any=512 , __lowerCAmelCase : List[str]=2 , __lowerCAmelCase : Optional[Any]=0.02 , __lowerCAmelCase : Optional[int]=1e-5 , __lowerCAmelCase : int=1 , __lowerCAmelCase : Optional[Any]=0 , __lowerCAmelCase : Optional[Any]=2 , __lowerCAmelCase : List[str]=1024 , __lowerCAmelCase : Any=128 , __lowerCAmelCase : int=128 , __lowerCAmelCase : Tuple=True , __lowerCAmelCase : List[Any]=32 , __lowerCAmelCase : Any=128 , __lowerCAmelCase : int=64 , __lowerCAmelCase : List[str]=256 , __lowerCAmelCase : Union[str, Any]=True , __lowerCAmelCase : Any=True , __lowerCAmelCase : List[str]=True , __lowerCAmelCase : Optional[Any]=224 , __lowerCAmelCase : List[Any]=3 , __lowerCAmelCase : int=16 , __lowerCAmelCase : Optional[Any]=None , **__lowerCAmelCase : Union[str, Any] , ): super().__init__( vocab_size=__lowerCAmelCase , hidden_size=__lowerCAmelCase , num_hidden_layers=__lowerCAmelCase , num_attention_heads=__lowerCAmelCase , intermediate_size=__lowerCAmelCase , hidden_act=__lowerCAmelCase , hidden_dropout_prob=__lowerCAmelCase , attention_probs_dropout_prob=__lowerCAmelCase , max_position_embeddings=__lowerCAmelCase , type_vocab_size=__lowerCAmelCase , initializer_range=__lowerCAmelCase , layer_norm_eps=__lowerCAmelCase , pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , **__lowerCAmelCase , ) _UpperCAmelCase = max_ad_position_embeddings _UpperCAmelCase = coordinate_size _UpperCAmelCase = shape_size _UpperCAmelCase = has_relative_attention_bias _UpperCAmelCase = rel_pos_bins _UpperCAmelCase = max_rel_pos _UpperCAmelCase = has_spatial_attention_bias _UpperCAmelCase = rel_ad_pos_bins _UpperCAmelCase = max_rel_ad_pos _UpperCAmelCase = text_embed _UpperCAmelCase = visual_embed _UpperCAmelCase = input_size _UpperCAmelCase = num_channels _UpperCAmelCase = patch_size _UpperCAmelCase = classifier_dropout class a ( lowerCAmelCase_ ): _snake_case : str = version.parse('1.12' ) @property def lowerCAmelCase_ ( self : Dict ): # The order of inputs is different for question answering and sequence classification if self.task in ["question-answering", "sequence-classification"]: return OrderedDict( [ ("""input_ids""", {0: """batch""", 1: """sequence"""}), ("""attention_mask""", {0: """batch""", 1: """sequence"""}), ("""bbox""", {0: """batch""", 1: """sequence"""}), ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) else: return OrderedDict( [ ("""input_ids""", {0: """batch""", 1: """sequence"""}), ("""bbox""", {0: """batch""", 1: """sequence"""}), ("""attention_mask""", {0: 
"""batch""", 1: """sequence"""}), ("""pixel_values""", {0: """batch""", 1: """num_channels"""}), ] ) @property def lowerCAmelCase_ ( self : List[Any] ): return 1e-5 @property def lowerCAmelCase_ ( self : List[str] ): return 12 def lowerCAmelCase_ ( self : str , __lowerCAmelCase : "ProcessorMixin" , __lowerCAmelCase : int = -1 , __lowerCAmelCase : int = -1 , __lowerCAmelCase : bool = False , __lowerCAmelCase : Optional["TensorType"] = None , __lowerCAmelCase : int = 3 , __lowerCAmelCase : int = 40 , __lowerCAmelCase : int = 40 , ): setattr(processor.image_processor , """apply_ocr""" , __lowerCAmelCase ) # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX _UpperCAmelCase = compute_effective_axis_dimension( __lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX _UpperCAmelCase = processor.tokenizer.num_special_tokens_to_add(__lowerCAmelCase ) _UpperCAmelCase = compute_effective_axis_dimension( __lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__lowerCAmelCase ) # Generate dummy inputs according to compute batch and sequence _UpperCAmelCase = [[""" """.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size # Generate dummy bounding boxes _UpperCAmelCase = [[[48, 84, 73, 128]]] * batch_size # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX # batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch) _UpperCAmelCase = self._generate_dummy_images(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase = dict( processor( __lowerCAmelCase , text=__lowerCAmelCase , boxes=__lowerCAmelCase , return_tensors=__lowerCAmelCase , ) ) return inputs
30
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_speech_available, is_torch_available, ) UpperCAmelCase__ = { """configuration_trocr""": ["""TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TrOCRConfig"""], """processing_trocr""": ["""TrOCRProcessor"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ = [ """TROCR_PRETRAINED_MODEL_ARCHIVE_LIST""", """TrOCRForCausalLM""", """TrOCRPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig from .processing_trocr import TrOCRProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel else: import sys UpperCAmelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
30
"""simple docstring""" import csv from collections import defaultdict from dataclasses import dataclass, field from typing import List, Optional import matplotlib.pyplot as plt import numpy as np from matplotlib.ticker import ScalarFormatter from transformers import HfArgumentParser def __UpperCAmelCase ( lowercase=None ,lowercase=None ): """simple docstring""" return field(default_factory=lambda: default ,metadata=lowercase ) @dataclass class a : _snake_case : str = field( metadata={'help': 'The csv file to plot.'} , ) _snake_case : bool = field( default=lowerCAmelCase_ , metadata={'help': 'Whether to plot along batch size or sequence length. Defaults to sequence length.'} , ) _snake_case : bool = field( default=lowerCAmelCase_ , metadata={'help': 'Whether the csv file has time results or memory results. Defaults to memory results.'} , ) _snake_case : bool = field( default=lowerCAmelCase_ , metadata={'help': 'Disable logarithmic scale when plotting'} , ) _snake_case : bool = field( default=lowerCAmelCase_ , metadata={ 'help': 'Whether the csv file has training results or inference results. Defaults to inference results.' } , ) _snake_case : Optional[str] = field( default=lowerCAmelCase_ , metadata={'help': 'Filename under which the plot will be saved. If unused no plot is saved.'} , ) _snake_case : Optional[List[str]] = list_field( default=lowerCAmelCase_ , metadata={'help': 'List of model names that are used instead of the ones in the csv file.'} ) def __UpperCAmelCase ( lowercase ): """simple docstring""" try: int(lowercase ) return True except ValueError: return False def __UpperCAmelCase ( lowercase ): """simple docstring""" try: float(lowercase ) return True except ValueError: return False class a : def __init__( self : int , __lowerCAmelCase : Union[str, Any] ): _UpperCAmelCase = args _UpperCAmelCase = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}} ) with open(self.args.csv_file , newline="""""" ) as csv_file: _UpperCAmelCase = csv.DictReader(__lowerCAmelCase ) for row in reader: _UpperCAmelCase = row["""model"""] self.result_dict[model_name]["bsz"].append(int(row["""batch_size"""] ) ) self.result_dict[model_name]["seq_len"].append(int(row["""sequence_length"""] ) ) if can_convert_to_int(row["""result"""] ): # value is not None _UpperCAmelCase = int(row["""result"""] ) elif can_convert_to_float(row["""result"""] ): # value is not None _UpperCAmelCase = float(row["""result"""] ) def lowerCAmelCase_ ( self : Optional[Any] ): _UpperCAmelCase , _UpperCAmelCase = plt.subplots() _UpperCAmelCase = """Time usage""" if self.args.is_time else """Memory usage""" _UpperCAmelCase = title_str + """ for training""" if self.args.is_train else title_str + """ for inference""" if not self.args.no_log_scale: # set logarithm scales ax.set_xscale("""log""" ) ax.set_yscale("""log""" ) for axis in [ax.xaxis, ax.yaxis]: axis.set_major_formatter(ScalarFormatter() ) for model_name_idx, model_name in enumerate(self.result_dict.keys() ): _UpperCAmelCase = sorted(set(self.result_dict[model_name]["""bsz"""] ) ) _UpperCAmelCase = sorted(set(self.result_dict[model_name]["""seq_len"""] ) ) _UpperCAmelCase = self.result_dict[model_name]["""result"""] ((_UpperCAmelCase) , (_UpperCAmelCase)) = ( (batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes) ) _UpperCAmelCase = ( model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx] ) for inner_loop_value in inner_loop_array: if self.args.plot_along_batch: _UpperCAmelCase = 
np.asarray( [results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results] , dtype=__lowerCAmelCase , ) else: _UpperCAmelCase = np.asarray( [results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results] , dtype=np.floataa , ) ((_UpperCAmelCase) , (_UpperCAmelCase)) = ( ("""batch_size""", """len""") if self.args.plot_along_batch else ("""in #tokens""", """bsz""") ) _UpperCAmelCase = np.asarray(__lowerCAmelCase , __lowerCAmelCase )[: len(__lowerCAmelCase )] plt.scatter( __lowerCAmelCase , __lowerCAmelCase , label=f'''{label_model_name} - {inner_loop_label}: {inner_loop_value}''' ) plt.plot(__lowerCAmelCase , __lowerCAmelCase , """--""" ) title_str += f''' {label_model_name} vs.''' _UpperCAmelCase = title_str[:-4] _UpperCAmelCase = """Time in s""" if self.args.is_time else """Memory in MB""" # plot plt.title(__lowerCAmelCase ) plt.xlabel(__lowerCAmelCase ) plt.ylabel(__lowerCAmelCase ) plt.legend() if self.args.figure_png_file is not None: plt.savefig(self.args.figure_png_file ) else: plt.show() def __UpperCAmelCase ( ): """simple docstring""" _UpperCAmelCase = HfArgumentParser(lowercase ) _UpperCAmelCase = parser.parse_args_into_dataclasses()[0] _UpperCAmelCase = Plot(args=lowercase ) plot.plot() if __name__ == "__main__": main()
30
1
"""simple docstring""" import functools def __UpperCAmelCase ( lowercase ,lowercase ): """simple docstring""" # Validation if not isinstance(lowercase ,lowercase ) or not all(isinstance(lowercase ,lowercase ) for day in days ): raise ValueError("""The parameter days should be a list of integers""" ) if len(lowercase ) != 3 or not all(isinstance(lowercase ,lowercase ) for cost in costs ): raise ValueError("""The parameter costs should be a list of three integers""" ) if len(lowercase ) == 0: return 0 if min(lowercase ) <= 0: raise ValueError("""All days elements should be greater than 0""" ) if max(lowercase ) >= 3_66: raise ValueError("""All days elements should be less than 366""" ) _UpperCAmelCase = set(lowercase ) @functools.cache def dynamic_programming(lowercase ) -> int: if index > 3_65: return 0 if index not in days_set: return dynamic_programming(index + 1 ) return min( costs[0] + dynamic_programming(index + 1 ) ,costs[1] + dynamic_programming(index + 7 ) ,costs[2] + dynamic_programming(index + 30 ) ,) return dynamic_programming(1 ) if __name__ == "__main__": import doctest doctest.testmod()
30
"""simple docstring""" import os import pytest from attr import dataclass UpperCAmelCase__ = """us-east-1""" # defaults region @dataclass class a : _snake_case : str _snake_case : Tuple = 'arn:aws:iam::558105141721:role/sagemaker_execution_role' _snake_case : List[Any] = { 'task_name': 'mnli', 'per_device_train_batch_size': 16, 'per_device_eval_batch_size': 16, 'do_train': True, 'do_eval': True, 'do_predict': True, 'output_dir': '/opt/ml/model', 'overwrite_output_dir': True, 'max_steps': 5_00, 'save_steps': 55_00, } _snake_case : Optional[Any] = {**hyperparameters, 'max_steps': 10_00} @property def lowerCAmelCase_ ( self : Optional[Any] ): if self.framework == "pytorch": return [ {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"}, {"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"}, {"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"}, ] else: return [ {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"}, {"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"}, {"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"}, ] @property def lowerCAmelCase_ ( self : Dict ): return f'''{self.framework}-transfromers-test''' @property def lowerCAmelCase_ ( self : Union[str, Any] ): return f'''./tests/sagemaker/scripts/{self.framework}''' @property def lowerCAmelCase_ ( self : Dict ): if self.framework == "pytorch": return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04" else: return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04" @pytest.fixture(scope="""class""" ) def __UpperCAmelCase ( lowercase ): """simple docstring""" _UpperCAmelCase = SageMakerTestEnvironment(framework=request.cls.framework )
30
1
"""simple docstring""" from dataclasses import dataclass from typing import List, Optional, Union import numpy as np import PIL import torch from transformers import CLIPImageProcessor, CLIPVisionModel from ...models import PriorTransformer from ...pipelines import DiffusionPipeline from ...schedulers import HeunDiscreteScheduler from ...utils import ( BaseOutput, is_accelerate_available, logging, randn_tensor, replace_example_docstring, ) from .renderer import ShapERenderer UpperCAmelCase__ = logging.get_logger(__name__) # pylint: disable=invalid-name UpperCAmelCase__ = """ Examples: ```py >>> from PIL import Image >>> import torch >>> from diffusers import DiffusionPipeline >>> from diffusers.utils import export_to_gif, load_image >>> device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\") >>> repo = \"openai/shap-e-img2img\" >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16) >>> pipe = pipe.to(device) >>> guidance_scale = 3.0 >>> image_url = \"https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png\" >>> image = load_image(image_url).convert(\"RGB\") >>> images = pipe( ... image, ... guidance_scale=guidance_scale, ... num_inference_steps=64, ... frame_size=256, ... ).images >>> gif_path = export_to_gif(images[0], \"corgi_3d.gif\") ``` """ @dataclass class a ( lowerCAmelCase_ ): _snake_case : Union[PIL.Image.Image, np.ndarray] class a ( lowerCAmelCase_ ): def __init__( self : Optional[int] , __lowerCAmelCase : PriorTransformer , __lowerCAmelCase : CLIPVisionModel , __lowerCAmelCase : CLIPImageProcessor , __lowerCAmelCase : HeunDiscreteScheduler , __lowerCAmelCase : ShapERenderer , ): super().__init__() self.register_modules( prior=__lowerCAmelCase , image_encoder=__lowerCAmelCase , image_processor=__lowerCAmelCase , scheduler=__lowerCAmelCase , renderer=__lowerCAmelCase , ) def lowerCAmelCase_ ( self : int , __lowerCAmelCase : Any , __lowerCAmelCase : List[str] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Any , __lowerCAmelCase : Any , __lowerCAmelCase : int ): if latents is None: _UpperCAmelCase = randn_tensor(__lowerCAmelCase , generator=__lowerCAmelCase , device=__lowerCAmelCase , dtype=__lowerCAmelCase ) else: if latents.shape != shape: raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {shape}''' ) _UpperCAmelCase = latents.to(__lowerCAmelCase ) _UpperCAmelCase = latents * scheduler.init_noise_sigma return latents def lowerCAmelCase_ ( self : str , __lowerCAmelCase : Optional[Any]=0 ): if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError("""Please install accelerate via `pip install accelerate`""" ) _UpperCAmelCase = torch.device(f'''cuda:{gpu_id}''' ) _UpperCAmelCase = [self.image_encoder, self.prior] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(__lowerCAmelCase , __lowerCAmelCase ) @property def lowerCAmelCase_ ( self : Any ): if self.device != torch.device("""meta""" ) or not hasattr(self.image_encoder , """_hf_hook""" ): return self.device for module in self.image_encoder.modules(): if ( hasattr(__lowerCAmelCase , """_hf_hook""" ) and hasattr(module._hf_hook , """execution_device""" ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device def lowerCAmelCase_ ( self : List[str] , __lowerCAmelCase : Dict , __lowerCAmelCase : List[str] , __lowerCAmelCase : Dict , __lowerCAmelCase : Dict , ): if isinstance(__lowerCAmelCase , __lowerCAmelCase ) 
and isinstance(image[0] , torch.Tensor ): _UpperCAmelCase = torch.cat(__lowerCAmelCase , axis=0 ) if image[0].ndim == 4 else torch.stack(__lowerCAmelCase , axis=0 ) if not isinstance(__lowerCAmelCase , torch.Tensor ): _UpperCAmelCase = self.image_processor(__lowerCAmelCase , return_tensors="""pt""" ).pixel_values[0].unsqueeze(0 ) _UpperCAmelCase = image.to(dtype=self.image_encoder.dtype , device=__lowerCAmelCase ) _UpperCAmelCase = self.image_encoder(__lowerCAmelCase )["""last_hidden_state"""] _UpperCAmelCase = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256 _UpperCAmelCase = image_embeds.repeat_interleave(__lowerCAmelCase , dim=0 ) if do_classifier_free_guidance: _UpperCAmelCase = torch.zeros_like(__lowerCAmelCase ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes _UpperCAmelCase = torch.cat([negative_image_embeds, image_embeds] ) return image_embeds @torch.no_grad() @replace_example_docstring(__lowerCAmelCase ) def __call__( self : Any , __lowerCAmelCase : Union[PIL.Image.Image, List[PIL.Image.Image]] , __lowerCAmelCase : int = 1 , __lowerCAmelCase : int = 25 , __lowerCAmelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , __lowerCAmelCase : Optional[torch.FloatTensor] = None , __lowerCAmelCase : float = 4.0 , __lowerCAmelCase : int = 64 , __lowerCAmelCase : Optional[str] = "pil" , __lowerCAmelCase : bool = True , ): if isinstance(__lowerCAmelCase , PIL.Image.Image ): _UpperCAmelCase = 1 elif isinstance(__lowerCAmelCase , torch.Tensor ): _UpperCAmelCase = image.shape[0] elif isinstance(__lowerCAmelCase , __lowerCAmelCase ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ): _UpperCAmelCase = len(__lowerCAmelCase ) else: raise ValueError( f'''`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(__lowerCAmelCase )}''' ) _UpperCAmelCase = self._execution_device _UpperCAmelCase = batch_size * num_images_per_prompt _UpperCAmelCase = guidance_scale > 1.0 _UpperCAmelCase = self._encode_image(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) # prior self.scheduler.set_timesteps(__lowerCAmelCase , device=__lowerCAmelCase ) _UpperCAmelCase = self.scheduler.timesteps _UpperCAmelCase = self.prior.config.num_embeddings _UpperCAmelCase = self.prior.config.embedding_dim _UpperCAmelCase = self.prepare_latents( (batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , self.scheduler , ) # YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim _UpperCAmelCase = latents.reshape(latents.shape[0] , __lowerCAmelCase , __lowerCAmelCase ) for i, t in enumerate(self.progress_bar(__lowerCAmelCase ) ): # expand the latents if we are doing classifier free guidance _UpperCAmelCase = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents _UpperCAmelCase = self.scheduler.scale_model_input(__lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase = self.prior( __lowerCAmelCase , timestep=__lowerCAmelCase , proj_embedding=__lowerCAmelCase , ).predicted_image_embedding # remove the variance _UpperCAmelCase , _UpperCAmelCase = noise_pred.split( scaled_model_input.shape[2] , dim=2 ) # batch_size, num_embeddings, embedding_dim if do_classifier_free_guidance is not None: _UpperCAmelCase , 
_UpperCAmelCase = noise_pred.chunk(2 ) _UpperCAmelCase = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond) _UpperCAmelCase = self.scheduler.step( __lowerCAmelCase , timestep=__lowerCAmelCase , sample=__lowerCAmelCase , ).prev_sample if output_type == "latent": return ShapEPipelineOutput(images=__lowerCAmelCase ) _UpperCAmelCase = [] for i, latent in enumerate(__lowerCAmelCase ): print() _UpperCAmelCase = self.renderer.decode( latent[None, :] , __lowerCAmelCase , size=__lowerCAmelCase , ray_batch_size=4096 , n_coarse_samples=64 , n_fine_samples=128 , ) images.append(__lowerCAmelCase ) _UpperCAmelCase = torch.stack(__lowerCAmelCase ) if output_type not in ["np", "pil"]: raise ValueError(f'''Only the output types `pil` and `np` are supported not output_type={output_type}''' ) _UpperCAmelCase = images.cpu().numpy() if output_type == "pil": _UpperCAmelCase = [self.numpy_to_pil(__lowerCAmelCase ) for image in images] # Offload last model to CPU if hasattr(self , """final_offload_hook""" ) and self.final_offload_hook is not None: self.final_offload_hook.offload() if not return_dict: return (images,) return ShapEPipelineOutput(images=__lowerCAmelCase )
30
"""simple docstring""" import string from math import logaa def __UpperCAmelCase ( lowercase ,lowercase ): """simple docstring""" _UpperCAmelCase = document.translate( str.maketrans("""""" ,"""""" ,string.punctuation ) ).replace("""\n""" ,"""""" ) _UpperCAmelCase = document_without_punctuation.split(""" """ ) # word tokenization return len([word for word in tokenize_document if word.lower() == term.lower()] ) def __UpperCAmelCase ( lowercase ,lowercase ): """simple docstring""" _UpperCAmelCase = corpus.lower().translate( str.maketrans("""""" ,"""""" ,string.punctuation ) ) # strip all punctuation and replace it with '' _UpperCAmelCase = corpus_without_punctuation.split("""\n""" ) _UpperCAmelCase = term.lower() return (len([doc for doc in docs if term in doc] ), len(lowercase )) def __UpperCAmelCase ( lowercase ,lowercase ,lowercase=False ): """simple docstring""" if smoothing: if n == 0: raise ValueError("""log10(0) is undefined.""" ) return round(1 + logaa(n / (1 + df) ) ,3 ) if df == 0: raise ZeroDivisionError("""df must be > 0""" ) elif n == 0: raise ValueError("""log10(0) is undefined.""" ) return round(logaa(n / df ) ,3 ) def __UpperCAmelCase ( lowercase ,lowercase ): """simple docstring""" return round(tf * idf ,3 )
30
1
"""simple docstring""" import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES from ...utils import logging from ..auto import CONFIG_MAPPING UpperCAmelCase__ = logging.get_logger(__name__) UpperCAmelCase__ = { """salesforce/blip2-opt-2.7b""": """https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json""", } class a ( lowerCAmelCase_ ): _snake_case : Union[str, Any] = 'blip_2_vision_model' def __init__( self : Tuple , __lowerCAmelCase : Dict=1408 , __lowerCAmelCase : Tuple=6144 , __lowerCAmelCase : Optional[Any]=39 , __lowerCAmelCase : Any=16 , __lowerCAmelCase : int=224 , __lowerCAmelCase : Any=14 , __lowerCAmelCase : Tuple="gelu" , __lowerCAmelCase : Dict=0.00_001 , __lowerCAmelCase : Optional[Any]=0.0 , __lowerCAmelCase : List[Any]=1e-1_0 , __lowerCAmelCase : Dict=True , **__lowerCAmelCase : int , ): super().__init__(**__lowerCAmelCase ) _UpperCAmelCase = hidden_size _UpperCAmelCase = intermediate_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = patch_size _UpperCAmelCase = image_size _UpperCAmelCase = initializer_range _UpperCAmelCase = attention_dropout _UpperCAmelCase = layer_norm_eps _UpperCAmelCase = hidden_act _UpperCAmelCase = qkv_bias @classmethod def lowerCAmelCase_ ( cls : Optional[int] , __lowerCAmelCase : Union[str, os.PathLike] , **__lowerCAmelCase : Optional[Any] ): cls._set_token_in_kwargs(__lowerCAmelCase ) _UpperCAmelCase , _UpperCAmelCase = cls.get_config_dict(__lowerCAmelCase , **__lowerCAmelCase ) # get the vision config dict if we are loading from Blip2Config if config_dict.get("""model_type""" ) == "blip-2": _UpperCAmelCase = config_dict["""vision_config"""] if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( f'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type ''' f'''{cls.model_type}. 
This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(__lowerCAmelCase , **__lowerCAmelCase ) class a ( lowerCAmelCase_ ): _snake_case : str = 'blip_2_qformer' def __init__( self : str , __lowerCAmelCase : Dict=3_0522 , __lowerCAmelCase : Tuple=768 , __lowerCAmelCase : List[str]=12 , __lowerCAmelCase : Union[str, Any]=12 , __lowerCAmelCase : Union[str, Any]=3072 , __lowerCAmelCase : int="gelu" , __lowerCAmelCase : Tuple=0.1 , __lowerCAmelCase : List[Any]=0.1 , __lowerCAmelCase : str=512 , __lowerCAmelCase : Optional[Any]=0.02 , __lowerCAmelCase : List[Any]=1e-1_2 , __lowerCAmelCase : List[Any]=0 , __lowerCAmelCase : Optional[Any]="absolute" , __lowerCAmelCase : Union[str, Any]=2 , __lowerCAmelCase : Tuple=1408 , **__lowerCAmelCase : Optional[int] , ): super().__init__(pad_token_id=__lowerCAmelCase , **__lowerCAmelCase ) _UpperCAmelCase = vocab_size _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = hidden_act _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = max_position_embeddings _UpperCAmelCase = initializer_range _UpperCAmelCase = layer_norm_eps _UpperCAmelCase = position_embedding_type _UpperCAmelCase = cross_attention_frequency _UpperCAmelCase = encoder_hidden_size @classmethod def lowerCAmelCase_ ( cls : Union[str, Any] , __lowerCAmelCase : Union[str, os.PathLike] , **__lowerCAmelCase : Tuple ): cls._set_token_in_kwargs(__lowerCAmelCase ) _UpperCAmelCase , _UpperCAmelCase = cls.get_config_dict(__lowerCAmelCase , **__lowerCAmelCase ) # get the qformer config dict if we are loading from Blip2Config if config_dict.get("""model_type""" ) == "blip-2": _UpperCAmelCase = config_dict["""qformer_config"""] if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( f'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type ''' f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(__lowerCAmelCase , **__lowerCAmelCase ) class a ( lowerCAmelCase_ ): _snake_case : List[str] = 'blip-2' _snake_case : int = True def __init__( self : Any , __lowerCAmelCase : Union[str, Any]=None , __lowerCAmelCase : Union[str, Any]=None , __lowerCAmelCase : Dict=None , __lowerCAmelCase : Any=32 , **__lowerCAmelCase : List[Any] ): super().__init__(**__lowerCAmelCase ) if vision_config is None: _UpperCAmelCase = {} logger.info("""vision_config is None. initializing the Blip2VisionConfig with default values.""" ) if qformer_config is None: _UpperCAmelCase = {} logger.info("""qformer_config is None. Initializing the Blip2QFormerConfig with default values.""" ) if text_config is None: _UpperCAmelCase = {} logger.info("""text_config is None. 
Initializing the text config with default values (`OPTConfig`).""" ) _UpperCAmelCase = BlipaVisionConfig(**__lowerCAmelCase ) _UpperCAmelCase = BlipaQFormerConfig(**__lowerCAmelCase ) _UpperCAmelCase = text_config["""model_type"""] if """model_type""" in text_config else """opt""" _UpperCAmelCase = CONFIG_MAPPING[text_model_type](**__lowerCAmelCase ) _UpperCAmelCase = self.text_config.tie_word_embeddings _UpperCAmelCase = self.text_config.is_encoder_decoder _UpperCAmelCase = num_query_tokens _UpperCAmelCase = self.vision_config.hidden_size _UpperCAmelCase = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES _UpperCAmelCase = 1.0 _UpperCAmelCase = 0.02 @classmethod def lowerCAmelCase_ ( cls : Optional[int] , __lowerCAmelCase : BlipaVisionConfig , __lowerCAmelCase : BlipaQFormerConfig , __lowerCAmelCase : PretrainedConfig , **__lowerCAmelCase : Dict , ): return cls( vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **__lowerCAmelCase , ) def lowerCAmelCase_ ( self : Optional[Any] ): _UpperCAmelCase = copy.deepcopy(self.__dict__ ) _UpperCAmelCase = self.vision_config.to_dict() _UpperCAmelCase = self.qformer_config.to_dict() _UpperCAmelCase = self.text_config.to_dict() _UpperCAmelCase = self.__class__.model_type return output
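# A usage sketch for the composite config above, assuming the three classes carry their
# upstream transformers names Blip2VisionConfig, Blip2QFormerConfig and Blip2Config
# (the obfuscation in this file renames each class to `a`, so these names are
# assumptions):
# config = Blip2Config()  # every sub-config falls back to its defaults (OPT text model)
# config = Blip2Config.from_vision_qformer_text_configs(
#     vision_config=Blip2VisionConfig(),
#     qformer_config=Blip2QFormerConfig(),
#     text_config=OPTConfig(),
# )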
30
"""simple docstring""" import argparse from torch import nn # transformers_old should correspond to branch `save_old_prophetnet_model_structure` here # original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively from transformers_old.modeling_prophetnet import ( ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld, ) from transformers_old.modeling_xlm_prophetnet import ( XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld, ) from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging UpperCAmelCase__ = logging.get_logger(__name__) logging.set_verbosity_info() def __UpperCAmelCase ( lowercase ,lowercase ): """simple docstring""" if "xprophetnet" in prophetnet_checkpoint_path: _UpperCAmelCase = XLMProphetNetForConditionalGenerationOld.from_pretrained(lowercase ) _UpperCAmelCase , _UpperCAmelCase = XLMProphetNetForConditionalGeneration.from_pretrained( lowercase ,output_loading_info=lowercase ) else: _UpperCAmelCase = ProphetNetForConditionalGenerationOld.from_pretrained(lowercase ) _UpperCAmelCase , _UpperCAmelCase = ProphetNetForConditionalGeneration.from_pretrained( lowercase ,output_loading_info=lowercase ) _UpperCAmelCase = ["""key_proj""", """value_proj""", """query_proj"""] _UpperCAmelCase = { """self_attn""": """ngram_self_attn""", """cross_attn""": """encoder_attn""", """cross_attn_layer_norm""": """encoder_attn_layer_norm""", """feed_forward_layer_norm""": """final_layer_norm""", """feed_forward""": """""", """intermediate""": """fc1""", """output""": """fc2""", """key_proj""": """k_proj""", """query_proj""": """q_proj""", """value_proj""": """v_proj""", """word_embeddings""": """embed_tokens""", """embeddings_layer_norm""": """emb_layer_norm""", """relative_pos_embeddings""": """relative_linear""", """ngram_embeddings""": """ngram_input_embed""", """position_embeddings""": """embed_positions""", } for key in loading_info["missing_keys"]: _UpperCAmelCase = key.split(""".""" ) if attributes[0] == "lm_head": _UpperCAmelCase = prophet _UpperCAmelCase = prophet_old else: _UpperCAmelCase = prophet.prophetnet _UpperCAmelCase = prophet_old.model _UpperCAmelCase = False for attribute in attributes: if attribute in mapping: _UpperCAmelCase = mapping[attribute] if not hasattr(lowercase ,lowercase ) and len(lowercase ) > 0: _UpperCAmelCase = attribute elif hasattr(lowercase ,lowercase ): _UpperCAmelCase = attribute if attribute == "weight": assert old_model.weight.shape == model.weight.shape, "Shapes have to match!" _UpperCAmelCase = old_model.weight logger.info(f'''{attribute} is initialized.''' ) _UpperCAmelCase = True break elif attribute == "bias": assert old_model.bias.shape == model.bias.shape, "Shapes have to match!" 
_UpperCAmelCase = old_model.bias logger.info(f'''{attribute} is initialized''' ) _UpperCAmelCase = True break elif attribute in special_keys and hasattr(lowercase ,"""in_proj_weight""" ): _UpperCAmelCase = old_model.in_proj_weight.shape[0] // 3 _UpperCAmelCase = getattr(lowercase ,lowercase ) assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match" assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match" if attribute == "query_proj": _UpperCAmelCase = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] ) _UpperCAmelCase = nn.Parameter(old_model.in_proj_bias[:embed_dim] ) elif attribute == "key_proj": _UpperCAmelCase = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] ) _UpperCAmelCase = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] ) elif attribute == "value_proj": _UpperCAmelCase = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] ) _UpperCAmelCase = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] ) _UpperCAmelCase = True break elif attribute == "position_embeddings": assert ( model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1] ), "Hidden size has to match" assert model.position_embeddings.weight.shape[0] == 5_12, "We want 512 position_embeddings." _UpperCAmelCase = nn.Parameter(old_model.embed_positions.weight[:5_12, :] ) _UpperCAmelCase = True break if attribute.isdigit(): _UpperCAmelCase = model[int(lowercase )] _UpperCAmelCase = old_model[int(lowercase )] else: _UpperCAmelCase = getattr(lowercase ,lowercase ) if old_attribute == "": _UpperCAmelCase = old_model else: if not hasattr(lowercase ,lowercase ): raise ValueError(f'''{old_model} does not have {old_attribute}''' ) _UpperCAmelCase = getattr(lowercase ,lowercase ) if not is_key_init: raise ValueError(f'''{key} was not correctly initialized!''' ) print(f'''Saving model to {pytorch_dump_folder_path}''' ) prophet.save_pretrained(lowercase ) if __name__ == "__main__": UpperCAmelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--prophetnet_checkpoint_path""", default=None, type=str, required=True, help="""Path to the official PyTorch dump.""" ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) UpperCAmelCase__ = parser.parse_args() convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
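# Example invocation; the script name is hypothetical and both paths are placeholders,
# but the flags match the argparse arguments defined above:
# python convert_prophetnet_checkpoint.py \
#     --prophetnet_checkpoint_path patrickvonplaten/prophetnet-large-uncased_old \
#     --pytorch_dump_folder_path ./prophetnet-large-uncased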
30
1
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import re from ..utils import cached_file # docstyle-ignore UpperCAmelCase__ = """ Human: <<task>> Assistant: """ UpperCAmelCase__ = """huggingface-tools/default-prompts""" UpperCAmelCase__ = {"""chat""": """chat_prompt_template.txt""", """run""": """run_prompt_template.txt"""} def __UpperCAmelCase ( lowercase ,lowercase ,lowercase="run" ): """simple docstring""" if prompt_or_repo_id is None: _UpperCAmelCase = DEFAULT_PROMPTS_REPO # prompt is considered a repo ID when it does not contain any kind of space if re.search("""\\s""" ,lowercase ) is not None: return prompt_or_repo_id _UpperCAmelCase = cached_file( lowercase ,PROMPT_FILES[mode] ,repo_type="""dataset""" ,user_agent={"""agent""": agent_name} ) with open(lowercase ,"""r""" ,encoding="""utf-8""" ) as f: return f.read()
30
"""simple docstring""" import mpmath # for roots of unity import numpy as np class a : def __init__( self : Tuple , __lowerCAmelCase : Dict=None , __lowerCAmelCase : Union[str, Any]=None ): # Input as list _UpperCAmelCase = list(poly_a or [0] )[:] _UpperCAmelCase = list(poly_b or [0] )[:] # Remove leading zero coefficients while self.polyA[-1] == 0: self.polyA.pop() _UpperCAmelCase = len(self.polyA ) while self.polyB[-1] == 0: self.polyB.pop() _UpperCAmelCase = len(self.polyB ) # Add 0 to make lengths equal a power of 2 _UpperCAmelCase = int( 2 ** np.ceil(np.loga(len(self.polyA ) + len(self.polyB ) - 1 ) ) ) while len(self.polyA ) < self.c_max_length: self.polyA.append(0 ) while len(self.polyB ) < self.c_max_length: self.polyB.append(0 ) # A complex root used for the fourier transform _UpperCAmelCase = complex(mpmath.root(x=1 , n=self.c_max_length , k=1 ) ) # The product _UpperCAmelCase = self.__multiply() def lowerCAmelCase_ ( self : Optional[int] , __lowerCAmelCase : str ): _UpperCAmelCase = [[x] for x in self.polyA] if which == """A""" else [[x] for x in self.polyB] # Corner case if len(__lowerCAmelCase ) <= 1: return dft[0] # _UpperCAmelCase = self.c_max_length // 2 while next_ncol > 0: _UpperCAmelCase = [[] for i in range(__lowerCAmelCase )] _UpperCAmelCase = self.root**next_ncol # First half of next step _UpperCAmelCase = 1 for j in range(self.c_max_length // (next_ncol * 2) ): for i in range(__lowerCAmelCase ): new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j] ) current_root *= root # Second half of next step _UpperCAmelCase = 1 for j in range(self.c_max_length // (next_ncol * 2) ): for i in range(__lowerCAmelCase ): new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j] ) current_root *= root # Update _UpperCAmelCase = new_dft _UpperCAmelCase = next_ncol // 2 return dft[0] def lowerCAmelCase_ ( self : Tuple ): _UpperCAmelCase = self.__dft("""A""" ) _UpperCAmelCase = self.__dft("""B""" ) _UpperCAmelCase = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length )]] del dft_a del dft_b # Corner Case if len(inverce_c[0] ) <= 1: return inverce_c[0] # Inverse DFT _UpperCAmelCase = 2 while next_ncol <= self.c_max_length: _UpperCAmelCase = [[] for i in range(__lowerCAmelCase )] _UpperCAmelCase = self.root ** (next_ncol // 2) _UpperCAmelCase = 1 # First half of next step for j in range(self.c_max_length // next_ncol ): for i in range(next_ncol // 2 ): # Even positions new_inverse_c[i].append( ( inverce_c[i][j] + inverce_c[i][j + self.c_max_length // next_ncol] ) / 2 ) # Odd positions new_inverse_c[i + next_ncol // 2].append( ( inverce_c[i][j] - inverce_c[i][j + self.c_max_length // next_ncol] ) / (2 * current_root) ) current_root *= root # Update _UpperCAmelCase = new_inverse_c next_ncol *= 2 # Unpack _UpperCAmelCase = [round(x[0].real , 8 ) + round(x[0].imag , 8 ) * 1j for x in inverce_c] # Remove leading 0's while inverce_c[-1] == 0: inverce_c.pop() return inverce_c def __str__( self : Dict ): _UpperCAmelCase = """A = """ + """ + """.join( f'''{coef}*x^{i}''' for coef, i in enumerate(self.polyA[: self.len_A] ) ) _UpperCAmelCase = """B = """ + """ + """.join( f'''{coef}*x^{i}''' for coef, i in enumerate(self.polyB[: self.len_B] ) ) _UpperCAmelCase = """A*B = """ + """ + """.join( f'''{coef}*x^{i}''' for coef, i in enumerate(self.product ) ) return f'''{a}\n{b}\n{c}''' # Unit tests if __name__ == "__main__": import doctest doctest.testmod()
30
1