code
stringlengths
82
53.2k
code_codestyle
int64
0
721
style_context
stringlengths
91
41.9k
style_context_codestyle
int64
0
699
label
int64
0
1
"""simple docstring""" def A__ ( __lowerCamelCase, __lowerCamelCase ): """simple docstring""" _validate_point(__lowerCamelCase ) _validate_point(__lowerCamelCase ) if len(__lowerCamelCase ) != len(__lowerCamelCase ): raise ValueError('Both points must be in the same n-dimensional space' ) return float(sum(abs(a - b ) for a, b in zip(__lowerCamelCase, __lowerCamelCase ) ) ) def A__ ( __lowerCamelCase ): """simple docstring""" if point: if isinstance(__lowerCamelCase, __lowerCamelCase ): for item in point: if not isinstance(__lowerCamelCase, (int, float) ): _lowerCAmelCase = ( 'Expected a list of numbers as input, found ' F'''{type(__lowerCamelCase ).__name__}''' ) raise TypeError(__lowerCamelCase ) else: _lowerCAmelCase = F'''Expected a list of numbers as input, found {type(__lowerCamelCase ).__name__}''' raise TypeError(__lowerCamelCase ) else: raise ValueError('Missing an input' ) def A__ ( __lowerCamelCase, __lowerCamelCase ): """simple docstring""" _validate_point(__lowerCamelCase ) _validate_point(__lowerCamelCase ) if len(__lowerCamelCase ) != len(__lowerCamelCase ): raise ValueError('Both points must be in the same n-dimensional space' ) return float(sum(abs(x - y ) for x, y in zip(__lowerCamelCase, __lowerCamelCase ) ) ) if __name__ == "__main__": import doctest doctest.testmod()
589
"""simple docstring""" def A__ ( __lowerCamelCase ): """simple docstring""" if not head: return True # split the list to two parts _lowerCAmelCase , _lowerCAmelCase = head.next, head while fast and fast.next: _lowerCAmelCase = fast.next.next _lowerCAmelCase = slow.next _lowerCAmelCase = slow.next _lowerCAmelCase = None # Don't forget here! But forget still works! # reverse the second part _lowerCAmelCase = None while second: _lowerCAmelCase = second.next _lowerCAmelCase = node _lowerCAmelCase = second _lowerCAmelCase = nxt # compare two parts # second part has the same or one less node while node: if node.val != head.val: return False _lowerCAmelCase = node.next _lowerCAmelCase = head.next return True def A__ ( __lowerCamelCase ): """simple docstring""" if not head or not head.next: return True # 1. Get the midpoint (slow) _lowerCAmelCase = _lowerCAmelCase = _lowerCAmelCase = head while fast and fast.next: _lowerCAmelCase , _lowerCAmelCase = fast.next.next, slow.next # 2. Push the second half into the stack _lowerCAmelCase = [slow.val] while slow.next: _lowerCAmelCase = slow.next stack.append(slow.val ) # 3. Comparison while stack: if stack.pop() != cur.val: return False _lowerCAmelCase = cur.next return True def A__ ( __lowerCamelCase ): """simple docstring""" if not head or not head.next: return True _lowerCAmelCase = {} _lowerCAmelCase = 0 while head: if head.val in d: d[head.val].append(__lowerCamelCase ) else: _lowerCAmelCase = [pos] _lowerCAmelCase = head.next pos += 1 _lowerCAmelCase = pos - 1 _lowerCAmelCase = 0 for v in d.values(): if len(__lowerCamelCase ) % 2 != 0: middle += 1 else: _lowerCAmelCase = 0 for i in range(0, len(__lowerCamelCase ) ): if v[i] + v[len(__lowerCamelCase ) - 1 - step] != checksum: return False step += 1 if middle > 1: return False return True
589
1
import argparse import logging import os from datetime import datetime import numpy as np import torch from torch import nn from torch.utils.data import DataLoader, RandomSampler, TensorDataset from tqdm import tqdm from transformers import GPTaLMHeadModel UpperCamelCase__ = logging.getLogger(__name__) def UpperCAmelCase__ ( _A , _A ): """simple docstring""" if os.path.exists(_A ): if os.path.exists(os.path.join(_A , '''config.json''' ) ) and os.path.isfile( os.path.join(_A , '''config.json''' ) ): os.remove(os.path.join(_A , '''config.json''' ) ) if os.path.exists(os.path.join(_A , '''pytorch_model.bin''' ) ) and os.path.isfile( os.path.join(_A , '''pytorch_model.bin''' ) ): os.remove(os.path.join(_A , '''pytorch_model.bin''' ) ) else: os.makedirs(_A ) model.save_pretrained(_A ) def UpperCAmelCase__ ( _A , _A=False ): """simple docstring""" a_ = 2 if unlogit: a_ = torch.pow(_A , _A ) a_ = p * torch.log(_A ) a_ = 0 return -plogp.sum(dim=-1 ) def UpperCAmelCase__ ( _A ): """simple docstring""" logger.info('''lv, h >\t''' + '''\t'''.join(f"{x + 1}" for x in range(len(_A ) ) ) ) for row in range(len(_A ) ): if tensor.dtype != torch.long: logger.info(f"layer {row + 1}:\t" + '''\t'''.join(f"{x:.5f}" for x in tensor[row].cpu().data ) ) else: logger.info(f"layer {row + 1}:\t" + '''\t'''.join(f"{x:d}" for x in tensor[row].cpu().data ) ) def UpperCAmelCase__ ( _A , _A , _A , _A=True , _A=True , _A=None , _A=False ): """simple docstring""" a_ , a_ = model.config.num_hidden_layers, model.config.num_attention_heads a_ = torch.zeros(_A , _A ).to(args.device ) a_ = torch.zeros(_A , _A ).to(args.device ) if head_mask is None: a_ = torch.ones(_A , _A ).to(args.device ) head_mask.requires_grad_(requires_grad=_A ) # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch if actually_pruned: a_ = None a_ = 0.0 a_ = 0.0 for step, inputs in enumerate(tqdm(_A , desc='''Iteration''' , disable=args.local_rank not in [-1, 0] ) ): a_ = tuple(t.to(args.device ) 
for t in inputs ) ((a_ ) , ) = inputs # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below) a_ = model(_A , labels=_A , head_mask=_A ) # (loss), lm_logits, presents, (all hidden_states), (attentions) a_ , a_ , a_ = ( outputs[0], outputs[1], outputs[-1], ) # Loss and logits are the first, attention the last loss.backward() # Backpropagate to populate the gradients in the head mask total_loss += loss.detach().cpu().numpy() if compute_entropy: for layer, attn in enumerate(_A ): a_ = entropy(attn.detach() , _A ) attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach() if compute_importance: head_importance += head_mask.grad.abs().detach() tot_tokens += torch.ones_like(_A ).float().detach().sum().data # Normalize attn_entropy /= tot_tokens head_importance /= tot_tokens # Layerwise importance normalization if not args.dont_normalize_importance_by_layer: a_ = 2 a_ = torch.pow(torch.pow(_A , _A ).sum(-1 ) , 1 / exponent ) head_importance /= norm_by_layer.unsqueeze(-1 ) + 1e-2_0 if not args.dont_normalize_global_importance: a_ = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min()) # Print matrices if compute_entropy: logger.info('''Attention entropies''' ) print_ad_tensor(_A ) if compute_importance: logger.info('''Head importance scores''' ) print_ad_tensor(_A ) logger.info('''Head ranked by importance scores''' ) a_ = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device ) a_ = torch.arange( head_importance.numel() , device=args.device ) a_ = head_ranks.view_as(_A ) print_ad_tensor(_A ) return attn_entropy, head_importance, total_loss def UpperCAmelCase__ ( _A , _A , _A ): """simple docstring""" a_ , a_ , a_ = compute_heads_importance(_A , _A , _A , compute_entropy=_A ) a_ = 1 / loss # instead of downsteam score use the LM loss logger.info('''Pruning: original score: %f, threshold: %f''' , _A , original_score * args.masking_threshold ) a_ = 
torch.ones_like(_A ) a_ = max(1 , int(new_head_mask.numel() * args.masking_amount ) ) a_ = original_score while current_score >= original_score * args.masking_threshold: a_ = new_head_mask.clone().detach() # save current head mask # heads from least important to most - keep only not-masked heads a_ = float('''Inf''' ) a_ = head_importance.view(-1 ).sort()[1] if len(_A ) <= num_to_mask: print('''BREAK BY num_to_mask''' ) break # mask heads a_ = current_heads_to_mask[:num_to_mask] logger.info('''Heads to mask: %s''' , str(current_heads_to_mask.tolist() ) ) a_ = new_head_mask.view(-1 ) a_ = 0.0 a_ = new_head_mask.view_as(_A ) a_ = new_head_mask.clone().detach() print_ad_tensor(_A ) # Compute metric and head importance again a_ , a_ , a_ = compute_heads_importance( _A , _A , _A , compute_entropy=_A , head_mask=_A ) a_ = 1 / loss logger.info( '''Masking: current score: %f, remaining heads %d (%.1f percents)''' , _A , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 100 , ) logger.info('''Final head mask''' ) print_ad_tensor(_A ) np.save(os.path.join(args.output_dir , '''head_mask.npy''' ) , head_mask.detach().cpu().numpy() ) return head_mask def UpperCAmelCase__ ( _A , _A , _A , _A ): """simple docstring""" a_ = datetime.now() a_ , a_ , a_ = compute_heads_importance( _A , _A , _A , compute_entropy=_A , compute_importance=_A , head_mask=_A ) a_ = 1 / loss a_ = datetime.now() - before_time a_ = sum(p.numel() for p in model.parameters() ) a_ = { layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(_A ) ) } for k, v in heads_to_prune.items(): if isinstance(_A , _A ): a_ = [ v, ] assert sum(len(_A ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item() model.prune_heads(_A ) a_ = sum(p.numel() for p in model.parameters() ) a_ = datetime.now() a_ , a_ , a_ = compute_heads_importance( _A , _A , _A , compute_entropy=_A , compute_importance=_A , head_mask=_A , actually_pruned=_A , ) a_ = 1 / loss a_ = 
datetime.now() - before_time logger.info( '''Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)''' , _A , _A , pruned_num_params / original_num_params * 100 , ) logger.info('''Pruning: score with masking: %f score with pruning: %f''' , _A , _A ) logger.info('''Pruning: speed ratio (original timing / new timing): %f percents''' , original_time / new_time * 100 ) save_model(_A , args.output_dir ) def UpperCAmelCase__ ( ): """simple docstring""" a_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--data_dir''' , default=_A , type=_A , required=_A , help='''The input data dir. Should contain the .tsv files (or other data files) for the task.''' , ) parser.add_argument( '''--model_name_or_path''' , default=_A , type=_A , required=_A , help='''Path to pretrained model or model identifier from huggingface.co/models''' , ) parser.add_argument( '''--output_dir''' , default=_A , type=_A , required=_A , help='''The output directory where the model predictions and checkpoints will be written.''' , ) # Other parameters parser.add_argument( '''--config_name''' , default='''''' , type=_A , help='''Pretrained config name or path if not the same as model_name_or_path''' , ) parser.add_argument( '''--tokenizer_name''' , default='''''' , type=_A , help='''Pretrained tokenizer name or path if not the same as model_name_or_path''' , ) parser.add_argument( '''--cache_dir''' , default=_A , type=_A , help='''Where do you want to store the pre-trained models downloaded from s3''' , ) parser.add_argument( '''--data_subset''' , type=_A , default=-1 , help='''If > 0: limit the data to a subset of data_subset instances.''' ) parser.add_argument( '''--overwrite_output_dir''' , action='''store_true''' , help='''Whether to overwrite data in output directory''' ) parser.add_argument( '''--overwrite_cache''' , action='''store_true''' , help='''Overwrite the cached training and evaluation sets''' ) parser.add_argument( 
'''--dont_normalize_importance_by_layer''' , action='''store_true''' , help='''Don\'t normalize importance score by layers''' ) parser.add_argument( '''--dont_normalize_global_importance''' , action='''store_true''' , help='''Don\'t normalize all importance scores between 0 and 1''' , ) parser.add_argument( '''--try_masking''' , action='''store_true''' , help='''Whether to try to mask head until a threshold of accuracy.''' ) parser.add_argument( '''--masking_threshold''' , default=0.9 , type=_A , help='''masking threshold in term of metrics (stop masking when metric < threshold * original metric value).''' , ) parser.add_argument( '''--masking_amount''' , default=0.1 , type=_A , help='''Amount to heads to masking at each masking step.''' ) parser.add_argument('''--metric_name''' , default='''acc''' , type=_A , help='''Metric to use for head masking.''' ) parser.add_argument( '''--max_seq_length''' , default=128 , type=_A , help=( '''The maximum total input sequence length after WordPiece tokenization. 
\n''' '''Sequences longer than this will be truncated, sequences shorter padded.''' ) , ) parser.add_argument('''--batch_size''' , default=1 , type=_A , help='''Batch size.''' ) parser.add_argument('''--seed''' , type=_A , default=42 ) parser.add_argument('''--local_rank''' , type=_A , default=-1 , help='''local_rank for distributed training on gpus''' ) parser.add_argument('''--no_cuda''' , action='''store_true''' , help='''Whether not to use CUDA when available''' ) parser.add_argument('''--server_ip''' , type=_A , default='''''' , help='''Can be used for distant debugging.''' ) parser.add_argument('''--server_port''' , type=_A , default='''''' , help='''Can be used for distant debugging.''' ) a_ = parser.parse_args() if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print('''Waiting for debugger attach''' ) ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=_A ) ptvsd.wait_for_attach() # Setup devices and distributed training if args.local_rank == -1 or args.no_cuda: a_ = torch.device('''cuda''' if torch.cuda.is_available() and not args.no_cuda else '''cpu''' ) a_ = 0 if args.no_cuda else torch.cuda.device_count() else: torch.cuda.set_device(args.local_rank ) a_ = torch.device('''cuda''' , args.local_rank ) a_ = 1 torch.distributed.init_process_group(backend='''nccl''' ) # Initializes the distributed backend # Setup logging logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN ) logger.info('''device: {} n_gpu: {}, distributed: {}'''.format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) ) a_ = GPTaLMHeadModel.from_pretrained(args.model_name_or_path ) # Distributed and parallel training model.to(args.device ) if args.local_rank != -1: a_ = nn.parallel.DistributedDataParallel( _A , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=_A ) elif args.n_gpu 
> 1: a_ = nn.DataParallel(_A ) # Print/save training arguments os.makedirs(args.output_dir , exist_ok=_A ) torch.save(_A , os.path.join(args.output_dir , '''run_args.bin''' ) ) logger.info('''Training/evaluation parameters %s''' , _A ) # Prepare dataset a_ = np.concatenate( [ np.loadtxt(args.data_dir , dtype=np.intaa ), ] ) a_ = (torch.from_numpy(_A ),) a_ = TensorDataset(*_A ) a_ = RandomSampler(_A ) a_ = DataLoader(_A , sampler=_A , batch_size=args.batch_size ) # Compute head entropy and importance score compute_heads_importance(_A , _A , _A ) # Try head masking (set heads to zero until the score goes under a threshole) # and head pruning (remove masked heads and see the effect on the network) if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0: a_ = mask_heads(_A , _A , _A ) prune_heads(_A , _A , _A , _A ) if __name__ == "__main__": main()
703
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging UpperCamelCase__ = '''▁''' UpperCamelCase__ = {'''vocab_file''': '''spiece.model'''} UpperCamelCase__ = { '''vocab_file''': {'''google/pegasus-xsum''': '''https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'''} } UpperCamelCase__ = { '''google/pegasus-xsum''': 512, } UpperCamelCase__ = logging.get_logger(__name__) class __lowercase ( a__ ): _lowerCAmelCase = VOCAB_FILES_NAMES _lowerCAmelCase = VOCAB_FILES_NAMES _lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP _lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowerCAmelCase = ["input_ids", "attention_mask"] def __init__( self : Tuple , lowercase__ : Tuple , lowercase__ : List[str]="<pad>" , lowercase__ : Any="</s>" , lowercase__ : Union[str, Any]="<unk>" , lowercase__ : Any="<mask_2>" , lowercase__ : int="<mask_1>" , lowercase__ : List[Any]=None , lowercase__ : List[str]=1_0_3 , lowercase__ : Optional[Dict[str, Any]] = None , **lowercase__ : List[Any] , ): a_ = offset if additional_special_tokens is not None: if not isinstance(lowercase__ , lowercase__ ): raise TypeError( f"additional_special_tokens should be of type {type(lowercase__ )}, but is" f" {type(lowercase__ )}" ) a_ = ( ([mask_token_sent] + additional_special_tokens) if mask_token_sent not in additional_special_tokens and mask_token_sent is not None else additional_special_tokens ) # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken additional_special_tokens_extended += [ f"<unk_{i}>" for i in range(len(lowercase__ ) , self.offset - 1 ) ] if len(set(lowercase__ ) ) != len(lowercase__ ): raise ValueError( '''Please make sure that the provided additional_special_tokens do not contain an incorrectly''' f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}." 
) a_ = additional_special_tokens_extended else: a_ = [mask_token_sent] if mask_token_sent is not None else [] additional_special_tokens += [f"<unk_{i}>" for i in range(2 , self.offset )] a_ = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( eos_token=lowercase__ , unk_token=lowercase__ , mask_token=lowercase__ , pad_token=lowercase__ , mask_token_sent=lowercase__ , offset=lowercase__ , additional_special_tokens=lowercase__ , sp_model_kwargs=self.sp_model_kwargs , **lowercase__ , ) a_ = mask_token_sent a_ = vocab_file a_ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(lowercase__ ) # add special tokens to encoder dict a_ = { 0: self.pad_token, 1: self.eos_token, } if self.mask_token_sent is not None: self.encoder.update( { 2: self.mask_token_sent, 3: self.mask_token, } ) if self.offset > 0: # entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102 # mask_token_sent is already added to list -> so start at 1 self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1 , self.offset - 1 )} ) a_ = {v: k for k, v in self.encoder.items()} @property def __magic_name__ ( self : Optional[Any] ): return len(self.sp_model ) + self.offset def __magic_name__ ( self : Dict ): a_ = {self.convert_ids_to_tokens(lowercase__ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : str ): a_ = self.__dict__.copy() a_ = None return state def __setstate__( self : Tuple , lowercase__ : str ): a_ = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): a_ = {} a_ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def __magic_name__ ( self : Tuple , lowercase__ : str ): return self.sp_model.encode(lowercase__ , out_type=lowercase__ ) def __magic_name__ ( self : List[Any] , lowercase__ : str ): if token in self.decoder: return self.decoder[token] elif token in 
self.added_tokens_decoder: return self.added_tokens_decoder[token] a_ = self.sp_model.piece_to_id(lowercase__ ) return sp_id + self.offset def __magic_name__ ( self : str , lowercase__ : int ): if index in self.encoder: return self.encoder[index] elif index in self.added_tokens_encoder: return self.added_tokens_encoder[index] else: a_ = self.sp_model.IdToPiece(index - self.offset ) return token def __magic_name__ ( self : Optional[int] , lowercase__ : List[str] ): a_ = [] a_ = '''''' for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(lowercase__ ) + token a_ = [] else: current_sub_tokens.append(lowercase__ ) out_string += self.sp_model.decode(lowercase__ ) return out_string.strip() def __magic_name__ ( self : Tuple , lowercase__ : Optional[int]=False ): return 1 def __magic_name__ ( self : Any , lowercase__ : Any ): a_ = set(self.all_special_ids ) # call it once instead of inside list comp all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special return [1 if x in all_special_ids else 0 for x in seq] def __magic_name__ ( self : Union[str, Any] , lowercase__ : List , lowercase__ : Optional[List] = None , lowercase__ : bool = False ): if already_has_special_tokens: return self._special_token_mask(lowercase__ ) elif token_ids_a is None: return self._special_token_mask(lowercase__ ) + [1] else: return self._special_token_mask(token_ids_a + token_ids_a ) + [1] def __magic_name__ ( self : Union[str, Any] , lowercase__ : Any , lowercase__ : Optional[Any]=None ): if token_ids_a is None: return token_ids_a + [self.eos_token_id] # We don't expect to process pairs, but leave the pair logic for API consistency return token_ids_a + token_ids_a + [self.eos_token_id] def __magic_name__ ( self : Union[str, Any] , lowercase__ : str , lowercase__ : Optional[str] = None ): if not os.path.isdir(lowercase__ ): logger.error(f"Vocabulary path 
({save_directory}) should be a directory" ) return a_ = os.path.join( lowercase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase__ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , lowercase__ ) elif not os.path.isfile(self.vocab_file ): with open(lowercase__ , '''wb''' ) as fi: a_ = self.sp_model.serialized_model_proto() fi.write(lowercase__ ) return (out_vocab_file,)
143
0
from dataclasses import dataclass from typing import List, Optional, Union import numpy as np import PIL import torch from transformers import CLIPImageProcessor, CLIPVisionModel from ...models import PriorTransformer from ...pipelines import DiffusionPipeline from ...schedulers import HeunDiscreteScheduler from ...utils import ( BaseOutput, is_accelerate_available, logging, randn_tensor, replace_example_docstring, ) from .renderer import ShapERenderer _lowercase = logging.get_logger(__name__) # pylint: disable=invalid-name _lowercase = "\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n >>> repo = \"openai/shap-e-img2img\"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = \"https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png\"\n >>> image = load_image(image_url).convert(\"RGB\")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... 
).images\n\n >>> gif_path = export_to_gif(images[0], \"corgi_3d.gif\")\n ```\n" @dataclass class _UpperCAmelCase ( A__ ): UpperCamelCase__ = 42 class _UpperCAmelCase ( A__ ): def __init__( self , a__ , a__ , a__ , a__ , a__ , ): super().__init__() self.register_modules( prior=a__ , image_encoder=a__ , image_processor=a__ , scheduler=a__ , renderer=a__ , ) def snake_case_ ( self , a__ , a__ , a__ , a__ , a__ , a__): if latents is None: A__ = randn_tensor(a__ , generator=a__ , device=a__ , dtype=a__) else: if latents.shape != shape: raise ValueError(F"Unexpected latents shape, got {latents.shape}, expected {shape}") A__ = latents.to(a__) A__ = latents * scheduler.init_noise_sigma return latents def snake_case_ ( self , a__=0): if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError('''Please install accelerate via `pip install accelerate`''') A__ = torch.device(F"cuda:{gpu_id}") A__ = [self.image_encoder, self.prior] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(a__ , a__) @property def snake_case_ ( self): if self.device != torch.device('''meta''') or not hasattr(self.image_encoder , '''_hf_hook'''): return self.device for module in self.image_encoder.modules(): if ( hasattr(a__ , '''_hf_hook''') and hasattr(module._hf_hook , '''execution_device''') and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device) return self.device def snake_case_ ( self , a__ , a__ , a__ , a__ , ): if isinstance(a__ , a__) and isinstance(image[0] , torch.Tensor): A__ = torch.cat(a__ , axis=0) if image[0].ndim == 4 else torch.stack(a__ , axis=0) if not isinstance(a__ , torch.Tensor): A__ = self.image_processor(a__ , return_tensors='''pt''').pixel_values[0].unsqueeze(0) A__ = image.to(dtype=self.image_encoder.dtype , device=a__) A__ = self.image_encoder(a__)['''last_hidden_state'''] A__ = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256 A__ = 
image_embeds.repeat_interleave(a__ , dim=0) if do_classifier_free_guidance: A__ = torch.zeros_like(a__) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes A__ = torch.cat([negative_image_embeds, image_embeds]) return image_embeds @torch.no_grad() @replace_example_docstring(a__) def __call__( self , a__ , a__ = 1 , a__ = 2_5 , a__ = None , a__ = None , a__ = 4.0 , a__ = 6_4 , a__ = "pil" , a__ = True , ): if isinstance(a__ , PIL.Image.Image): A__ = 1 elif isinstance(a__ , torch.Tensor): A__ = image.shape[0] elif isinstance(a__ , a__) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image)): A__ = len(a__) else: raise ValueError( F"`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(a__)}") A__ = self._execution_device A__ = batch_size * num_images_per_prompt A__ = guidance_scale > 1.0 A__ = self._encode_image(a__ , a__ , a__ , a__) # prior self.scheduler.set_timesteps(a__ , device=a__) A__ = self.scheduler.timesteps A__ = self.prior.config.num_embeddings A__ = self.prior.config.embedding_dim A__ = self.prepare_latents( (batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , a__ , a__ , a__ , self.scheduler , ) # YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim A__ = latents.reshape(latents.shape[0] , a__ , a__) for i, t in enumerate(self.progress_bar(a__)): # expand the latents if we are doing classifier free guidance A__ = torch.cat([latents] * 2) if do_classifier_free_guidance else latents A__ = self.scheduler.scale_model_input(a__ , a__) A__ = self.prior( a__ , timestep=a__ , proj_embedding=a__ , ).predicted_image_embedding # remove the variance A__ , A__ = noise_pred.split( scaled_model_input.shape[2] , dim=2) # batch_size, num_embeddings, embedding_dim if 
do_classifier_free_guidance is not None: A__ , A__ = noise_pred.chunk(2) A__ = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond) A__ = self.scheduler.step( a__ , timestep=a__ , sample=a__ , ).prev_sample if output_type == "latent": return ShapEPipelineOutput(images=a__) A__ = [] for i, latent in enumerate(a__): print() A__ = self.renderer.decode( latent[None, :] , a__ , size=a__ , ray_batch_size=4_0_9_6 , n_coarse_samples=6_4 , n_fine_samples=1_2_8 , ) images.append(a__) A__ = torch.stack(a__) if output_type not in ["np", "pil"]: raise ValueError(F"Only the output types `pil` and `np` are supported not output_type={output_type}") A__ = images.cpu().numpy() if output_type == "pil": A__ = [self.numpy_to_pil(a__) for image in images] # Offload last model to CPU if hasattr(self , '''final_offload_hook''') and self.final_offload_hook is not None: self.final_offload_hook.offload() if not return_dict: return (images,) return ShapEPipelineOutput(images=a__)
632
import argparse import re import numpy as np import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( SamConfig, SamImageProcessor, SamModel, SamProcessor, SamVisionConfig, ) _lowercase = { "iou_prediction_head.layers.0": "iou_prediction_head.proj_in", "iou_prediction_head.layers.1": "iou_prediction_head.layers.0", "iou_prediction_head.layers.2": "iou_prediction_head.proj_out", "mask_decoder.output_upscaling.0": "mask_decoder.upscale_conv1", "mask_decoder.output_upscaling.1": "mask_decoder.upscale_layer_norm", "mask_decoder.output_upscaling.3": "mask_decoder.upscale_conv2", "mask_downscaling.0": "mask_embed.conv1", "mask_downscaling.1": "mask_embed.layer_norm1", "mask_downscaling.3": "mask_embed.conv2", "mask_downscaling.4": "mask_embed.layer_norm2", "mask_downscaling.6": "mask_embed.conv3", "point_embeddings": "point_embed", "pe_layer.positional_encoding_gaussian_matrix": "shared_embedding.positional_embedding", "image_encoder": "vision_encoder", "neck.0": "neck.conv1", "neck.1": "neck.layer_norm1", "neck.2": "neck.conv2", "neck.3": "neck.layer_norm2", "patch_embed.proj": "patch_embed.projection", ".norm": ".layer_norm", "blocks": "layers", } def lowerCAmelCase__ ( UpperCamelCase_ : Any )-> int: A__ = {} state_dict.pop('''pixel_mean''' , UpperCamelCase_ ) state_dict.pop('''pixel_std''' , UpperCamelCase_ ) A__ = r'''.*.output_hypernetworks_mlps.(\d+).layers.(\d+).*''' for key, value in state_dict.items(): for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items(): if key_to_modify in key: A__ = key.replace(UpperCamelCase_ , UpperCamelCase_ ) if re.match(UpperCamelCase_ , UpperCamelCase_ ): A__ = int(re.match(UpperCamelCase_ , UpperCamelCase_ ).group(2 ) ) if layer_nb == 0: A__ = key.replace('''layers.0''' , '''proj_in''' ) elif layer_nb == 1: A__ = key.replace('''layers.1''' , '''layers.0''' ) elif layer_nb == 2: A__ = key.replace('''layers.2''' , '''proj_out''' ) A__ = value A__ = model_state_dict[ 
'''prompt_encoder.shared_embedding.positional_embedding''' ] return model_state_dict def lowerCAmelCase__ ( UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : List[str] , UpperCamelCase_ : Any , UpperCamelCase_ : Union[str, Any]="ybelkada/segment-anything" )-> Optional[int]: A__ = hf_hub_download(UpperCamelCase_ , f"checkpoints/{model_name}.pth" ) if "sam_vit_b" in model_name: A__ = SamConfig() elif "sam_vit_l" in model_name: A__ = SamVisionConfig( hidden_size=1_0_2_4 , num_hidden_layers=2_4 , num_attention_heads=1_6 , global_attn_indexes=[5, 1_1, 1_7, 2_3] , ) A__ = SamConfig( vision_config=UpperCamelCase_ , ) elif "sam_vit_h" in model_name: A__ = SamVisionConfig( hidden_size=1_2_8_0 , num_hidden_layers=3_2 , num_attention_heads=1_6 , global_attn_indexes=[7, 1_5, 2_3, 3_1] , ) A__ = SamConfig( vision_config=UpperCamelCase_ , ) A__ = torch.load(UpperCamelCase_ , map_location='''cpu''' ) A__ = replace_keys(UpperCamelCase_ ) A__ = SamImageProcessor() A__ = SamProcessor(image_processor=UpperCamelCase_ ) A__ = SamModel(UpperCamelCase_ ) hf_model.load_state_dict(UpperCamelCase_ ) A__ = hf_model.to('''cuda''' ) A__ = '''https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png''' A__ = Image.open(requests.get(UpperCamelCase_ , stream=UpperCamelCase_ ).raw ).convert('''RGB''' ) A__ = [[[4_0_0, 6_5_0]]] A__ = [[1]] A__ = processor(images=np.array(UpperCamelCase_ ) , return_tensors='''pt''' ).to('''cuda''' ) with torch.no_grad(): A__ = hf_model(**UpperCamelCase_ ) A__ = output.iou_scores.squeeze() if model_name == "sam_vit_h_4b8939": assert scores[-1].item() == 0.579890251159668 A__ = processor( images=np.array(UpperCamelCase_ ) , input_points=UpperCamelCase_ , input_labels=UpperCamelCase_ , return_tensors='''pt''' ).to('''cuda''' ) with torch.no_grad(): A__ = hf_model(**UpperCamelCase_ ) A__ = output.iou_scores.squeeze() assert scores[-1].item() == 0.9712603092193604 A__ = ((7_5, 2_7_5, 1_7_2_5, 8_5_0),) A__ = processor(images=np.array(UpperCamelCase_ 
) , input_boxes=UpperCamelCase_ , return_tensors='''pt''' ).to('''cuda''' ) with torch.no_grad(): A__ = hf_model(**UpperCamelCase_ ) A__ = output.iou_scores.squeeze() assert scores[-1].item() == 0.8686015605926514 # Test with 2 points and 1 image. A__ = [[[4_0_0, 6_5_0], [8_0_0, 6_5_0]]] A__ = [[1, 1]] A__ = processor( images=np.array(UpperCamelCase_ ) , input_points=UpperCamelCase_ , input_labels=UpperCamelCase_ , return_tensors='''pt''' ).to('''cuda''' ) with torch.no_grad(): A__ = hf_model(**UpperCamelCase_ ) A__ = output.iou_scores.squeeze() assert scores[-1].item() == 0.9936047792434692 if __name__ == "__main__": _lowercase = argparse.ArgumentParser() _lowercase = ["sam_vit_b_01ec64", "sam_vit_h_4b8939", "sam_vit_l_0b3195"] parser.add_argument( "--model_name", default="sam_vit_h_4b8939", choices=choices, type=str, help="Path to hf config.json of model to convert", ) parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument( "--push_to_hub", action="store_true", help="Whether to push the model and processor to the hub after converting", ) parser.add_argument( "--model_hub_id", default="ybelkada/segment-anything", choices=choices, type=str, help="Path to hf config.json of model to convert", ) _lowercase = parser.parse_args() convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
632
1
'''simple docstring'''
import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

# NOTE(review): the original chunk assigned both the logger and this map to the
# same mangled name; distinct names restored from their use sites.
GIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'microsoft/git-base': 'https://huggingface.co/microsoft/git-base/resolve/main/config.json',
}


class GitVisionConfig(PretrainedConfig):
    """Configuration for the vision encoder of a GIT model.

    NOTE(review): the original defined every ``__init__`` parameter under one
    duplicated name (a SyntaxError) and dropped the ``self.`` prefix on the
    attribute assignments; parameter names are reconstructed from the body's
    right-hand-side identifiers.
    """

    model_type = "git_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=16,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs):
        """Load the vision sub-config, unwrapping it from a full GIT config if needed."""
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from GITConfig
        if config_dict.get('model_type') == "git":
            config_dict = config_dict['vision_config']
        if "model_type" in config_dict and hasattr(cls, 'model_type') and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
                f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.'''
            )
        return cls.from_dict(config_dict, **kwargs)


class GitConfig(PretrainedConfig):
    """Configuration for a GIT (GenerativeImage2Text) model.

    Holds the text-decoder hyper-parameters plus a nested :class:`GitVisionConfig`.
    NOTE(review): the original gave both classes in this chunk the same name
    (so the second shadowed the first) while this class instantiates
    ``GitVisionConfig`` by that name — the distinct names are restored here.
    """

    model_type = "git"

    def __init__(
        self,
        vision_config=None,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=6,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        tie_word_embeddings=False,
        bos_token_id=101,
        eos_token_id=102,
        num_image_with_embedding=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, **kwargs)
        if vision_config is None:
            vision_config = {}
            logger.info('vision_config is None. initializing the GitVisionConfig with default values.')
        self.vision_config = GitVisionConfig(**vision_config)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    def to_dict(self):
        """Serialize this config — including the nested vision config — to a plain dict."""
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
273
'''simple docstring''' import unittest from pathlib import Path from tempfile import TemporaryDirectory from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available from transformers.models.bert.tokenization_bert import BertTokenizer from transformers.testing_utils import require_tensorflow_text, require_tf, slow if is_tf_available(): import tensorflow as tf if is_tensorflow_text_available(): from transformers.models.bert import TFBertTokenizer A : Tuple = ['bert-base-uncased', 'bert-base-cased'] A : str = 'hf-internal-testing/tiny-bert-tf-only' if is_tf_available(): class lowerCamelCase ( tf.keras.Model ): def __init__( self : Union[str, Any] , __snake_case : List[Any] ): '''simple docstring''' super().__init__() _snake_case: List[Any] = tokenizer _snake_case: str = AutoConfig.from_pretrained(__snake_case ) _snake_case: List[Any] = TFAutoModel.from_config(__snake_case ) def SCREAMING_SNAKE_CASE_ ( self : str , __snake_case : List[str] ): '''simple docstring''' _snake_case: Optional[int] = self.tokenizer(__snake_case ) _snake_case: Tuple = self.bert(**__snake_case ) return out["pooler_output"] @require_tf @require_tensorflow_text class lowerCamelCase ( unittest.TestCase ): def SCREAMING_SNAKE_CASE_ ( self : Tuple ): '''simple docstring''' super().setUp() _snake_case: List[Any] = [ BertTokenizer.from_pretrained(__snake_case ) for checkpoint in (TOKENIZER_CHECKPOINTS * 2) ] # repeat for when fast_bert_tokenizer=false _snake_case: List[Any] = [TFBertTokenizer.from_pretrained(__snake_case ) for checkpoint in TOKENIZER_CHECKPOINTS] + [ TFBertTokenizer.from_pretrained(__snake_case , use_fast_bert_tokenizer=__snake_case ) for checkpoint in TOKENIZER_CHECKPOINTS ] assert len(self.tokenizers ) == len(self.tf_tokenizers ) _snake_case: int = [ 'This is a straightforward English test sentence.', 'This one has some weird characters\rto\nsee\r\nif those\u00E9break things.', 'Now we\'re going to add some Chinese: 一 二 三 一二三', 'And some much more 
rare Chinese: 齉 堃 齉堃', 'Je vais aussi écrire en français pour tester les accents', 'Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ', ] _snake_case: str = list(zip(self.test_sentences , self.test_sentences[::-1] ) ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ): '''simple docstring''' for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ): for test_inputs in (self.test_sentences, self.paired_sentences): _snake_case: List[Any] = tokenizer(__snake_case , return_tensors='tf' , padding='longest' ) _snake_case: Optional[Any] = tf_tokenizer(__snake_case ) for key in python_outputs.keys(): self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape ) ) self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key] , tf.intaa ) == tf_outputs[key] ) ) @slow def SCREAMING_SNAKE_CASE_ ( self : int ): '''simple docstring''' for tf_tokenizer in self.tf_tokenizers: _snake_case: str = tf_tokenizer(self.paired_sentences ) _snake_case: Any = tf_tokenizer( text=[sentence[0] for sentence in self.paired_sentences] , text_pair=[sentence[1] for sentence in self.paired_sentences] , ) for key in merged_outputs.keys(): self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key] , tf.intaa ) == separated_outputs[key] ) ) @slow def SCREAMING_SNAKE_CASE_ ( self : Tuple ): '''simple docstring''' for tf_tokenizer in self.tf_tokenizers: _snake_case: List[str] = tf.function(__snake_case ) for test_inputs in (self.test_sentences, self.paired_sentences): _snake_case: Union[str, Any] = tf.constant(__snake_case ) _snake_case: Any = compiled_tokenizer(__snake_case ) _snake_case: Union[str, Any] = tf_tokenizer(__snake_case ) for key in eager_outputs.keys(): self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) ) @slow def SCREAMING_SNAKE_CASE_ ( self : List[str] ): '''simple docstring''' for tf_tokenizer in self.tf_tokenizers: _snake_case: Optional[Any] = ModelToSave(tokenizer=__snake_case ) _snake_case: Tuple = 
tf.convert_to_tensor(self.test_sentences ) _snake_case: List[str] = model(__snake_case ) # Build model with some sample inputs with TemporaryDirectory() as tempdir: _snake_case: List[str] = Path(__snake_case ) / 'saved.model' model.save(__snake_case ) _snake_case: int = tf.keras.models.load_model(__snake_case ) _snake_case: int = loaded_model(__snake_case ) # We may see small differences because the loaded model is compiled, so we need an epsilon for the test self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output ) ) , 1e-5 )
273
1
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCAmelCase_ = {"""configuration_opt""": ["""OPT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """OPTConfig"""]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ """OPT_PRETRAINED_MODEL_ARCHIVE_LIST""", """OPTForCausalLM""", """OPTModel""", """OPTPreTrainedModel""", """OPTForSequenceClassification""", """OPTForQuestionAnswering""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = ["""TFOPTForCausalLM""", """TFOPTModel""", """TFOPTPreTrainedModel"""] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ """FlaxOPTForCausalLM""", """FlaxOPTModel""", """FlaxOPTPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_opt import ( OPT_PRETRAINED_MODEL_ARCHIVE_LIST, OPTForCausalLM, OPTForQuestionAnswering, OPTForSequenceClassification, OPTModel, OPTPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel else: import sys lowerCAmelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
411
def lowerCamelCase_(lowerCAmelCase: int) -> int:
    """Return the n-th term of Sylvester's sequence (1-indexed).

    The sequence is defined by a(1) = 2 and a(n) = a(n-1) * (a(n-1) - 1) + 1,
    giving 2, 3, 7, 43, 1807, ...

    :param lowerCAmelCase: 1-based index into the sequence; must be an ``int`` > 0.
    :return: the n-th Sylvester number.
    :raises TypeError: if the input is not an ``int`` (explicit raise instead of
        ``assert`` so validation survives ``python -O``).
    :raises ValueError: if the input is smaller than 1.
    """
    number = lowerCAmelCase
    if not isinstance(number, int):
        raise TypeError(f"The input value of [n={number}] is not an integer")
    if number < 1:
        raise ValueError(f"The input value of [n={number}] has to be > 0")
    # Iterative form of the recurrence: avoids RecursionError for large n and
    # fixes the original body's references to undefined names (`num`).
    term = 2
    for _ in range(number - 1):
        term = (term - 1) * term + 1
    return term


if __name__ == "__main__":
    # The original printed via an undefined name; call the function defined above.
    print(f"The 8th number in Sylvester's sequence: {lowerCamelCase_(8)}")
411
1
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

# NOTE(review): the original assigned both the logger and this map to the same
# mangled module name, so the second assignment clobbered the first.
MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}


class SCREAMING_SNAKE_CASE__(PretrainedConfig):
    """Configuration for a Megatron-BERT model.

    Stores the hyper-parameters used to instantiate the model; the defaults
    below are the values the original chunk carried.

    NOTE(review): the original declared every ``__init__`` parameter under the
    single duplicated name ``A_`` (a SyntaxError), used the undefined base
    class ``snake_case_``, and dropped the ``self.`` prefix on the attribute
    assignments. Parameter names are reconstructed from the assignment bodies'
    right-hand-side identifiers; the base class comes from the chunk's import.
    """

    # ``PretrainedConfig`` machinery keys off this attribute name.
    model_type = "megatron-bert"

    def __init__(
        self,
        vocab_size=29056,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
432
'''simple docstring''' # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCAmelCase : int = { 'configuration_xmod': [ 'XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XmodConfig', 'XmodOnnxConfig', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase : Dict = [ 'XMOD_PRETRAINED_MODEL_ARCHIVE_LIST', 'XmodForCausalLM', 'XmodForMaskedLM', 'XmodForMultipleChoice', 'XmodForQuestionAnswering', 'XmodForSequenceClassification', 'XmodForTokenClassification', 'XmodModel', 'XmodPreTrainedModel', ] if TYPE_CHECKING: from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xmod import ( XMOD_PRETRAINED_MODEL_ARCHIVE_LIST, XmodForCausalLM, XmodForMaskedLM, XmodForMultipleChoice, XmodForQuestionAnswering, XmodForSequenceClassification, XmodForTokenClassification, XmodModel, XmodPreTrainedModel, ) else: import sys lowerCAmelCase : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
432
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) A = {"""configuration_unispeech""": ["""UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP""", """UniSpeechConfig"""]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A = [ """UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST""", """UniSpeechForCTC""", """UniSpeechForPreTraining""", """UniSpeechForSequenceClassification""", """UniSpeechModel""", """UniSpeechPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_unispeech import ( UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST, UniSpeechForCTC, UniSpeechForPreTraining, UniSpeechForSequenceClassification, UniSpeechModel, UniSpeechPreTrainedModel, ) else: import sys A = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
77
import argparse
import os

import torch

from transformers.utils import WEIGHTS_NAME

# NOTE(review): the original bound all three constants to the same mangled
# name ``a__``; distinct names restored.
DIALOGPT_MODELS = ['small', 'medium', 'large']
# Fine-tuned DialoGPT checkpoints store the LM head under the decoder key;
# Hugging Face models expect it under ``lm_head.weight``.
OLD_KEY = 'lm_head.decoder.weight'
NEW_KEY = 'lm_head.weight'


def A__(checkpoint_path: str, pytorch_dump_folder_path: str) -> None:
    """Rename the LM-head weight in a DialoGPT checkpoint and save the result.

    Fixes the original definition, which declared both parameters under the
    same duplicated name (a SyntaxError) and referenced the undefined ``d``.

    :param checkpoint_path: path to an original ``*_ft.pkl`` torch checkpoint.
    :param pytorch_dump_folder_path: output directory; created if missing. The
        converted state dict is written there under ``WEIGHTS_NAME``.
    """
    d = torch.load(checkpoint_path)
    d[NEW_KEY] = d.pop(OLD_KEY)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    torch.save(d, os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME))


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--dialogpt_path', default='.', type=str)
    args = parser.parse_args()
    for MODEL in DIALOGPT_MODELS:
        checkpoint_path = os.path.join(args.dialogpt_path, f"{MODEL}_ft.pkl")
        pytorch_dump_folder_path = f"./DialoGPT-{MODEL}"
        A__(
            checkpoint_path,
            pytorch_dump_folder_path,
        )
279
0
'''simple docstring''' from ...utils import is_torch_available, is_transformers_available if is_transformers_available() and is_torch_available(): from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
710
import json import os from functools import lru_cache from typing import Dict, List, Optional, Tuple, Union import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...tokenization_utils_base import BatchEncoding, EncodedInput from ...utils import PaddingStrategy, logging snake_case__ : List[str] = logging.get_logger(__name__) snake_case__ : Optional[Any] = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""} # See all LED models at https://huggingface.co/models?filter=LED snake_case__ : Optional[Any] = { """vocab_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""", }, """merges_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""", }, """tokenizer_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""", }, } snake_case__ : List[str] = { """allenai/led-base-16384""": 1_63_84, } @lru_cache() # Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode def snake_case_ ( ): __lowercase = ( list(range(ord("!" 
) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) ) ) __lowercase = bs[:] __lowercase = 0 for b in range(2**8 ): if b not in bs: bs.append(_SCREAMING_SNAKE_CASE ) cs.append(2**8 + n ) n += 1 __lowercase = [chr(_SCREAMING_SNAKE_CASE ) for n in cs] return dict(zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) def snake_case_ ( _SCREAMING_SNAKE_CASE ): __lowercase = set() __lowercase = word[0] for char in word[1:]: pairs.add((prev_char, char) ) __lowercase = char return pairs class _A ( _lowercase ): '''simple docstring''' _snake_case : List[str] = VOCAB_FILES_NAMES _snake_case : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP _snake_case : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _snake_case : Union[str, Any] = ["""input_ids""", """attention_mask"""] def __init__( self : List[str] , lowerCamelCase : Tuple , lowerCamelCase : Tuple , lowerCamelCase : Optional[int]="replace" , lowerCamelCase : Dict="<s>" , lowerCamelCase : Dict="</s>" , lowerCamelCase : Optional[Any]="</s>" , lowerCamelCase : Any="<s>" , lowerCamelCase : List[str]="<unk>" , lowerCamelCase : Union[str, Any]="<pad>" , lowerCamelCase : Any="<mask>" , lowerCamelCase : str=False , **lowerCamelCase : Optional[Any] , ): '''simple docstring''' __lowercase = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else bos_token __lowercase = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else eos_token __lowercase = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else sep_token __lowercase = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else cls_token __lowercase = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if 
isinstance(lowerCamelCase , lowerCamelCase ) else unk_token __lowercase = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else pad_token # Mask token behave like a normal word, i.e. include the space before it __lowercase = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else mask_token super().__init__( errors=lowerCamelCase , bos_token=lowerCamelCase , eos_token=lowerCamelCase , unk_token=lowerCamelCase , sep_token=lowerCamelCase , cls_token=lowerCamelCase , pad_token=lowerCamelCase , mask_token=lowerCamelCase , add_prefix_space=lowerCamelCase , **lowerCamelCase , ) with open(lowerCamelCase , encoding="utf-8" ) as vocab_handle: __lowercase = json.load(lowerCamelCase ) __lowercase = {v: k for k, v in self.encoder.items()} __lowercase = errors # how to handle errors in decoding __lowercase = bytes_to_unicode() __lowercase = {v: k for k, v in self.byte_encoder.items()} with open(lowerCamelCase , encoding="utf-8" ) as merges_handle: __lowercase = merges_handle.read().split("\n" )[1:-1] __lowercase = [tuple(merge.split() ) for merge in bpe_merges] __lowercase = dict(zip(lowerCamelCase , range(len(lowerCamelCase ) ) ) ) __lowercase = {} __lowercase = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions __lowercase = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" ) @property # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size def _snake_case ( self : Optional[int] ): '''simple docstring''' return len(self.encoder ) def _snake_case ( self : Optional[int] ): '''simple docstring''' return dict(self.encoder , **self.added_tokens_encoder ) def _snake_case ( self : List[Any] , lowerCamelCase : str ): '''simple docstring''' if token in self.cache: return self.cache[token] __lowercase = 
tuple(lowerCamelCase ) __lowercase = get_pairs(lowerCamelCase ) if not pairs: return token while True: __lowercase = min(lowerCamelCase , key=lambda lowerCamelCase : self.bpe_ranks.get(lowerCamelCase , float("inf" ) ) ) if bigram not in self.bpe_ranks: break __lowercase , __lowercase = bigram __lowercase = [] __lowercase = 0 while i < len(lowerCamelCase ): try: __lowercase = word.index(lowerCamelCase , lowerCamelCase ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) __lowercase = j if word[i] == first and i < len(lowerCamelCase ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 __lowercase = tuple(lowerCamelCase ) __lowercase = new_word if len(lowerCamelCase ) == 1: break else: __lowercase = get_pairs(lowerCamelCase ) __lowercase = " ".join(lowerCamelCase ) __lowercase = word return word def _snake_case ( self : List[Any] , lowerCamelCase : Tuple ): '''simple docstring''' __lowercase = [] for token in re.findall(self.pat , lowerCamelCase ): __lowercase = "".join( self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCamelCase ).split(" " ) ) return bpe_tokens def _snake_case ( self : Dict , lowerCamelCase : Optional[int] ): '''simple docstring''' return self.encoder.get(lowerCamelCase , self.encoder.get(self.unk_token ) ) def _snake_case ( self : str , lowerCamelCase : Optional[Any] ): '''simple docstring''' return self.decoder.get(lowerCamelCase ) def _snake_case ( self : Union[str, Any] , lowerCamelCase : int ): '''simple docstring''' __lowercase = "".join(lowerCamelCase ) __lowercase = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors ) return text def _snake_case ( self : Optional[Any] , lowerCamelCase : str , lowerCamelCase : Optional[str] = None ): '''simple docstring''' if 
not os.path.isdir(lowerCamelCase ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return __lowercase = os.path.join( lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) __lowercase = os.path.join( lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] ) with open(lowerCamelCase , "w" , encoding="utf-8" ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCamelCase , ensure_ascii=lowerCamelCase ) + "\n" ) __lowercase = 0 with open(lowerCamelCase , "w" , encoding="utf-8" ) as writer: writer.write("#version: 0.2\n" ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda lowerCamelCase : kv[1] ): if index != token_index: logger.warning( f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.""" " Please check that the tokenizer is not corrupted!" ) __lowercase = token_index writer.write(" ".join(lowerCamelCase ) + "\n" ) index += 1 return vocab_file, merge_file def _snake_case ( self : Tuple , lowerCamelCase : List[int] , lowerCamelCase : Optional[List[int]] = None ): '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] __lowercase = [self.cls_token_id] __lowercase = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def _snake_case ( self : str , lowerCamelCase : List[int] , lowerCamelCase : Optional[List[int]] = None , lowerCamelCase : bool = False ): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=lowerCamelCase , token_ids_a=lowerCamelCase , already_has_special_tokens=lowerCamelCase ) if token_ids_a is None: return [1] + ([0] * len(lowerCamelCase )) + [1] return [1] + ([0] * len(lowerCamelCase )) + [1, 1] + ([0] * len(lowerCamelCase )) + [1] def _snake_case ( self : int , lowerCamelCase : List[int] , lowerCamelCase : Optional[List[int]] = 
None ): '''simple docstring''' __lowercase = [self.sep_token_id] __lowercase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _snake_case ( self : Dict , lowerCamelCase : Any , lowerCamelCase : Tuple=False , **lowerCamelCase : Any ): '''simple docstring''' __lowercase = kwargs.pop("add_prefix_space" , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(lowerCamelCase ) > 0 and not text[0].isspace()): __lowercase = " " + text return (text, kwargs) def _snake_case ( self : List[Any] , lowerCamelCase : Union[Dict[str, EncodedInput], BatchEncoding] , lowerCamelCase : Optional[int] = None , lowerCamelCase : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , lowerCamelCase : Optional[int] = None , lowerCamelCase : Optional[bool] = None , ): '''simple docstring''' __lowercase = super()._pad( encoded_inputs=lowerCamelCase , max_length=lowerCamelCase , padding_strategy=lowerCamelCase , pad_to_multiple_of=lowerCamelCase , return_attention_mask=lowerCamelCase , ) # Load from model defaults if return_attention_mask is None: __lowercase = "attention_mask" in self.model_input_names if return_attention_mask and "global_attention_mask" in encoded_inputs: __lowercase = encoded_inputs[self.model_input_names[0]] # `global_attention_mask` need to have the same length as other (sequential) inputs. 
__lowercase = len(encoded_inputs["global_attention_mask"] ) != len(lowerCamelCase ) if needs_to_be_padded: __lowercase = len(lowerCamelCase ) - len(encoded_inputs["global_attention_mask"] ) if self.padding_side == "right": # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend` __lowercase = ( encoded_inputs["global_attention_mask"] + [-1] * difference ) elif self.padding_side == "left": __lowercase = [-1] * difference + encoded_inputs[ "global_attention_mask" ] else: raise ValueError("Invalid padding strategy:" + str(self.padding_side ) ) return encoded_inputs
655
0
import logging import os import sys from dataclasses import dataclass, field from importlib import import_module from typing import Dict, List, Optional, Tuple import numpy as np from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score from torch import nn from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask import transformers from transformers import ( AutoConfig, AutoModelForTokenClassification, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import is_main_process UpperCAmelCase_ = logging.getLogger(__name__) @dataclass class lowercase__ : '''simple docstring''' a : str = field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ) a : List[str] = field( default=_UpperCamelCase , metadata={"help": "Pretrained config name or path if not the same as model_name"} ) a : Optional[Any] = field( default="NER" , metadata={"help": "Task type to fine tune in training (e.g. NER, POS, etc)"} ) a : List[str] = field( default=_UpperCamelCase , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) a : Any = field(default=_UpperCamelCase , metadata={"help": "Set this flag to use fast tokenization."} ) # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script, # or just modify its tokenizer_config.json. a : Any = field( default=_UpperCamelCase , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , ) @dataclass class lowercase__ : '''simple docstring''' a : List[str] = field( metadata={"help": "The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."} ) a : Optional[int] = field( default=_UpperCamelCase , metadata={"help": "Path to a file containing all labels. 
If not specified, CoNLL-2003 labels are used."} , ) a : str = field( default=128 , metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) } , ) a : Tuple = field( default=_UpperCamelCase , metadata={"help": "Overwrite the cached training and evaluation sets"} ) def lowerCAmelCase_ ( ) -> Union[str, Any]: UpperCamelCase__ : str = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. UpperCamelCase__ : str = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: UpperCamelCase__ : Union[str, Any] = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty. Use" ''' --overwrite_output_dir to overcome.''' ) UpperCamelCase__ : List[str] = import_module('''tasks''' ) try: UpperCamelCase__ : str = getattr(_lowercase , model_args.task_type ) UpperCamelCase__ : TokenClassificationTask = token_classification_task_clazz() except AttributeError: raise ValueError( f"Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. 
" f"Available tasks classes are: {TokenClassificationTask.__subclasses__()}" ) # Setup logging logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info('''Training/evaluation parameters %s''' , _lowercase ) # Set seed set_seed(training_args.seed ) # Prepare CONLL-2003 task UpperCamelCase__ : Tuple = token_classification_task.get_labels(data_args.labels ) UpperCamelCase__ : Dict[int, str] = dict(enumerate(_lowercase ) ) UpperCamelCase__ : Optional[int] = len(_lowercase ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
UpperCamelCase__ : Dict = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=_lowercase , idalabel=_lowercase , labelaid={label: i for i, label in enumerate(_lowercase )} , cache_dir=model_args.cache_dir , ) UpperCamelCase__ : Dict = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast , ) UpperCamelCase__ : Any = AutoModelForTokenClassification.from_pretrained( model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=_lowercase , cache_dir=model_args.cache_dir , ) # Get datasets UpperCamelCase__ : int = ( TokenClassificationDataset( token_classification_task=_lowercase , data_dir=data_args.data_dir , tokenizer=_lowercase , labels=_lowercase , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , ) if training_args.do_train else None ) UpperCamelCase__ : List[Any] = ( TokenClassificationDataset( token_classification_task=_lowercase , data_dir=data_args.data_dir , tokenizer=_lowercase , labels=_lowercase , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , ) if training_args.do_eval else None ) def align_predictions(__UpperCAmelCase: np.ndarray , __UpperCAmelCase: np.ndarray ) -> Tuple[List[int], List[int]]: UpperCamelCase__ : str = np.argmax(_lowercase , axis=2 ) UpperCamelCase__ : Tuple = preds.shape UpperCamelCase__ : Tuple = [[] for _ in range(_lowercase )] UpperCamelCase__ : Dict = [[] for _ in range(_lowercase )] for i in range(_lowercase ): for j in range(_lowercase ): if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index: out_label_list[i].append(label_map[label_ids[i][j]] ) preds_list[i].append(label_map[preds[i][j]] ) return preds_list, 
out_label_list def compute_metrics(__UpperCAmelCase: EvalPrediction ) -> Dict: UpperCamelCase__ : str = align_predictions(p.predictions , p.label_ids ) return { "accuracy_score": accuracy_score(_lowercase , _lowercase ), "precision": precision_score(_lowercase , _lowercase ), "recall": recall_score(_lowercase , _lowercase ), "f1": fa_score(_lowercase , _lowercase ), } # Data collator UpperCamelCase__ : List[str] = DataCollatorWithPadding(_lowercase , pad_to_multiple_of=8 ) if training_args.fpaa else None # Initialize our Trainer UpperCamelCase__ : List[str] = Trainer( model=_lowercase , args=_lowercase , train_dataset=_lowercase , eval_dataset=_lowercase , compute_metrics=_lowercase , data_collator=_lowercase , ) # Training if training_args.do_train: trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) trainer.save_model() # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) if trainer.is_world_process_zero(): tokenizer.save_pretrained(training_args.output_dir ) # Evaluation UpperCamelCase__ : List[str] = {} if training_args.do_eval: logger.info('''*** Evaluate ***''' ) UpperCamelCase__ : Union[str, Any] = trainer.evaluate() UpperCamelCase__ : Tuple = os.path.join(training_args.output_dir , '''eval_results.txt''' ) if trainer.is_world_process_zero(): with open(_lowercase , '''w''' ) as writer: logger.info('''***** Eval results *****''' ) for key, value in result.items(): logger.info(''' %s = %s''' , _lowercase , _lowercase ) writer.write('''%s = %s\n''' % (key, value) ) results.update(_lowercase ) # Predict if training_args.do_predict: UpperCamelCase__ : List[Any] = TokenClassificationDataset( token_classification_task=_lowercase , data_dir=data_args.data_dir , tokenizer=_lowercase , labels=_lowercase , model_type=config.model_type , max_seq_length=data_args.max_seq_length , 
overwrite_cache=data_args.overwrite_cache , mode=Split.test , ) UpperCamelCase__ : Any = trainer.predict(_lowercase ) UpperCamelCase__ : Optional[int] = align_predictions(_lowercase , _lowercase ) UpperCamelCase__ : Optional[Any] = os.path.join(training_args.output_dir , '''test_results.txt''' ) if trainer.is_world_process_zero(): with open(_lowercase , '''w''' ) as writer: for key, value in metrics.items(): logger.info(''' %s = %s''' , _lowercase , _lowercase ) writer.write('''%s = %s\n''' % (key, value) ) # Save predictions UpperCamelCase__ : Optional[int] = os.path.join(training_args.output_dir , '''test_predictions.txt''' ) if trainer.is_world_process_zero(): with open(_lowercase , '''w''' ) as writer: with open(os.path.join(data_args.data_dir , '''test.txt''' ) , '''r''' ) as f: token_classification_task.write_predictions_to_file(_lowercase , _lowercase , _lowercase ) return results def lowerCAmelCase_ ( __UpperCAmelCase: Optional[int] ) -> Tuple: main() if __name__ == "__main__": main()
253
"""Generate images with an (optionally INC-quantized) Stable Diffusion model.

Loads the pipeline components from ``--pretrained_model_name_or_path``, swaps
in a quantized UNet from ``best_model.pt`` when one is present, generates
``--images_num`` images for ``--caption`` and saves them plus a grid image.
"""
import argparse
import math
import os

import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer

from diffusers import AutoencoderKL, StableDiffusionPipeline, UNet2DConditionModel


def parse_args():
    """Parse the command-line options for this script."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-m",
        "--pretrained_model_name_or_path",
        type=str,
        default=None,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
    )
    parser.add_argument(
        "-c",
        "--caption",
        type=str,
        default="robotic cat with wings",
        help="Text used to generate images.",
    )
    parser.add_argument(
        "-n",
        "--images_num",
        type=int,
        default=4,
        help="How much images to generate.",
    )
    parser.add_argument(
        "-s",
        "--seed",
        type=int,
        default=42,
        help="Seed for random process.",
    )
    parser.add_argument(
        "-ci",
        "--cuda_id",
        type=int,
        default=0,
        help="cuda_id.",
    )
    return parser.parse_args()


def image_grid(imgs, rows, cols):
    """Paste ``rows * cols`` equally-sized PIL images into one grid image.

    Raises:
        ValueError: if ``len(imgs) != rows * cols``.
    """
    if not len(imgs) == rows * cols:
        raise ValueError("The specified number of rows and columns are not correct.")
    w, h = imgs[0].size
    grid = Image.new("RGB", size=(cols * w, rows * h))
    # BUG FIX: the original re-bound (w, h) to grid.size right here, which
    # corrupted the per-tile paste offsets below; the tile size must stay the
    # size of a single input image.
    for i, img in enumerate(imgs):
        grid.paste(img, box=(i % cols * w, i // cols * h))
    return grid


def generate_images(
    pipeline,
    prompt="robotic cat with wings",
    guidance_scale=7.5,
    num_inference_steps=50,
    num_images_per_prompt=1,
    seed=42,
):
    """Run the pipeline once and return ``(grid_image, list_of_images)``."""
    generator = torch.Generator(pipeline.device).manual_seed(seed)
    images = pipeline(
        prompt,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        generator=generator,
        num_images_per_prompt=num_images_per_prompt,
    ).images
    _rows = int(math.sqrt(num_images_per_prompt))
    grid = image_grid(images, rows=_rows, cols=num_images_per_prompt // _rows)
    return grid, images


args = parse_args()

# Load models and create wrapper for stable diffusion
tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer")
text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="text_encoder")
vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae")
unet = UNet2DConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="unet")
pipeline = StableDiffusionPipeline.from_pretrained(
    args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
# Disable the safety checker: return the images unchanged, never flag NSFW.
pipeline.safety_checker = lambda images, clip_input: (images, False)
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, "best_model.pt")):
    # Restore the Intel Neural Compressor-quantized UNet when a checkpoint exists.
    unet = load(args.pretrained_model_name_or_path, model=unet)
    unet.eval()
    setattr(pipeline, "unet", unet)
else:
    unet = unet.to(torch.device("cuda", args.cuda_id))
pipeline = pipeline.to(unet.device)
grid, images = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, "{}.png".format("_".join(args.caption.split()))))
dirname = os.path.join(args.pretrained_model_name_or_path, "_".join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
    image.save(os.path.join(dirname, "{}.png".format(idx + 1)))
266
0
"""Lazy-import stub for the Wav2Vec2 phoneme tokenizer subpackage."""
from typing import TYPE_CHECKING

from ...utils import _LazyModule

# Submodule name -> public symbols that this package exposes lazily.
__A = {'''tokenization_wav2vec2_phoneme''': ['''Wav2Vec2PhonemeCTCTokenizer''']}

if TYPE_CHECKING:
    # Static type checkers resolve the real import eagerly.
    from .tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizer
else:
    import sys

    # Replace this module in sys.modules with a lazy proxy so submodules are
    # only imported on first attribute access.
    # NOTE(review): `_import_structure` is not defined in this chunk — the
    # mapping above is bound to `__A`; verify against the original module.
    __A = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
703
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __A = { '''configuration_distilbert''': [ '''DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''DistilBertConfig''', '''DistilBertOnnxConfig''', ], '''tokenization_distilbert''': ['''DistilBertTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = ['''DistilBertTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ '''DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''DistilBertForMaskedLM''', '''DistilBertForMultipleChoice''', '''DistilBertForQuestionAnswering''', '''DistilBertForSequenceClassification''', '''DistilBertForTokenClassification''', '''DistilBertModel''', '''DistilBertPreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ '''TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFDistilBertForMaskedLM''', '''TFDistilBertForMultipleChoice''', '''TFDistilBertForQuestionAnswering''', '''TFDistilBertForSequenceClassification''', '''TFDistilBertForTokenClassification''', '''TFDistilBertMainLayer''', '''TFDistilBertModel''', '''TFDistilBertPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ '''FlaxDistilBertForMaskedLM''', '''FlaxDistilBertForMultipleChoice''', '''FlaxDistilBertForQuestionAnswering''', '''FlaxDistilBertForSequenceClassification''', '''FlaxDistilBertForTokenClassification''', '''FlaxDistilBertModel''', '''FlaxDistilBertPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_distilbert import ( DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, DistilBertConfig, 
DistilBertOnnxConfig, ) from .tokenization_distilbert import DistilBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_distilbert_fast import DistilBertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_distilbert import ( DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, DistilBertModel, DistilBertPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_distilbert import ( TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFDistilBertForMaskedLM, TFDistilBertForMultipleChoice, TFDistilBertForQuestionAnswering, TFDistilBertForSequenceClassification, TFDistilBertForTokenClassification, TFDistilBertMainLayer, TFDistilBertModel, TFDistilBertPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_distilbert import ( FlaxDistilBertForMaskedLM, FlaxDistilBertForMultipleChoice, FlaxDistilBertForQuestionAnswering, FlaxDistilBertForSequenceClassification, FlaxDistilBertForTokenClassification, FlaxDistilBertModel, FlaxDistilBertPreTrainedModel, ) else: import sys __A = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
61
0
def sum_of_digits(n: int) -> int:
    """Return the sum of the decimal digits of ``n`` (sign ignored), iteratively.

    >>> sum_of_digits(12345)
    15
    >>> sum_of_digits(-123)
    6
    """
    # BUG FIX: the original four functions were all named identically and their
    # bodies referenced an undefined name instead of the parameter; restored
    # the names the module itself calls (sum_of_digits, ..., benchmark).
    n = abs(n)
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res


def sum_of_digits_recursion(n: int) -> int:
    """Return the digit sum of ``n`` (sign ignored), recursively.

    >>> sum_of_digits_recursion(12345)
    15
    """
    n = abs(n)
    return n if n < 10 else n % 10 + sum_of_digits_recursion(n // 10)


def sum_of_digits_compact(n: int) -> int:
    """Return the digit sum of ``n`` (sign ignored) via string conversion.

    >>> sum_of_digits_compact(12345)
    15
    """
    return sum(int(c) for c in str(abs(n)))


def benchmark() -> None:
    """Time each digit-sum implementation on a few large inputs and print results."""
    from timeit import timeit

    def benchmark_a_function(func, value) -> None:
        # timeit re-imports __main__, so the functions must live at module level.
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        print(f"{call:56} = {func(value)} -- {timing:.4f} seconds")

    for value in (26_2144, 1125_8999_0684_2624, 126_7650_6002_2822_9401_4967_0320_5376):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func, value)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
84
import warnings from typing import Dict, List, Optional, Tuple from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging snake_case_ = logging.get_logger(__name__) class SCREAMING_SNAKE_CASE__ (__snake_case ): __lowerCamelCase : Optional[Any] = ["""input_ids""", """attention_mask"""] def __init__( self , a="</s>" , a="<unk>" , a="<pad>" , a=125 , a=None , **a , ): # Add extra_ids to the special token list if extra_ids > 0 and additional_special_tokens is None: lowercase__ : Any = [f"""<extra_id_{i}>""" for i in range(a)] elif extra_ids > 0 and additional_special_tokens is not None: # Check that we have the right number of extra_id special tokens lowercase__ : List[Any] = len(set(filter(lambda a: bool('extra_id' in str(a)) , a))) if extra_tokens != extra_ids: raise ValueError( f"""Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are""" ' provided to ByT5Tokenizer. In this case the additional_special_tokens must include the' ' extra_ids tokens') lowercase__ : List[str] = AddedToken(a , lstrip=a , rstrip=a) if isinstance(a , a) else pad_token lowercase__ : Tuple = AddedToken(a , lstrip=a , rstrip=a) if isinstance(a , a) else eos_token lowercase__ : str = AddedToken(a , lstrip=a , rstrip=a) if isinstance(a , a) else unk_token super().__init__( eos_token=a , unk_token=a , pad_token=a , extra_ids=a , additional_special_tokens=a , **a , ) lowercase__ : str = extra_ids lowercase__ : Any = 2**8 # utf is 8 bits # define special tokens dict lowercase__ : Dict[int, str] = { self.pad_token: 0, self.eos_token: 1, self.unk_token: 2, } lowercase__ : Optional[Any] = len(self.special_tokens_encoder) lowercase__ : List[str] = len(a) for i, token in enumerate(a): lowercase__ : List[Any] = self.vocab_size + i - n lowercase__ : Dict[str, int] = {v: k for k, v in self.special_tokens_encoder.items()} @property def snake_case_ ( self): return self._utf_vocab_size + self._num_special_tokens + self._extra_ids def 
snake_case_ ( self , a , a = None , a = False): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=a , token_ids_a=a , already_has_special_tokens=a) # normal case: some special tokens if token_ids_a is None: return ([0] * len(a)) + [1] return ([0] * len(a)) + [1] + ([0] * len(a)) + [1] def snake_case_ ( self , a): if len(a) > 0 and token_ids[-1] == self.eos_token_id: warnings.warn( f"""This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated""" ' eos tokens being added.') return token_ids else: return token_ids + [self.eos_token_id] def snake_case_ ( self , a , a = None): lowercase__ : Tuple = [self.eos_token_id] if token_ids_a is None: return len(token_ids_a + eos) * [0] return len(token_ids_a + eos + token_ids_a + eos) * [0] def snake_case_ ( self , a , a = None): lowercase__ : List[str] = self._add_eos_if_not_present(a) if token_ids_a is None: return token_ids_a else: lowercase__ : Union[str, Any] = self._add_eos_if_not_present(a) return token_ids_a + token_ids_a def snake_case_ ( self , a): lowercase__ : Any = [chr(a) for i in text.encode('utf-8')] return tokens def snake_case_ ( self , a): if token in self.special_tokens_encoder: lowercase__ : Tuple = self.special_tokens_encoder[token] elif token in self.added_tokens_encoder: lowercase__ : Tuple = self.added_tokens_encoder[token] elif len(a) != 1: lowercase__ : Tuple = self.unk_token_id else: lowercase__ : Optional[Any] = ord(a) + self._num_special_tokens return token_id def snake_case_ ( self , a): if index in self.special_tokens_decoder: lowercase__ : Dict = self.special_tokens_decoder[index] else: lowercase__ : List[Any] = chr(index - self._num_special_tokens) return token def snake_case_ ( self , a): lowercase__ : Optional[Any] = B'' for token in tokens: if token in self.special_tokens_decoder: lowercase__ : Optional[int] = self.special_tokens_decoder[token].encode('utf-8') elif token in self.added_tokens_decoder: lowercase__ : 
Union[str, Any] = self.special_tokens_decoder[token].encode('utf-8') elif token in self.special_tokens_encoder: lowercase__ : List[str] = token.encode('utf-8') elif token in self.added_tokens_encoder: lowercase__ : Optional[int] = token.encode('utf-8') else: lowercase__ : Optional[Any] = bytes([ord(a)]) bstring += tok_string lowercase__ : List[Any] = bstring.decode('utf-8' , errors='ignore') return string def snake_case_ ( self , a , a = None): return ()
164
0
"""simple docstring""" import qiskit def UpperCamelCase_ ( lowerCamelCase : int = 2 ) -> qiskit.result.counts.Counts: """simple docstring""" __magic_name__ : List[str] = qubits # Using Aer's simulator __magic_name__ : Union[str, Any] = qiskit.Aer.get_backend('''aer_simulator''' ) # Creating a Quantum Circuit acting on the q register __magic_name__ : Any = qiskit.QuantumCircuit(__lowerCAmelCase , __lowerCAmelCase ) # Adding a H gate on qubit 0 (now q0 in superposition) circuit.h(0 ) for i in range(1 , __lowerCAmelCase ): # Adding CX (CNOT) gate circuit.cx(i - 1 , __lowerCAmelCase ) # Mapping the quantum measurement to the classical bits circuit.measure(list(range(__lowerCAmelCase ) ) , list(range(__lowerCAmelCase ) ) ) # Now measuring any one qubit would affect other qubits to collapse # their super position and have same state as the measured one. # Executing the circuit on the simulator __magic_name__ : List[Any] = qiskit.execute(__lowerCAmelCase , __lowerCAmelCase , shots=1000 ) return job.result().get_counts(__lowerCAmelCase ) if __name__ == "__main__": print(F"""Total count for various states are: {quantum_entanglement(3)}""")
712
"""simple docstring""" from ..utils import DummyObject, requires_backends class _UpperCamelCase ( metaclass=lowerCamelCase__ ): """simple docstring""" snake_case_ = ['note_seq'] def __init__( self : List[Any] , *snake_case : Any , **snake_case : Any ) -> Any: '''simple docstring''' requires_backends(self , ['''note_seq'''] ) @classmethod def _UpperCAmelCase ( cls : List[str] , *snake_case : Optional[int] , **snake_case : str ) -> Tuple: '''simple docstring''' requires_backends(cls , ['''note_seq'''] ) @classmethod def _UpperCAmelCase ( cls : Optional[int] , *snake_case : Optional[int] , **snake_case : Tuple ) -> List[str]: '''simple docstring''' requires_backends(cls , ['''note_seq'''] )
147
0
"""Project Euler problem 23: sum of all positive integers that cannot be
written as the sum of two abundant numbers."""


def solution(limit: int = 28123) -> int:
    """Return the sum of all n <= limit not expressible as a sum of two abundants.

    Uses a divisor-sum sieve, then a growing set of abundant numbers: when
    checking n, every abundant summand of n is <= n and therefore already in
    the set.
    """
    # sum_divs[n] = sum of proper divisors of n (every n starts with divisor 1).
    sum_divs = [1] * (limit + 1)
    for i in range(2, int(limit**0.5) + 1):
        sum_divs[i * i] += i
        for k in range(i + 1, limit // i + 1):
            sum_divs[k * i] += k + i

    abundants = set()
    res = 0
    for n in range(1, limit + 1):
        if sum_divs[n] > n:
            # BUG FIX: the original added the *limit* parameter here instead of
            # n, so the abundant set never contained actual abundant numbers
            # and the function degenerated to sum(1..limit).
            abundants.add(n)
        if not any((n - a) in abundants for a in abundants):
            res += n
    return res


if __name__ == "__main__":
    print(solution())
379
'''simple docstring''' import warnings from contextlib import contextmanager from ...processing_utils import ProcessorMixin from .feature_extraction_wavaveca import WavaVecaFeatureExtractor from .tokenization_wavaveca import WavaVecaCTCTokenizer class UpperCamelCase__ ( lowercase_ ): """simple docstring""" SCREAMING_SNAKE_CASE__ = '''Wav2Vec2FeatureExtractor''' SCREAMING_SNAKE_CASE__ = '''AutoTokenizer''' def __init__( self : Optional[int] , lowerCamelCase_ : str , lowerCamelCase_ : Dict ): '''simple docstring''' super().__init__(lowerCamelCase_ , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : int = self.feature_extractor SCREAMING_SNAKE_CASE : List[Any] = False @classmethod def lowerCamelCase_ ( cls : int , lowerCamelCase_ : List[str] , **lowerCamelCase_ : Any ): '''simple docstring''' try: return super().from_pretrained(lowerCamelCase_ , **lowerCamelCase_ ) except OSError: warnings.warn( f'''Loading a tokenizer inside {cls.__name__} from a config that does not''' """ include a `tokenizer_class` attribute is deprecated and will be """ """removed in v5. Please add `'tokenizer_class': 'Wav2Vec2CTCTokenizer'`""" """ attribute to either your `config.json` or `tokenizer_config.json` """ """file to suppress this warning: """ , lowerCamelCase_ , ) SCREAMING_SNAKE_CASE : Optional[Any] = WavaVecaFeatureExtractor.from_pretrained(lowerCamelCase_ , **lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Dict = WavaVecaCTCTokenizer.from_pretrained(lowerCamelCase_ , **lowerCamelCase_ ) return cls(feature_extractor=lowerCamelCase_ , tokenizer=lowerCamelCase_ ) def __call__( self : List[Any] , *lowerCamelCase_ : str , **lowerCamelCase_ : int ): '''simple docstring''' if self._in_target_context_manager: return self.current_processor(*lowerCamelCase_ , **lowerCamelCase_ ) if "raw_speech" in kwargs: warnings.warn("""Using `raw_speech` as a keyword argument is deprecated. 
Use `audio` instead.""" ) SCREAMING_SNAKE_CASE : Dict = kwargs.pop("""raw_speech""" ) else: SCREAMING_SNAKE_CASE : Any = kwargs.pop("""audio""" , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Tuple = kwargs.pop("""sampling_rate""" , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : int = kwargs.pop("""text""" , lowerCamelCase_ ) if len(lowerCamelCase_ ) > 0: SCREAMING_SNAKE_CASE : Union[str, Any] = args[0] SCREAMING_SNAKE_CASE : Dict = args[1:] if audio is None and text is None: raise ValueError("""You need to specify either an `audio` or `text` input to process.""" ) if audio is not None: SCREAMING_SNAKE_CASE : str = self.feature_extractor(lowerCamelCase_ , *lowerCamelCase_ , sampling_rate=lowerCamelCase_ , **lowerCamelCase_ ) if text is not None: SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer(lowerCamelCase_ , **lowerCamelCase_ ) if text is None: return inputs elif audio is None: return encodings else: SCREAMING_SNAKE_CASE : List[str] = encodings["""input_ids"""] return inputs def lowerCamelCase_ ( self : Optional[Any] , *lowerCamelCase_ : Tuple , **lowerCamelCase_ : Dict ): '''simple docstring''' if self._in_target_context_manager: return self.current_processor.pad(*lowerCamelCase_ , **lowerCamelCase_ ) SCREAMING_SNAKE_CASE : str = kwargs.pop("""input_features""" , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Dict = kwargs.pop("""labels""" , lowerCamelCase_ ) if len(lowerCamelCase_ ) > 0: SCREAMING_SNAKE_CASE : Optional[int] = args[0] SCREAMING_SNAKE_CASE : Optional[Any] = args[1:] if input_features is not None: SCREAMING_SNAKE_CASE : Any = self.feature_extractor.pad(lowerCamelCase_ , *lowerCamelCase_ , **lowerCamelCase_ ) if labels is not None: SCREAMING_SNAKE_CASE : int = self.tokenizer.pad(lowerCamelCase_ , **lowerCamelCase_ ) if labels is None: return input_features elif input_features is None: return labels else: SCREAMING_SNAKE_CASE : Dict = labels["""input_ids"""] return input_features def lowerCamelCase_ ( self : Optional[int] , *lowerCamelCase_ : List[str] , 
**lowerCamelCase_ : Optional[Any] ): '''simple docstring''' return self.tokenizer.batch_decode(*lowerCamelCase_ , **lowerCamelCase_ ) def lowerCamelCase_ ( self : List[str] , *lowerCamelCase_ : Optional[int] , **lowerCamelCase_ : Optional[int] ): '''simple docstring''' return self.tokenizer.decode(*lowerCamelCase_ , **lowerCamelCase_ ) @contextmanager def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' warnings.warn( """`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your """ """labels by using the argument `text` of the regular `__call__` method (either in the same call as """ """your audio inputs, or in a separate call.""" ) SCREAMING_SNAKE_CASE : int = True SCREAMING_SNAKE_CASE : Optional[Any] = self.tokenizer yield SCREAMING_SNAKE_CASE : int = self.feature_extractor SCREAMING_SNAKE_CASE : Optional[Any] = False
379
1
import json
import os
import tempfile

import datasets

from utils import generate_example_dataset, get_duration

SPEED_TEST_N_EXAMPLES = 50000
SMALL_TEST = 5000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))


@get_duration
def read(dataset: datasets.Dataset, length: int):
    """Row-by-row read of the first `length` examples."""
    for i in range(length):
        _ = dataset[i]


@get_duration
def read_batch(dataset: datasets.Dataset, length: int, batch_size: int):
    """Sliced read over the whole dataset in `batch_size` chunks."""
    for i in range(0, len(dataset), batch_size):
        _ = dataset[i : i + batch_size]


@get_duration
def read_formatted(dataset: datasets.Dataset, type: str, length: int):
    """Row-by-row read with an output format (numpy/pandas/torch/...) applied."""
    # Param is named `type` (shadowing the builtin) because the kwargs dicts
    # below pass it as {"type": ...}.
    with dataset.formatted_as(type=type):
        for i in range(length):
            _ = dataset[i]


@get_duration
def read_formatted_batch(dataset: datasets.Dataset, type: str, length: int, batch_size: int):
    """Batched read with an output format applied."""
    with dataset.formatted_as(type=type):
        for i in range(0, length, batch_size):
            _ = dataset[i : i + batch_size]


def benchmark_iterating():
    """Generate a synthetic dataset, time every read pattern before and after
    shuffling, and dump the timings as JSON to RESULTS_FILE_PATH.

    Fixes vs. the mangled original: the read functions had duplicate/obfuscated
    names so the `functions` lists referenced undefined symbols, and the timing
    returned by each `@get_duration` call was discarded instead of stored.
    """
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    functions = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted, {"type": "pandas", "length": SMALL_TEST}),
        (read_formatted, {"type": "torch", "length": SMALL_TEST}),
        (read_formatted, {"type": "tensorflow", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1000}),
    ]
    # A reduced set is timed again after shuffling (format conversions are
    # unaffected by row order, so only numpy is re-measured).
    functions_shuffled = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1000}),
    ]
    with tempfile.TemporaryDirectory() as tmp_dir:
        print("generating dataset")
        features = datasets.Features(
            {"list": datasets.Sequence(datasets.Value("float32")), "numbers": datasets.Value("float32")}
        )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"),
            features,
            num_examples=SPEED_TEST_N_EXAMPLES,
            seq_shapes={"list": (100,)},
        )
        print("first set of iterations")
        for func, kwargs in functions:
            print(func.__name__, str(kwargs))
            times[func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(dataset, **kwargs)

        print("shuffling dataset")
        dataset = dataset.shuffle()
        print("Second set of iterations (after shuffling")
        for func, kwargs in functions_shuffled:
            print("shuffled ", func.__name__, str(kwargs))
            times["shuffled " + func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(
                dataset, **kwargs
            )

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))


if __name__ == "__main__":  # useful to run the profiler
    benchmark_iterating()
184
import unittest from diffusers.models.unet_ad_blocks import * # noqa F403 from diffusers.utils import torch_device from .test_unet_blocks_common import UNetBlockTesterMixin class __UpperCamelCase ( SCREAMING_SNAKE_CASE , unittest.TestCase ): _lowercase : str = DownBlockaD # noqa F405 _lowercase : Union[str, Any] = "down" def _SCREAMING_SNAKE_CASE ( self: List[str] ): '''simple docstring''' __magic_name__ = [-0.0232, -0.9869, 0.8054, -0.0637, -0.1688, -1.4264, 0.4470, -1.3394, 0.0904] super().test_output(__UpperCamelCase ) class __UpperCamelCase ( SCREAMING_SNAKE_CASE , unittest.TestCase ): _lowercase : List[str] = ResnetDownsampleBlockaD # noqa F405 _lowercase : Union[str, Any] = "down" def _SCREAMING_SNAKE_CASE ( self: int ): '''simple docstring''' __magic_name__ = [0.0710, 0.2410, -0.7320, -1.0757, -1.1343, 0.3540, -0.0133, -0.2576, 0.0948] super().test_output(__UpperCamelCase ) class __UpperCamelCase ( SCREAMING_SNAKE_CASE , unittest.TestCase ): _lowercase : Dict = AttnDownBlockaD # noqa F405 _lowercase : List[Any] = "down" def _SCREAMING_SNAKE_CASE ( self: Any ): '''simple docstring''' __magic_name__ = [0.0636, 0.8964, -0.6234, -1.0131, 0.0844, 0.4935, 0.3437, 0.0911, -0.2957] super().test_output(__UpperCamelCase ) class __UpperCamelCase ( SCREAMING_SNAKE_CASE , unittest.TestCase ): _lowercase : int = CrossAttnDownBlockaD # noqa F405 _lowercase : Any = "down" def _SCREAMING_SNAKE_CASE ( self: int ): '''simple docstring''' __magic_name__, __magic_name__ = super().prepare_init_args_and_inputs_for_common() __magic_name__ = 32 return init_dict, inputs_dict def _SCREAMING_SNAKE_CASE ( self: str ): '''simple docstring''' __magic_name__ = [0.2238, -0.7396, -0.2255, -0.3829, 0.1925, 1.1665, 0.0603, -0.7295, 0.1983] super().test_output(__UpperCamelCase ) class __UpperCamelCase ( SCREAMING_SNAKE_CASE , unittest.TestCase ): _lowercase : Union[str, Any] = SimpleCrossAttnDownBlockaD # noqa F405 _lowercase : List[str] = "down" @property def _SCREAMING_SNAKE_CASE ( self: Any 
): '''simple docstring''' return super().get_dummy_input(include_encoder_hidden_states=__UpperCamelCase ) def _SCREAMING_SNAKE_CASE ( self: List[Any] ): '''simple docstring''' __magic_name__, __magic_name__ = super().prepare_init_args_and_inputs_for_common() __magic_name__ = 32 return init_dict, inputs_dict @unittest.skipIf(torch_device == 'mps' , 'MPS result is not consistent' ) def _SCREAMING_SNAKE_CASE ( self: List[Any] ): '''simple docstring''' __magic_name__ = [0.7921, -0.0992, -0.1962, -0.7695, -0.4242, 0.7804, 0.4737, 0.2765, 0.3338] super().test_output(__UpperCamelCase ) class __UpperCamelCase ( SCREAMING_SNAKE_CASE , unittest.TestCase ): _lowercase : str = SkipDownBlockaD # noqa F405 _lowercase : Union[str, Any] = "down" @property def _SCREAMING_SNAKE_CASE ( self: Optional[int] ): '''simple docstring''' return super().get_dummy_input(include_skip_sample=__UpperCamelCase ) def _SCREAMING_SNAKE_CASE ( self: List[str] ): '''simple docstring''' __magic_name__ = [-0.0845, -0.2087, -0.2465, 0.0971, 0.1900, -0.0484, 0.2664, 0.4179, 0.5069] super().test_output(__UpperCamelCase ) class __UpperCamelCase ( SCREAMING_SNAKE_CASE , unittest.TestCase ): _lowercase : Tuple = AttnSkipDownBlockaD # noqa F405 _lowercase : str = "down" @property def _SCREAMING_SNAKE_CASE ( self: List[Any] ): '''simple docstring''' return super().get_dummy_input(include_skip_sample=__UpperCamelCase ) def _SCREAMING_SNAKE_CASE ( self: Dict ): '''simple docstring''' __magic_name__ = [0.5539, 0.1609, 0.4924, 0.0537, -0.1995, 0.4050, 0.0979, -0.2721, -0.0642] super().test_output(__UpperCamelCase ) class __UpperCamelCase ( SCREAMING_SNAKE_CASE , unittest.TestCase ): _lowercase : Optional[int] = DownEncoderBlockaD # noqa F405 _lowercase : List[str] = "down" @property def _SCREAMING_SNAKE_CASE ( self: Union[str, Any] ): '''simple docstring''' return super().get_dummy_input(include_temb=__UpperCamelCase ) def _SCREAMING_SNAKE_CASE ( self: Dict ): '''simple docstring''' __magic_name__ = { 
'in_channels': 32, 'out_channels': 32, } __magic_name__ = self.dummy_input return init_dict, inputs_dict def _SCREAMING_SNAKE_CASE ( self: List[str] ): '''simple docstring''' __magic_name__ = [1.1102, 0.5302, 0.4872, -0.0023, -0.8042, 0.0483, -0.3489, -0.5632, 0.7626] super().test_output(__UpperCamelCase ) class __UpperCamelCase ( SCREAMING_SNAKE_CASE , unittest.TestCase ): _lowercase : List[Any] = AttnDownEncoderBlockaD # noqa F405 _lowercase : Optional[Any] = "down" @property def _SCREAMING_SNAKE_CASE ( self: List[Any] ): '''simple docstring''' return super().get_dummy_input(include_temb=__UpperCamelCase ) def _SCREAMING_SNAKE_CASE ( self: Optional[Any] ): '''simple docstring''' __magic_name__ = { 'in_channels': 32, 'out_channels': 32, } __magic_name__ = self.dummy_input return init_dict, inputs_dict def _SCREAMING_SNAKE_CASE ( self: Dict ): '''simple docstring''' __magic_name__ = [0.8966, -0.1486, 0.8568, 0.8141, -0.9046, -0.1342, -0.0972, -0.7417, 0.1538] super().test_output(__UpperCamelCase ) class __UpperCamelCase ( SCREAMING_SNAKE_CASE , unittest.TestCase ): _lowercase : List[Any] = UNetMidBlockaD # noqa F405 _lowercase : Any = "mid" def _SCREAMING_SNAKE_CASE ( self: List[str] ): '''simple docstring''' __magic_name__ = { 'in_channels': 32, 'temb_channels': 1_28, } __magic_name__ = self.dummy_input return init_dict, inputs_dict def _SCREAMING_SNAKE_CASE ( self: str ): '''simple docstring''' __magic_name__ = [-0.1062, 1.7248, 0.3494, 1.4569, -0.0910, -1.2421, -0.9984, 0.6736, 1.0028] super().test_output(__UpperCamelCase ) class __UpperCamelCase ( SCREAMING_SNAKE_CASE , unittest.TestCase ): _lowercase : str = UNetMidBlockaDCrossAttn # noqa F405 _lowercase : int = "mid" def _SCREAMING_SNAKE_CASE ( self: List[Any] ): '''simple docstring''' __magic_name__, __magic_name__ = super().prepare_init_args_and_inputs_for_common() __magic_name__ = 32 return init_dict, inputs_dict def _SCREAMING_SNAKE_CASE ( self: Tuple ): '''simple docstring''' __magic_name__ = [0.0187, 
2.4220, 0.4484, 1.1203, -0.6121, -1.5122, -0.8270, 0.7851, 1.8335] super().test_output(__UpperCamelCase ) class __UpperCamelCase ( SCREAMING_SNAKE_CASE , unittest.TestCase ): _lowercase : Tuple = UNetMidBlockaDSimpleCrossAttn # noqa F405 _lowercase : str = "mid" @property def _SCREAMING_SNAKE_CASE ( self: Any ): '''simple docstring''' return super().get_dummy_input(include_encoder_hidden_states=__UpperCamelCase ) def _SCREAMING_SNAKE_CASE ( self: Tuple ): '''simple docstring''' __magic_name__, __magic_name__ = super().prepare_init_args_and_inputs_for_common() __magic_name__ = 32 return init_dict, inputs_dict def _SCREAMING_SNAKE_CASE ( self: Any ): '''simple docstring''' __magic_name__ = [0.7143, 1.9974, 0.5448, 1.3977, 0.1282, -1.1237, -1.4238, 0.5530, 0.8880] super().test_output(__UpperCamelCase ) class __UpperCamelCase ( SCREAMING_SNAKE_CASE , unittest.TestCase ): _lowercase : List[Any] = UpBlockaD # noqa F405 _lowercase : List[Any] = "up" @property def _SCREAMING_SNAKE_CASE ( self: Tuple ): '''simple docstring''' return super().get_dummy_input(include_res_hidden_states_tuple=__UpperCamelCase ) def _SCREAMING_SNAKE_CASE ( self: Any ): '''simple docstring''' __magic_name__ = [-0.2041, -0.4165, -0.3022, 0.0041, -0.6628, -0.7053, 0.1928, -0.0325, 0.0523] super().test_output(__UpperCamelCase ) class __UpperCamelCase ( SCREAMING_SNAKE_CASE , unittest.TestCase ): _lowercase : List[Any] = ResnetUpsampleBlockaD # noqa F405 _lowercase : Dict = "up" @property def _SCREAMING_SNAKE_CASE ( self: Optional[int] ): '''simple docstring''' return super().get_dummy_input(include_res_hidden_states_tuple=__UpperCamelCase ) def _SCREAMING_SNAKE_CASE ( self: Dict ): '''simple docstring''' __magic_name__ = [0.2287, 0.3549, -0.1346, 0.4797, -0.1715, -0.9649, 0.7305, -0.5864, -0.6244] super().test_output(__UpperCamelCase ) class __UpperCamelCase ( SCREAMING_SNAKE_CASE , unittest.TestCase ): _lowercase : Any = CrossAttnUpBlockaD # noqa F405 _lowercase : Union[str, Any] = "up" @property 
def _SCREAMING_SNAKE_CASE ( self: Optional[int] ): '''simple docstring''' return super().get_dummy_input(include_res_hidden_states_tuple=__UpperCamelCase ) def _SCREAMING_SNAKE_CASE ( self: str ): '''simple docstring''' __magic_name__, __magic_name__ = super().prepare_init_args_and_inputs_for_common() __magic_name__ = 32 return init_dict, inputs_dict def _SCREAMING_SNAKE_CASE ( self: Any ): '''simple docstring''' __magic_name__ = [-0.1403, -0.3515, -0.0420, -0.1425, 0.3167, 0.5094, -0.2181, 0.5931, 0.5582] super().test_output(__UpperCamelCase ) class __UpperCamelCase ( SCREAMING_SNAKE_CASE , unittest.TestCase ): _lowercase : str = SimpleCrossAttnUpBlockaD # noqa F405 _lowercase : Tuple = "up" @property def _SCREAMING_SNAKE_CASE ( self: List[str] ): '''simple docstring''' return super().get_dummy_input(include_res_hidden_states_tuple=__UpperCamelCase , include_encoder_hidden_states=__UpperCamelCase ) def _SCREAMING_SNAKE_CASE ( self: Union[str, Any] ): '''simple docstring''' __magic_name__, __magic_name__ = super().prepare_init_args_and_inputs_for_common() __magic_name__ = 32 return init_dict, inputs_dict def _SCREAMING_SNAKE_CASE ( self: List[Any] ): '''simple docstring''' __magic_name__ = [0.2645, 0.1480, 0.0909, 0.8044, -0.9758, -0.9083, 0.0994, -1.1453, -0.7402] super().test_output(__UpperCamelCase ) class __UpperCamelCase ( SCREAMING_SNAKE_CASE , unittest.TestCase ): _lowercase : Optional[Any] = AttnUpBlockaD # noqa F405 _lowercase : Optional[int] = "up" @property def _SCREAMING_SNAKE_CASE ( self: str ): '''simple docstring''' return super().get_dummy_input(include_res_hidden_states_tuple=__UpperCamelCase ) @unittest.skipIf(torch_device == 'mps' , 'MPS result is not consistent' ) def _SCREAMING_SNAKE_CASE ( self: Union[str, Any] ): '''simple docstring''' __magic_name__ = [0.0979, 0.1326, 0.0021, 0.0659, 0.2249, 0.0059, 0.1132, 0.5952, 0.1033] super().test_output(__UpperCamelCase ) class __UpperCamelCase ( SCREAMING_SNAKE_CASE , unittest.TestCase ): _lowercase : 
Union[str, Any] = SkipUpBlockaD # noqa F405 _lowercase : int = "up" @property def _SCREAMING_SNAKE_CASE ( self: Union[str, Any] ): '''simple docstring''' return super().get_dummy_input(include_res_hidden_states_tuple=__UpperCamelCase ) def _SCREAMING_SNAKE_CASE ( self: Union[str, Any] ): '''simple docstring''' __magic_name__ = [-0.0893, -0.1234, -0.1506, -0.0332, 0.0123, -0.0211, 0.0566, 0.0143, 0.0362] super().test_output(__UpperCamelCase ) class __UpperCamelCase ( SCREAMING_SNAKE_CASE , unittest.TestCase ): _lowercase : Union[str, Any] = AttnSkipUpBlockaD # noqa F405 _lowercase : Optional[Any] = "up" @property def _SCREAMING_SNAKE_CASE ( self: Optional[Any] ): '''simple docstring''' return super().get_dummy_input(include_res_hidden_states_tuple=__UpperCamelCase ) def _SCREAMING_SNAKE_CASE ( self: Tuple ): '''simple docstring''' __magic_name__ = [0.0361, 0.0617, 0.2787, -0.0350, 0.0342, 0.3421, -0.0843, 0.0913, 0.3015] super().test_output(__UpperCamelCase ) class __UpperCamelCase ( SCREAMING_SNAKE_CASE , unittest.TestCase ): _lowercase : List[str] = UpDecoderBlockaD # noqa F405 _lowercase : List[str] = "up" @property def _SCREAMING_SNAKE_CASE ( self: List[Any] ): '''simple docstring''' return super().get_dummy_input(include_temb=__UpperCamelCase ) def _SCREAMING_SNAKE_CASE ( self: List[Any] ): '''simple docstring''' __magic_name__ = {'in_channels': 32, 'out_channels': 32} __magic_name__ = self.dummy_input return init_dict, inputs_dict def _SCREAMING_SNAKE_CASE ( self: Optional[Any] ): '''simple docstring''' __magic_name__ = [0.4404, 0.1998, -0.9886, -0.3320, -0.3128, -0.7034, -0.6955, -0.2338, -0.3137] super().test_output(__UpperCamelCase ) class __UpperCamelCase ( SCREAMING_SNAKE_CASE , unittest.TestCase ): _lowercase : Optional[Any] = AttnUpDecoderBlockaD # noqa F405 _lowercase : Any = "up" @property def _SCREAMING_SNAKE_CASE ( self: Dict ): '''simple docstring''' return super().get_dummy_input(include_temb=__UpperCamelCase ) def _SCREAMING_SNAKE_CASE ( self: 
str ): '''simple docstring''' __magic_name__ = {'in_channels': 32, 'out_channels': 32} __magic_name__ = self.dummy_input return init_dict, inputs_dict def _SCREAMING_SNAKE_CASE ( self: List[Any] ): '''simple docstring''' __magic_name__ = [0.6738, 0.4491, 0.1055, 1.0710, 0.7316, 0.3339, 0.3352, 0.1023, 0.3568] super().test_output(__UpperCamelCase )
184
1
"""K-means clustering implemented on the TensorFlow 1.x graph API."""
from random import shuffle

import tensorflow as tf
from numpy import array


def tf_k_means_cluster(vectors, noofclusters):
    """
    K-Means Clustering using TensorFlow 1.x.

    `vectors` is an n*k 2-D array of n vectors of dimensionality k;
    `noofclusters` is the number of clusters to partition them into.

    Returns (centroids, assignments) evaluated to NumPy values.

    (The mangled original had duplicate parameter names — a SyntaxError —
    and every local renamed to an undefined `a__`; names restored here.)
    """
    noofclusters = int(noofclusters)
    assert noofclusters < len(vectors)

    # Dimensionality of the data; sizes the placeholders below.
    dim = len(vectors[0])

    # Random permutation of indices so the initial centroids are a
    # random sample of the input vectors.
    vector_indices = list(range(len(vectors)))
    shuffle(vector_indices)

    # A fresh graph per call keeps repeated invocations from crowding the
    # default graph with stale ops and Variables.
    graph = tf.Graph()
    with graph.as_default():
        sess = tf.Session()

        # One Variable per centroid, seeded with a randomly chosen input vector.
        centroids = [tf.Variable(vectors[vector_indices[i]]) for i in range(noofclusters)]
        # Assign-op plumbing so centroid values can be overwritten each round.
        centroid_value = tf.placeholder("float64", [dim])
        cent_assigns = []
        for centroid in centroids:
            cent_assigns.append(tf.assign(centroid, centroid_value))

        # Cluster-assignment Variable for every input vector (starts at 0).
        assignments = [tf.Variable(0) for i in range(len(vectors))]
        assignment_value = tf.placeholder("int32")
        cluster_assigns = []
        for assignment in assignments:
            cluster_assigns.append(tf.assign(assignment, assignment_value))

        # Mean of a batch of vectors — recomputes a centroid location.
        mean_input = tf.placeholder("float", [None, dim])
        mean_op = tf.reduce_mean(mean_input, 0)

        # Euclidean distance between two vectors.
        va = tf.placeholder("float", [dim])
        vb = tf.placeholder("float", [dim])
        euclid_dist = tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(va, vb), 2)))

        # argmin over the per-centroid distances picks the nearest cluster.
        centroid_distances = tf.placeholder("float", [noofclusters])
        cluster_assignment = tf.argmin(centroid_distances, 0)

        # Initializer must be built after all Variables above exist.
        init_op = tf.initialize_all_variables()
        sess.run(init_op)

        # Fixed number of EM iterations instead of a convergence criterion,
        # to keep things simple.
        noofiterations = 100
        for _ in range(noofiterations):
            # EXPECTATION: assign each vector to its nearest centroid.
            for vector_n in range(len(vectors)):
                vect = vectors[vector_n]
                distances = [
                    sess.run(euclid_dist, feed_dict={va: vect, vb: sess.run(centroid)})
                    for centroid in centroids
                ]
                assignment = sess.run(cluster_assignment, feed_dict={centroid_distances: distances})
                sess.run(cluster_assigns[vector_n], feed_dict={assignment_value: assignment})

            # MAXIMIZATION: move each centroid to the mean of its members.
            for cluster_n in range(noofclusters):
                assigned_vects = [
                    vectors[i] for i in range(len(vectors)) if sess.run(assignments[i]) == cluster_n
                ]
                new_location = sess.run(mean_op, feed_dict={mean_input: array(assigned_vects)})
                sess.run(cent_assigns[cluster_n], feed_dict={centroid_value: new_location})

        # Evaluate final centroids and assignments into NumPy values.
        centroids = sess.run(centroids)
        assignments = sess.run(assignments)
        return centroids, assignments
572
"""
Count primes below a bound that can be written as the difference of two
consecutive cubes, (n + 1)**3 - n**3 = 3*n**2 + 3*n + 1 (Project Euler 131 family).
"""
from math import isqrt


def is_prime(number: int) -> bool:
    """Return True iff `number` is prime (trial division up to sqrt)."""
    if number < 2:
        # 0 and 1 are not prime; the bare trial-division expression below
        # would otherwise report them as prime.
        return False
    return all(number % divisor != 0 for divisor in range(2, isqrt(number) + 1))


def solution(max_prime: int = 10**6) -> int:
    """
    Count primes p < max_prime of the form (n + 1)**3 - n**3.

    Candidates are generated incrementally: consecutive cube differences
    themselves differ by 6 * n, so no cube is ever computed.

    (The mangled original named both functions `a__`, so the second def
    shadowed the first and `is_prime` was unresolved at call time.)
    """
    primes_count = 0
    cube_index = 1
    prime_candidate = 7  # (1 + 1)**3 - 1**3
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate)
        cube_index += 1
        prime_candidate += 6 * cube_index
    return primes_count


if __name__ == "__main__":
    print(f"{solution() = }")
627
0
def solution(n: int = 100) -> int:
    """
    Project Euler 6: difference between the square of the sum and the sum
    of the squares of the first `n` natural numbers, via closed forms.

    Fixes vs. original: the function was named `a__` while the script calls
    `solution()`, and it used float `/` division (precision loss for large n);
    both closed-form products are always divisible, so exact `//` is used.
    """
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    square_of_sum = (n * (n + 1) // 2) ** 2
    return square_of_sum - sum_of_squares


if __name__ == "__main__":
    print(f"{solution() = }")
236
"""Open-Llama model configuration."""
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

OPEN_LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json",
}


class OpenLlamaConfig(PretrainedConfig):
    """
    Configuration for an Open-Llama model. Defaults mirror s-JoL/Open-Llama-V1.

    (The mangled original subclassed an undefined `A__` and duplicated the
    module-level names; the error message in `_rope_scaling_validation` also
    referred to a `name` field although the code reads `type`.)
    """

    model_type = "open-llama"

    def __init__(
        self,
        vocab_size=100000,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_memory_efficient_attention=True,
        hidden_dropout_prob=0.1,
        attention_dropout_prob=0.1,
        use_stable_embedding=True,
        shared_input_output_embedding=True,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        # NOTE: the misspelled kwarg "use_memorry_efficient_attention" is kept
        # deliberately for backward compatibility with existing configs.
        self.use_memory_efficient_attention = kwargs.pop(
            "use_memorry_efficient_attention", use_memory_efficient_attention
        )
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_dropout_prob = attention_dropout_prob
        self.use_stable_embedding = use_stable_embedding
        self.shared_input_output_embedding = shared_input_output_embedding
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` dict: {"type": "linear"|"dynamic", "factor": float > 1}."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
236
1
"""Tabu-search solver for symmetric TSP instances read from a whitespace file
of `node_a node_b distance` lines.

(The mangled original named all five functions `SCREAMING_SNAKE_CASE__` with
duplicate `lowercase` parameters — a SyntaxError — while the bodies call
`generate_neighbours`/`find_neighborhood`/`tabu_search`; names restored.)
"""
import argparse
import copy


def generate_neighbours(path):
    """Parse the instance file into {node: [[neighbour, distance_str], ...]} (both directions)."""
    dict_of_neighbours = {}
    with open(path) as f:
        for line in f:
            if line.split()[0] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[1], line.split()[2]])
                dict_of_neighbours[line.split()[0]] = _list
            else:
                dict_of_neighbours[line.split()[0]].append([line.split()[1], line.split()[2]])
            if line.split()[1] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[0], line.split()[2]])
                dict_of_neighbours[line.split()[1]] = _list
            else:
                dict_of_neighbours[line.split()[1]].append([line.split()[0], line.split()[2]])
    return dict_of_neighbours


def generate_first_solution(path, dict_of_neighbours):
    """Greedy nearest-neighbour tour starting from the first character of the file.

    Returns (tour_as_node_list, total_distance).
    """
    with open(path) as f:
        start_node = f.read(1)
    end_node = start_node

    first_solution = []
    visiting = start_node
    distance_of_first_solution = 0
    while visiting not in first_solution:
        minim = 10000  # sentinel "infinity"; final correction below removes it
        for k in dict_of_neighbours[visiting]:
            if int(k[1]) < int(minim) and k[0] not in first_solution:
                minim = k[1]
                best_node = k[0]

        first_solution.append(visiting)
        distance_of_first_solution = distance_of_first_solution + int(minim)
        visiting = best_node

    first_solution.append(end_node)

    # Replace the dangling sentinel with the real closing edge back to start.
    position = 0
    for k in dict_of_neighbours[first_solution[-2]]:
        if k[0] == start_node:
            break
        position += 1
    distance_of_first_solution = (
        distance_of_first_solution + int(dict_of_neighbours[first_solution[-2]][position][1]) - 10000
    )
    return first_solution, distance_of_first_solution


def find_neighborhood(solution, dict_of_neighbours):
    """All distinct 2-swap neighbours of `solution` (endpoints fixed), each with
    its total distance appended as the last element; sorted by that distance."""
    neighborhood_of_solution = []
    for n in solution[1:-1]:
        idx1 = solution.index(n)
        for kn in solution[1:-1]:
            idx2 = solution.index(kn)
            if n == kn:
                continue

            _tmp = copy.deepcopy(solution)
            _tmp[idx1] = kn
            _tmp[idx2] = n

            distance = 0
            for k in _tmp[:-1]:
                next_node = _tmp[_tmp.index(k) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        distance = distance + int(i[1])
            _tmp.append(distance)
            if _tmp not in neighborhood_of_solution:
                neighborhood_of_solution.append(_tmp)

    index_of_last_item_in_the_list = len(neighborhood_of_solution[0]) - 1
    neighborhood_of_solution.sort(key=lambda x: x[index_of_last_item_in_the_list])
    return neighborhood_of_solution


def tabu_search(first_solution, distance_of_first_solution, dict_of_neighbours, iters, size):
    """Classic tabu search: take the best non-tabu neighbour each iteration,
    remember the swapped pair for up to `size` iterations, keep the best tour seen."""
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution

    while count <= iters:
        neighborhood = find_neighborhood(solution, dict_of_neighbours)
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        best_cost_index = len(best_solution) - 1

        found = False
        while not found:
            # Identify the pair of nodes exchanged relative to the current tour.
            i = 0
            while i < len(best_solution):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1

            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                tabu_list.append([first_exchange_node, second_exchange_node])
                found = True
                solution = best_solution[:-1]  # strip the appended distance
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                # Tabu move: fall back to the next-best neighbour.
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]

        if len(tabu_list) >= size:
            tabu_list.pop(0)

        count = count + 1

    return best_solution_ever, best_cost


def main(args=None):
    """CLI entry point: build the instance, seed greedily, then tabu-search."""
    dict_of_neighbours = generate_neighbours(args.File)
    first_solution, distance_of_first_solution = generate_first_solution(args.File, dict_of_neighbours)
    best_sol, best_cost = tabu_search(
        first_solution,
        distance_of_first_solution,
        dict_of_neighbours,
        args.Iterations,
        args.Size,
    )
    print(f"Best solution: {best_sol}, with total distance: {best_cost}.")


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Tabu Search")
    parser.add_argument(
        "-f",
        "--File",
        type=str,
        help="Path to the file containing the data",
        required=True,
    )
    parser.add_argument(
        "-i",
        "--Iterations",
        type=int,
        help="How many iterations the algorithm should perform",
        required=True,
    )
    parser.add_argument("-s", "--Size", type=int, help="Size of the tabu list", required=True)
    # Pass the arguments to main method
    main(parser.parse_args())
587
import unittest

from transformers import DonutProcessor

DONUT_PRETRAINED_MODEL_NAME = "naver-clova-ix/donut-base"


class DonutProcessorTest(unittest.TestCase):
    """Tests DonutProcessor's tagged-token-sequence -> JSON conversion."""

    def setUp(self):
        # The mangled original passed the undefined name `__UpperCamelCase` to
        # from_pretrained; load the checkpoint named by the module constant.
        self.processor = DonutProcessor.from_pretrained(DONUT_PRETRAINED_MODEL_NAME)

    def test_token2json(self):
        """A nested tagged sequence parses into the expected dict/list structure."""
        expected_json = {
            "name": "John Doe",
            "age": "99",
            "city": "Atlanta",
            "state": "GA",
            "zip": "30301",
            "phone": "123-4567",
            "nicknames": [{"nickname": "Johnny"}, {"nickname": "JD"}],
        }
        sequence = (
            "<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>"
            "<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>"
            "<s_nicknames><s_nickname>Johnny</s_nickname>"
            "<sep/><s_nickname>JD</s_nickname></s_nicknames>"
        )
        # `tokenajson` in the mangled source is the digit-garbled `token2json`.
        actual_json = self.processor.token2json(sequence)
        self.assertDictEqual(actual_json, expected_json)
327
0
"""Abstract interface that every CLI subcommand implements."""
from abc import ABC, abstractmethod
from argparse import ArgumentParser


class BaseTransformersCLICommand(ABC):
    """Contract for CLI subcommands: register argparse options, then run.

    The mangled original subclassed the undefined name `__lowerCAmelCase`;
    given the `abc` imports, the intended base is `ABC`.
    """

    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        """Attach this command's sub-parser and arguments to `parser`."""
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        """Execute the command."""
        raise NotImplementedError()
566
"""Extended integration tests for the seq2seq `run_translation` example Trainer.

NOTE(review): reconstructed from a mangled source in which parameter, method
and local names had been overwritten with duplicates (``__UpperCamelCase`` /
``snake_case__`` — the duplicate parameter names made the class a
SyntaxError) and the ``{max_len}`` CLI template placeholders had been
corrupted to the literal ``77,662``.  Behavioural intent restored; confirm
against the upstream example test before relying on exact method names.
"""
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch

from parameterized import parameterized

from transformers.testing_utils import (
    CaptureStderr,
    ExtendSysPath,
    TestCasePlus,
    execute_subprocess_async,
    get_gpu_count,
    get_torch_dist_unique_port,
    require_apex,
    require_bitsandbytes,
    require_fairscale,
    require_torch,
    require_torch_gpu,
    require_torch_multi_gpu,
    require_torch_non_multi_gpu,
    slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed


bindir = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(f"{bindir}/../../examples/pytorch/translation"):
    from run_translation import main  # noqa

set_seed(42)

MARIAN_MODEL = "sshleifer/student_marian_en_ro_6_1"
MBART_TINY = "sshleifer/tiny-mbart"


@require_torch
class TestTrainerExt(TestCasePlus):
    def run_seqaseq_quick(
        self,
        distributed=False,
        extra_args_str=None,
        predict_with_generate=True,
        do_train=True,
        do_eval=True,
        do_predict=True,
    ):
        """Run one tiny training epoch and sanity-check the logged eval metrics."""
        output_dir = self.run_trainer(
            eval_steps=1,
            max_len=12,
            model_name=MBART_TINY,
            num_train_epochs=1,
            distributed=distributed,
            extra_args_str=extra_args_str,
            predict_with_generate=predict_with_generate,
            do_train=do_train,
            do_eval=do_eval,
            do_predict=do_predict,
        )

        logs = TrainerState.load_from_json(os.path.join(output_dir, "trainer_state.json")).log_history

        if not do_eval:
            return

        eval_metrics = [log for log in logs if "eval_loss" in log.keys()]

        first_step_stats = eval_metrics[0]
        if predict_with_generate:
            assert "eval_bleu" in first_step_stats

            last_step_stats = eval_metrics[-1]
            assert isinstance(last_step_stats["eval_bleu"], float)
            assert not math.isnan(float(last_step_stats["eval_loss"])), "eval_loss must not be `nan`"

    @require_torch_non_multi_gpu
    def test_run_seqaseq_no_dist(self):
        self.run_seqaseq_quick()

    @require_torch_multi_gpu
    def test_run_seqaseq_dp(self):
        # DataParallel path: multiple GPUs, single process.
        self.run_seqaseq_quick(distributed=False)

    @require_torch_multi_gpu
    def test_run_seqaseq_ddp(self):
        # DistributedDataParallel path: one process per GPU.
        self.run_seqaseq_quick(distributed=True)

    @unittest.skip("Requires an update of the env running those tests")
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seqaseq_sharded_ddp(self):
        self.run_seqaseq_quick(distributed=True, extra_args_str="--sharded_ddp simple")

    @unittest.skip("Requires an update of the env running those tests")
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seqaseq_sharded_ddp_fp16(self):
        self.run_seqaseq_quick(distributed=True, extra_args_str="--sharded_ddp simple --fp16")

    @unittest.skip("Requires an update of the env running those tests")
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seqaseq_fully_sharded_ddp(self):
        # ZeRO DP stage 2 does not support generate(), hence predict_with_generate=False.
        self.run_seqaseq_quick(
            distributed=True, extra_args_str="--sharded_ddp zero_dp_2", predict_with_generate=False
        )

    @unittest.skip("Requires an update of the env running those tests")
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seqaseq_fully_sharded_ddp_fp16(self):
        self.run_seqaseq_quick(
            distributed=True, extra_args_str="--sharded_ddp zero_dp_2 --fp16", predict_with_generate=False
        )

    @require_apex
    @require_torch_gpu
    def test_run_seqaseq_apex(self):
        self.run_seqaseq_quick(distributed=True, extra_args_str="--fp16 --fp16_backend=apex")
        # test 2nd time - was getting eval_loss': nan'
        # to reproduce the problem set distributed=False
        self.run_seqaseq_quick(distributed=True, extra_args_str="--fp16 --fp16_backend=apex")

    @parameterized.expand(["base", "low", "high", "mixed"])
    @require_torch_multi_gpu
    def test_trainer_log_level_replica(self, experiment_id):
        """Check how many times the "Running training" info line is emitted per log_level setup."""
        experiments = {
            # test with the default log_level - should be info and thus log info once
            "base": {"extra_args_str": "", "n_matches": 1},
            # test with low log_level and log_level_replica - should be noisy on all processes
            # now the info string should appear twice on 2 processes
            "low": {"extra_args_str": "--log_level debug --log_level_replica debug", "n_matches": 2},
            # test with high log_level and low log_level_replica
            # now the info string should appear once only on the replica
            "high": {"extra_args_str": "--log_level error --log_level_replica debug", "n_matches": 1},
            # test with high log_level and log_level_replica - should be quiet on all processes
            "mixed": {"extra_args_str": "--log_level error --log_level_replica error", "n_matches": 0},
        }

        data = experiments[experiment_id]
        kwargs = {"distributed": True, "predict_with_generate": False, "do_eval": False, "do_predict": False}
        log_info_string = "Running training"
        with CaptureStderr() as cl:
            self.run_seqaseq_quick(**kwargs, extra_args_str=data["extra_args_str"])
        n_matches = len(re.findall(log_info_string, cl.err))
        self.assertEqual(n_matches, data["n_matches"])

    @slow
    def test_run_seqaseq(self):
        output_dir = self.run_trainer(
            eval_steps=2,
            max_len=128,
            model_name=MARIAN_MODEL,
            learning_rate=3e-4,
            num_train_epochs=10,
            distributed=False,
        )

        # Check metrics
        logs = TrainerState.load_from_json(os.path.join(output_dir, "trainer_state.json")).log_history
        eval_metrics = [log for log in logs if "eval_loss" in log.keys()]
        first_step_stats = eval_metrics[0]
        last_step_stats = eval_metrics[-1]

        assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
        assert isinstance(last_step_stats["eval_bleu"], float)

        # test if do_predict saves generations and metrics
        contents = os.listdir(output_dir)
        contents = {os.path.basename(p) for p in contents}
        assert "generated_predictions.txt" in contents
        assert "predict_results.json" in contents

    @slow
    @require_bitsandbytes
    def test_run_seqaseq_bnb(self):
        from transformers.training_args import OptimizerNames

        def train_and_return_metrics(optim: str) -> Tuple[int, float]:
            # --skip_memory_metrics 0 enables the gpu memory deltas in the logs.
            extra_args = "--skip_memory_metrics 0"

            output_dir = self.run_trainer(
                max_len=128,
                model_name=MARIAN_MODEL,
                learning_rate=3e-4,
                num_train_epochs=1,
                optim=optim,
                distributed=True,  # force run in a new process
                extra_args_str=extra_args,
                do_eval=False,
                do_predict=False,
                n_gpus_to_use=1,  # to allow deterministic fixed memory usage
            )

            # Check metrics
            logs = TrainerState.load_from_json(Path(output_dir, "trainer_state.json")).log_history
            gpu_peak_mem_mb = int(logs[0]["train_mem_gpu_peaked_delta"] / 2**20)
            gpu_alloc_mem_mb = int(logs[0]["train_mem_gpu_alloc_delta"] / 2**20)
            loss = logs[0]["train_loss"]
            return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss

        gpu_peak_mem_orig, gpu_alloc_mem_orig, loss_orig = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value)
        gpu_peak_mem_bnb, gpu_alloc_mem_bnb, loss_bnb = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value)

        gpu_alloc_mem_diff = gpu_alloc_mem_orig - gpu_alloc_mem_bnb

        gpu_total_mem_orig = gpu_peak_mem_orig + gpu_alloc_mem_orig
        gpu_total_mem_bnb = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
        gpu_total_mem_diff = gpu_total_mem_orig - gpu_total_mem_bnb

        # sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which
        # doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
        # in 2 bytes and the diff in optim memory usage is derived as so:
        #
        # - normal 25*8=~200MB (8 bytes per param)
        # - bnb    25*2= ~50MB (2 bytes per param)
        #
        # Thus we should expect ~150MB total memory saved.
        #
        # Peak memory should be the same - the total should be different by about that same margin
        #
        # After leaving a small margin to accommodate for differences between gpus let's check
        # that we have at least 120MB in savings
        expected_savings = 120

        self.assertGreater(
            gpu_alloc_mem_diff,
            expected_savings,
            "should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got"
            f" a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and"
            f" gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB",
        )

        self.assertGreater(
            gpu_total_mem_diff,
            expected_savings,
            "should use ~150MB less total gpu memory with BNB, compared to without it for this model but got"
            f" a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and"
            f" gpu_total_mem_bnb={gpu_total_mem_bnb}MB",
        )

        self.assertEqual(
            loss_orig, loss_bnb, f"loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}"
        )

    def run_trainer(
        self,
        max_len: int,
        model_name: str,
        num_train_epochs: int,
        learning_rate: float = 3e-3,
        optim: str = "adafactor",
        distributed: bool = False,
        extra_args_str: str = None,
        eval_steps: int = 0,
        predict_with_generate: bool = True,
        do_train: bool = True,
        do_eval: bool = True,
        do_predict: bool = True,
        n_gpus_to_use: int = None,
    ):
        """Build the run_translation CLI args and run it (in-process or via torch.distributed.run).

        Returns the output dir so callers can inspect trainer_state.json and predictions.
        """
        data_dir = self.test_file_dir / "../fixtures/tests_samples/wmt_en_ro"
        output_dir = self.get_auto_remove_tmp_dir()
        args_train = f"""
            --model_name_or_path {model_name}
            --train_file {data_dir}/train.json
            --validation_file {data_dir}/val.json
            --test_file {data_dir}/test.json
            --output_dir {output_dir}
            --overwrite_output_dir
            --max_train_samples 8
            --max_source_length {max_len}
            --max_target_length {max_len}
            --do_train
            --num_train_epochs {str(num_train_epochs)}
            --per_device_train_batch_size 4
            --learning_rate {learning_rate}
            --warmup_steps 8
            --logging_steps 0
            --logging_strategy no
            --save_steps {str(eval_steps)}
            --group_by_length
            --label_smoothing_factor 0.1
            --target_lang ro_RO
            --source_lang en_XX
        """.split()

        args_eval = f"""
            --do_eval
            --per_device_eval_batch_size 4
            --max_eval_samples 8
            --val_max_target_length {max_len}
            --evaluation_strategy steps
            --eval_steps {str(eval_steps)}
        """.split()

        args_predict = """
            --do_predict
        """.split()

        args = []
        if do_train:
            args += args_train
        if do_eval:
            args += args_eval
        if do_predict:
            args += args_predict
        if predict_with_generate:
            args += "--predict_with_generate".split()

        if do_train:
            if optim == "adafactor":
                args += "--adafactor".split()
            else:
                args += f"--optim {optim}".split()

        if extra_args_str is not None:
            args += extra_args_str.split()

        if distributed:
            if n_gpus_to_use is None:
                n_gpus_to_use = get_gpu_count()
            master_port = get_torch_dist_unique_port()
            distributed_args = f"""
                -m torch.distributed.run
                --nproc_per_node={n_gpus_to_use}
                --master_port={master_port}
                {self.examples_dir_str}/pytorch/translation/run_translation.py
            """.split()
            cmd = [sys.executable] + distributed_args + args
            # keep for quick debug
            # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] + cmd)); die
            execute_subprocess_async(cmd, env=self.get_env())
        else:
            testargs = ["run_translation.py"] + args
            with patch.object(sys, "argv", testargs):
                main()

        return output_dir
566
1
"""TF2 optimization utilities: warmup schedule, optimizer factory, AdamW with
weight decay, and a cross-replica gradient accumulator.

NOTE(review): reconstructed from a mangled source where assignment targets had
been overwritten with ``__lowercase`` (duplicate ``beta_a=`` keywords were a
SyntaxError, the ``_prepare_local`` result was discarded, and
``tf.floataa``/``tf.intaa`` were broken dtype names).
"""
import re
from typing import Callable, List, Optional, Union

import tensorflow as tf


try:
    from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
    from tensorflow.keras.optimizers import Adam


class WarmUp(tf.keras.optimizers.schedules.LearningRateSchedule):
    """Applies a polynomial warmup on top of a given learning-rate decay schedule."""

    def __init__(
        self,
        initial_learning_rate: float,
        decay_schedule_fn: Callable,
        warmup_steps: int,
        power: float = 1.0,
        name: str = None,
    ):
        super().__init__()
        self.initial_learning_rate = initial_learning_rate
        self.warmup_steps = warmup_steps
        self.power = power
        self.decay_schedule_fn = decay_schedule_fn
        self.name = name

    def __call__(self, step):
        with tf.name_scope(self.name or "WarmUp") as name:
            # Implements polynomial warmup. i.e., if global_step < warmup_steps, the
            # learning rate will be `global_step/num_warmup_steps * init_lr`.
            global_step_float = tf.cast(step, tf.float32)
            warmup_steps_float = tf.cast(self.warmup_steps, tf.float32)
            warmup_percent_done = global_step_float / warmup_steps_float
            warmup_learning_rate = self.initial_learning_rate * tf.math.pow(warmup_percent_done, self.power)
            return tf.cond(
                global_step_float < warmup_steps_float,
                lambda: warmup_learning_rate,
                lambda: self.decay_schedule_fn(step - self.warmup_steps),
                name=name,
            )

    def get_config(self):
        return {
            "initial_learning_rate": self.initial_learning_rate,
            "decay_schedule_fn": self.decay_schedule_fn,
            "warmup_steps": self.warmup_steps,
            "power": self.power,
            "name": self.name,
        }


def create_optimizer(
    init_lr: float,
    num_train_steps: int,
    num_warmup_steps: int,
    min_lr_ratio: float = 0.0,
    adam_beta1: float = 0.9,
    adam_beta2: float = 0.999,
    adam_epsilon: float = 1e-8,
    adam_clipnorm: Optional[float] = None,
    adam_global_clipnorm: Optional[float] = None,
    weight_decay_rate: float = 0.0,
    power: float = 1.0,
    include_in_weight_decay: Optional[List[str]] = None,
):
    """Create an optimizer with a polynomial-decay learning-rate schedule plus warmup.

    Returns a (optimizer, lr_schedule) pair; uses AdamWeightDecay when
    `weight_decay_rate > 0`, plain Adam otherwise.
    """
    lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
        initial_learning_rate=init_lr,
        decay_steps=num_train_steps - num_warmup_steps,
        end_learning_rate=init_lr * min_lr_ratio,
        power=power,
    )
    if num_warmup_steps:
        lr_schedule = WarmUp(
            initial_learning_rate=init_lr,
            decay_schedule_fn=lr_schedule,
            warmup_steps=num_warmup_steps,
        )
    if weight_decay_rate > 0.0:
        optimizer = AdamWeightDecay(
            learning_rate=lr_schedule,
            weight_decay_rate=weight_decay_rate,
            beta_1=adam_beta1,
            beta_2=adam_beta2,
            epsilon=adam_epsilon,
            clipnorm=adam_clipnorm,
            global_clipnorm=adam_global_clipnorm,
            exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"],
            include_in_weight_decay=include_in_weight_decay,
        )
    else:
        optimizer = tf.keras.optimizers.Adam(
            learning_rate=lr_schedule,
            beta_1=adam_beta1,
            beta_2=adam_beta2,
            epsilon=adam_epsilon,
            clipnorm=adam_clipnorm,
            global_clipnorm=adam_global_clipnorm,
        )
    # We return the optimizer and the LR scheduler in order to better track the
    # evolution of the LR independently of the optimizer.
    return optimizer, lr_schedule


class AdamWeightDecay(Adam):
    """Adam with decoupled weight decay (AdamW) applied before each variable update."""

    def __init__(
        self,
        learning_rate: Union[float, tf.keras.optimizers.schedules.LearningRateSchedule] = 0.001,
        beta_1: float = 0.9,
        beta_2: float = 0.999,
        epsilon: float = 1e-7,
        amsgrad: bool = False,
        weight_decay_rate: float = 0.0,
        include_in_weight_decay: Optional[List[str]] = None,
        exclude_from_weight_decay: Optional[List[str]] = None,
        name: str = "AdamWeightDecay",
        **kwargs,
    ):
        super().__init__(learning_rate, beta_1, beta_2, epsilon, amsgrad, name, **kwargs)
        self.weight_decay_rate = weight_decay_rate
        self._include_in_weight_decay = include_in_weight_decay
        self._exclude_from_weight_decay = exclude_from_weight_decay

    @classmethod
    def from_config(cls, config):
        """Creates an optimizer from its config with WarmUp custom object."""
        custom_objects = {"WarmUp": WarmUp}
        return super(AdamWeightDecay, cls).from_config(config, custom_objects=custom_objects)

    def _prepare_local(self, var_device, var_dtype, apply_state):
        super(AdamWeightDecay, self)._prepare_local(var_device, var_dtype, apply_state)
        apply_state[(var_device, var_dtype)]["weight_decay_rate"] = tf.constant(
            self.weight_decay_rate, name="adam_weight_decay_rate"
        )

    def _decay_weights_op(self, var, learning_rate, apply_state):
        do_decay = self._do_use_weight_decay(var.name)
        if do_decay:
            return var.assign_sub(
                learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]["weight_decay_rate"],
                use_locking=self._use_locking,
            )
        return tf.no_op()

    def apply_gradients(self, grads_and_vars, name=None, **kwargs):
        grads, tvars = list(zip(*grads_and_vars))
        return super(AdamWeightDecay, self).apply_gradients(zip(grads, tvars), name=name, **kwargs)

    def _get_lr(self, var_device, var_dtype, apply_state):
        """Retrieves the learning rate with the given state."""
        if apply_state is None:
            return self._decayed_lr_t[var_dtype], {}

        apply_state = apply_state or {}
        coefficients = apply_state.get((var_device, var_dtype))
        if coefficients is None:
            coefficients = self._fallback_apply_state(var_device, var_dtype)
            apply_state[(var_device, var_dtype)] = coefficients

        return coefficients["lr_t"], {"apply_state": apply_state}

    def _resource_apply_dense(self, grad, var, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_dense(grad, var, **kwargs)

    def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_sparse(grad, var, indices, **kwargs)

    def get_config(self):
        config = super().get_config()
        config.update({"weight_decay_rate": self.weight_decay_rate})
        return config

    def _do_use_weight_decay(self, param_name):
        """Whether to use L2 weight decay for `param_name`."""
        if self.weight_decay_rate == 0:
            return False

        if self._include_in_weight_decay:
            for r in self._include_in_weight_decay:
                if re.search(r, param_name) is not None:
                    return True

        if self._exclude_from_weight_decay:
            for r in self._exclude_from_weight_decay:
                if re.search(r, param_name) is not None:
                    return False
        return True


class GradientAccumulator:
    """Accumulates gradients across steps; call with gradients, read `.gradients`, then `.reset()`."""

    def __init__(self):
        self._gradients = []
        # Created lazily on first use so it is built under the right strategy scope.
        self._accum_steps = None

    @property
    def step(self):
        """Number of accumulated steps."""
        if self._accum_steps is None:
            self._accum_steps = tf.Variable(
                tf.constant(0, dtype=tf.int64),
                trainable=False,
                synchronization=tf.VariableSynchronization.ON_READ,
                aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
            )
        return self._accum_steps.value()

    @property
    def gradients(self):
        """The accumulated gradients on the current replica."""
        if not self._gradients:
            raise ValueError("The accumulator should be called first to initialize the gradients")
        return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]

    def __call__(self, gradients):
        """Accumulates `gradients` on the current replica."""
        if not self._gradients:
            _ = self.step  # Create the step variable.
            self._gradients.extend(
                [
                    tf.Variable(
                        tf.zeros_like(gradient),
                        trainable=False,
                        synchronization=tf.VariableSynchronization.ON_READ,
                        aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
                    )
                    if gradient is not None
                    else gradient
                    for gradient in gradients
                ]
            )
        if len(gradients) != len(self._gradients):
            raise ValueError(f"Expected {len(self._gradients)} gradients, but got {len(gradients)}")

        for accum_gradient, gradient in zip(self._gradients, gradients):
            if accum_gradient is not None and gradient is not None:
                accum_gradient.assign_add(gradient)

        self._accum_steps.assign_add(1)

    def reset(self):
        """Resets the accumulated gradients on the current replica."""
        if not self._gradients:
            return
        self._accum_steps.assign(0)
        for gradient in self._gradients:
            if gradient is not None:
                gradient.assign(tf.zeros_like(gradient))
502
"""Tests for PLBartTokenizer (base and multi language-code vocabularies).

NOTE(review): reconstructed from a mangled source in which class, method and
local names had been overwritten with duplicates, and in which the
``shift_tokens_right`` results were discarded instead of being assigned to
``batch["decoder_input_ids"]``.
"""
import tempfile
import unittest

from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
    get_tests_dir,
    nested_simplify,
    require_sentencepiece,
    require_tokenizers,
    require_torch,
)

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")

if is_torch_available():
    from transformers.models.plbart.modeling_plbart import shift_tokens_right

EN_CODE = 50003
PYTHON_CODE = 50002


@require_sentencepiece
@require_tokenizers
class PLBartTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PLBartTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB, language_codes="base", keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_full_base_tokenizer(self):
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB, language_codes="base", keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )

        # The last 4 ids of the "base" vocab are the 3 language codes + <mask>.
        end = tokenizer.vocab_size
        language_tokens = [tokenizer.convert_ids_to_tokens(x) for x in range(end - 4, end)]
        self.assertListEqual(language_tokens, ["__java__", "__python__", "__en_XX__", "<mask>"])

        code = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
        input_ids = tokenizer(code).input_ids
        self.assertEqual(
            tokenizer.decode(input_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False),
            code,
        )

    def test_full_multi_tokenizer(self):
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB, language_codes="multi", keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )

        # The last 7 ids of the "multi" vocab are the 7 language codes.
        end = tokenizer.vocab_size
        language_tokens = [tokenizer.convert_ids_to_tokens(x) for x in range(end - 7, end)]
        self.assertListEqual(
            language_tokens,
            ["__java__", "__python__", "__en_XX__", "__javascript__", "__php__", "__ruby__", "__go__"],
        )

        code = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
        input_ids = tokenizer(code).input_ids
        self.assertEqual(
            tokenizer.decode(input_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False),
            code,
        )


@require_torch
@require_sentencepiece
@require_tokenizers
class PLBartPythonEnIntegrationTest(unittest.TestCase):
    checkpoint_name = "uclanlp/plbart-python-en_XX"
    src_text = [
        "def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])",
        "def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])",
    ]
    tgt_text = [
        "Returns the maximum value of a b c.",
        "Sums the values of a b c.",
    ]
    expected_src_tokens = [
        134,
        5452,
        33460,
        33441,
        33463,
        33465,
        33463,
        33449,
        988,
        20,
        33456,
        19,
        33456,
        771,
        39,
        4258,
        889,
        3318,
        33441,
        33463,
        33465,
        33463,
        33449,
        2471,
        2,
        PYTHON_CODE,
    ]

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: PLBartTokenizer = PLBartTokenizer.from_pretrained(
            cls.checkpoint_name, language_codes="base", src_lang="python", tgt_lang="en_XX"
        )
        cls.pad_token_id = 1
        return cls

    def check_language_codes(self):
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__java__"], 50001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__python__"], 50002)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__en_XX__"], 50003)

    def test_python_en_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_python_en_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(PYTHON_CODE, self.tokenizer.all_special_ids)
        generated_ids = [EN_CODE, 9037, 33442, 57, 752, 153, 14, 56, 18, 9, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_english = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_english)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_python_en_tokenizer_truncation(self):
        src_text = ["def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])" * 20]
        self.assertIsInstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        # Truncated sequences still end with EOS followed by the language code.
        self.assertEqual(ids[-2], 2)
        self.assertEqual(ids[-1], PYTHON_CODE)
        self.assertEqual(len(ids), desired_max_length)

    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "__java__"]), [50004, 50001])

    def test_special_tokens_unaffacted_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = PLBartTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)

    @require_torch
    def test_batch_fairseq_parity(self):
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        self.assertEqual(batch.input_ids[1][-2:].tolist(), [2, PYTHON_CODE])
        self.assertEqual(batch.decoder_input_ids[1][0], EN_CODE)
        self.assertEqual(batch.decoder_input_ids[1][-1], 2)
        self.assertEqual(batch.labels[1][-2:].tolist(), [2, EN_CODE])

    @require_torch
    def test_python_en_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text,
            text_target=self.tgt_text,
            padding=True,
            truncation=True,
            max_length=len(self.expected_src_tokens),
            return_tensors="pt",
        )
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 26), batch.input_ids.shape)
        self.assertEqual((2, 26), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(2, batch.decoder_input_ids[0, -1])  # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id, PYTHON_CODE])

    def test_seqaseq_max_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt"
        )
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id)

        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)

    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="en_XX", tgt_lang="java"
        )

        self.assertEqual(
            nested_simplify(inputs),
            {
                # A, test, EOS, en_XX
                "input_ids": [[150, 242, 2, 50003]],
                "attention_mask": [[1, 1, 1, 1]],
                # java
                "forced_bos_token_id": 50001,
            },
        )
1
"""Weekday lookup via Conway's Doomsday rule."""

# Doomsday "anchor" day-of-month for each month, reduced mod 7
# (e.g. Feb 28 -> 7 in common years, Feb 29 -> 1 in leap years).
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
WEEK_DAY_NAMES = {
    0: "Sunday",
    1: "Monday",
    2: "Tuesday",
    3: "Wednesday",
    4: "Thursday",
    5: "Friday",
    6: "Saturday",
}


def get_week_day(year: int, month: int, day: int) -> str:
    """Return the weekday name of a Gregorian date using the Doomsday algorithm.

    >>> get_week_day(2000, 1, 1)
    'Saturday'
    >>> get_week_day(2024, 1, 1)
    'Monday'
    """
    # NOTE(review): the source had duplicate parameter names (a SyntaxError),
    # constants whose names did not match the references in this body, and an
    # inverted century leap-year test (`year % 400 == 0` instead of `!= 0`),
    # which misclassified years like 2000 as non-leap. All fixed here.
    assert len(str(year)) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"

    # Doomsday algorithm:
    century = year // 100
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100
    centurian_m = centurian % 12
    dooms_day = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    # A centurial year (centurian == 0) is a leap year only when divisible by 400.
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]


# Backward-compatible alias for the mangled name used elsewhere in this dataset.
snake_case_ = get_week_day


if __name__ == "__main__":
    import doctest

    doctest.testmod()
163
'''simple docstring''' import argparse import os import gluonnlp as nlp import mxnet as mx import numpy as np import torch from gluonnlp.base import get_home_dir from gluonnlp.model.bert import BERTEncoder from gluonnlp.model.utils import _load_vocab from gluonnlp.vocab import Vocab from packaging import version from torch import nn from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer from transformers.models.bert.modeling_bert import ( BertIntermediate, BertLayer, BertOutput, BertSelfAttention, BertSelfOutput, ) from transformers.utils import logging if version.parse(nlp.__version__) != version.parse("""0.8.3"""): raise Exception("""requires gluonnlp == 0.8.3""") if version.parse(mx.__version__) != version.parse("""1.5.0"""): raise Exception("""requires mxnet == 1.5.0""") logging.set_verbosity_info() A : List[Any] = logging.get_logger(__name__) A : str = """The Nymphenburg Palace is a beautiful palace in Munich!""" def snake_case_ ( a__ : str ,a__ : str ): """simple docstring""" __lowercase = { """attention_cell""": """multi_head""", """num_layers""": 4, """units""": 10_24, """hidden_size""": 7_68, """max_length""": 5_12, """num_heads""": 8, """scaled""": True, """dropout""": 0.1, """use_residual""": True, """embed_size""": 10_24, """embed_dropout""": 0.1, """word_embed""": None, """layer_norm_eps""": 1e-5, """token_type_vocab_size""": 2, } __lowercase = bort_4_8_768_1024_hparams # Let's construct the original Bort model here # Taken from official BERT implementation, see: # https://github.com/alexa/bort/blob/master/bort/bort.py __lowercase = BERTEncoder( attention_cell=predefined_args["""attention_cell"""] ,num_layers=predefined_args["""num_layers"""] ,units=predefined_args["""units"""] ,hidden_size=predefined_args["""hidden_size"""] ,max_length=predefined_args["""max_length"""] ,num_heads=predefined_args["""num_heads"""] ,scaled=predefined_args["""scaled"""] ,dropout=predefined_args["""dropout"""] ,output_attention=a__ 
,output_all_encodings=a__ ,use_residual=predefined_args["""use_residual"""] ,activation=predefined_args.get("""activation""" ,"""gelu""" ) ,layer_norm_eps=predefined_args.get("""layer_norm_eps""" ,a__ ) ,) # Vocab information needs to be fetched first # It's the same as RoBERTa, so RobertaTokenizer can be used later __lowercase = """openwebtext_ccnews_stories_books_cased""" # Specify download folder to Gluonnlp's vocab __lowercase = os.path.join(get_home_dir() ,"""models""" ) __lowercase = _load_vocab(a__ ,a__ ,a__ ,cls=a__ ) __lowercase = nlp.model.BERTModel( a__ ,len(a__ ) ,units=predefined_args["""units"""] ,embed_size=predefined_args["""embed_size"""] ,embed_dropout=predefined_args["""embed_dropout"""] ,word_embed=predefined_args["""word_embed"""] ,use_pooler=a__ ,use_token_type_embed=a__ ,token_type_vocab_size=predefined_args["""token_type_vocab_size"""] ,use_classifier=a__ ,use_decoder=a__ ,) original_bort.load_parameters(a__ ,cast_dtype=a__ ,ignore_extra=a__ ) __lowercase = original_bort._collect_params_with_prefix() # Build our config 🤗 __lowercase = { """architectures""": ["""BertForMaskedLM"""], """attention_probs_dropout_prob""": predefined_args["""dropout"""], """hidden_act""": """gelu""", """hidden_dropout_prob""": predefined_args["""dropout"""], """hidden_size""": predefined_args["""embed_size"""], """initializer_range""": 0.0_2, """intermediate_size""": predefined_args["""hidden_size"""], """layer_norm_eps""": predefined_args["""layer_norm_eps"""], """max_position_embeddings""": predefined_args["""max_length"""], """model_type""": """bort""", """num_attention_heads""": predefined_args["""num_heads"""], """num_hidden_layers""": predefined_args["""num_layers"""], """pad_token_id""": 1, # 2 = BERT, 1 = RoBERTa """type_vocab_size""": 1, # 2 = BERT, 1 = RoBERTa """vocab_size""": len(a__ ), } __lowercase = BertConfig.from_dict(a__ ) __lowercase = BertForMaskedLM(a__ ) hf_bort_model.eval() # Parameter mapping table (Gluonnlp to Transformers) # * denotes 
layer index # # | Gluon Parameter | Transformers Parameter # | -------------------------------------------------------------- | ---------------------- # | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias` # | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight` # | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight` # | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight` # | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias` # | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight` # | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias` # | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight` # | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias` # | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight` # | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias` # | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight` # | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias` # | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight` # | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias` # | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight` # | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias` # | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight` # | 
`encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias` # | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight` # Helper function to convert MXNET Arrays to PyTorch def to_torch(a__ : Union[str, Any] ) -> nn.Parameter: return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) ) # Check param shapes and map new HF param back def check_and_map_params(a__ : List[str] ,a__ : Union[str, Any] ): __lowercase = hf_param.shape __lowercase = to_torch(params[gluon_param] ) __lowercase = gluon_param.shape assert ( shape_hf == shape_gluon ), f'The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers' return gluon_param __lowercase = check_and_map_params( hf_bort_model.bert.embeddings.word_embeddings.weight ,"""word_embed.0.weight""" ) __lowercase = check_and_map_params( hf_bort_model.bert.embeddings.position_embeddings.weight ,"""encoder.position_weight""" ) __lowercase = check_and_map_params( hf_bort_model.bert.embeddings.LayerNorm.bias ,"""encoder.layer_norm.beta""" ) __lowercase = check_and_map_params( hf_bort_model.bert.embeddings.LayerNorm.weight ,"""encoder.layer_norm.gamma""" ) # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them) __lowercase = torch.zeros_like( hf_bort_model.bert.embeddings.token_type_embeddings.weight.data ) for i in range(hf_bort_config.num_hidden_layers ): __lowercase = hf_bort_model.bert.encoder.layer[i] # self attention __lowercase = layer.attention.self __lowercase = check_and_map_params( self_attn.key.bias.data ,f'encoder.transformer_cells.{i}.attention_cell.proj_key.bias' ) __lowercase = check_and_map_params( self_attn.key.weight.data ,f'encoder.transformer_cells.{i}.attention_cell.proj_key.weight' ) __lowercase = check_and_map_params( self_attn.query.bias.data ,f'encoder.transformer_cells.{i}.attention_cell.proj_query.bias' ) __lowercase = check_and_map_params( self_attn.query.weight.data 
,f'encoder.transformer_cells.{i}.attention_cell.proj_query.weight' ) __lowercase = check_and_map_params( self_attn.value.bias.data ,f'encoder.transformer_cells.{i}.attention_cell.proj_value.bias' ) __lowercase = check_and_map_params( self_attn.value.weight.data ,f'encoder.transformer_cells.{i}.attention_cell.proj_value.weight' ) # self attention output __lowercase = layer.attention.output __lowercase = check_and_map_params( self_output.dense.bias ,f'encoder.transformer_cells.{i}.proj.bias' ) __lowercase = check_and_map_params( self_output.dense.weight ,f'encoder.transformer_cells.{i}.proj.weight' ) __lowercase = check_and_map_params( self_output.LayerNorm.bias ,f'encoder.transformer_cells.{i}.layer_norm.beta' ) __lowercase = check_and_map_params( self_output.LayerNorm.weight ,f'encoder.transformer_cells.{i}.layer_norm.gamma' ) # intermediate __lowercase = layer.intermediate __lowercase = check_and_map_params( intermediate.dense.bias ,f'encoder.transformer_cells.{i}.ffn.ffn_1.bias' ) __lowercase = check_and_map_params( intermediate.dense.weight ,f'encoder.transformer_cells.{i}.ffn.ffn_1.weight' ) # output __lowercase = layer.output __lowercase = check_and_map_params( bert_output.dense.bias ,f'encoder.transformer_cells.{i}.ffn.ffn_2.bias' ) __lowercase = check_and_map_params( bert_output.dense.weight ,f'encoder.transformer_cells.{i}.ffn.ffn_2.weight' ) __lowercase = check_and_map_params( bert_output.LayerNorm.bias ,f'encoder.transformer_cells.{i}.ffn.layer_norm.beta' ) __lowercase = check_and_map_params( bert_output.LayerNorm.weight ,f'encoder.transformer_cells.{i}.ffn.layer_norm.gamma' ) # Save space and energy 🎄 hf_bort_model.half() # Compare output of both models __lowercase = RobertaTokenizer.from_pretrained("""roberta-base""" ) __lowercase = tokenizer.encode_plus(a__ )["""input_ids"""] # Get gluon output __lowercase = mx.nd.array([input_ids] ) __lowercase = original_bort(inputs=a__ ,token_types=[] ) # Get Transformer output (save and reload model again) 
hf_bort_model.save_pretrained(a__ ) __lowercase = BertModel.from_pretrained(a__ ) hf_bort_model.eval() __lowercase = tokenizer.encode_plus(a__ ,return_tensors="""pt""" ) __lowercase = hf_bort_model(**a__ )[0] __lowercase = output_gluon[0].asnumpy() __lowercase = output_hf[0].detach().numpy() __lowercase = np.max(np.abs(hf_layer - gluon_layer ) ).item() __lowercase = np.allclose(a__ ,a__ ,atol=1e-3 ) if success: print("""✔️ Both model do output the same tensors""" ) else: print("""❌ Both model do **NOT** output the same tensors""" ) print("""Absolute difference is:""" ,a__ ) if __name__ == "__main__": A : List[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--bort_checkpoint_path""", default=None, type=str, required=True, help="""Path the official Bort params file.""" ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) A : Any = parser.parse_args() convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
163
1
'''simple docstring''' import json import os import unittest from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import ( VOCAB_FILES_NAMES, GPTSanJapaneseTokenizer, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class UpperCAmelCase__ ( A , unittest.TestCase ): lowerCAmelCase_ = GPTSanJapaneseTokenizer lowerCAmelCase_ = False lowerCAmelCase_ = {'do_clean_text': False, 'add_prefix_space': False} def lowerCamelCase_ ( self : Union[str, Any] ): super().setUp() # fmt: off _lowerCamelCase : Dict = ["こん", "こんに", "にちは", "ばんは", "世界,㔺界", "、", "。", "<BR>", "<SP>", "<TAB>", "<URL>", "<EMAIL>", "<TEL>", "<DATE>", "<PRICE>", "<BLOCK>", "<KIGOU>", "<U2000U2BFF>", "<|emoji1|>", "<unk>", "<|bagoftoken|>", "<|endoftext|>"] # fmt: on _lowerCamelCase : Union[str, Any] = {"emoji": {"\ud83d\ude00": "<|emoji1|>"}, "emoji_inv": {"<|emoji1|>": "\ud83d\ude00"}} # 😀 _lowerCamelCase : int = {"unk_token": "<unk>"} _lowerCamelCase : int = os.path.join(self.tmpdirname,VOCAB_FILES_NAMES["vocab_file"] ) _lowerCamelCase : int = os.path.join(self.tmpdirname,VOCAB_FILES_NAMES["emoji_file"] ) with open(self.vocab_file,"w",encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) ) with open(self.emoji_file,"w" ) as emoji_writer: emoji_writer.write(json.dumps(__A ) ) def lowerCamelCase_ ( self : List[Any],**__A : Optional[int] ): kwargs.update(self.special_tokens_map ) return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname,**__A ) def lowerCamelCase_ ( self : List[str],__A : int ): _lowerCamelCase : List[Any] = "こんにちは、世界。 \nこんばんは、㔺界。😀" _lowerCamelCase : Optional[int] = "こんにちは、世界。 \nこんばんは、世界。😀" return input_text, output_text def lowerCamelCase_ ( self : Dict,__A : Optional[int] ): _lowerCamelCase , _lowerCamelCase : int = self.get_input_output_texts(__A ) _lowerCamelCase : Optional[Any] = tokenizer.encode(__A,add_special_tokens=__A ) 
_lowerCamelCase : int = tokenizer.decode(__A,clean_up_tokenization_spaces=__A ) return text, ids def lowerCamelCase_ ( self : int ): pass # TODO add if relevant def lowerCamelCase_ ( self : Tuple ): pass # TODO add if relevant def lowerCamelCase_ ( self : Dict ): pass # TODO add if relevant def lowerCamelCase_ ( self : List[str] ): _lowerCamelCase : Optional[Any] = self.get_tokenizer() # Testing tokenization _lowerCamelCase : Union[str, Any] = "こんにちは、世界。 こんばんは、㔺界。" _lowerCamelCase : List[Any] = ["こん", "にちは", "、", "世界", "。", "<SP>", "こん", "ばんは", "、", "㔺界", "。"] _lowerCamelCase : str = tokenizer.tokenize(__A ) self.assertListEqual(__A,__A ) # Testing conversion to ids without special tokens _lowerCamelCase : Dict = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6] _lowerCamelCase : int = tokenizer.convert_tokens_to_ids(__A ) self.assertListEqual(__A,__A ) # Testing conversion to ids with special tokens _lowerCamelCase : Optional[Any] = tokens + [tokenizer.unk_token] _lowerCamelCase : int = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 1_9] _lowerCamelCase : List[Any] = tokenizer.convert_tokens_to_ids(__A ) self.assertListEqual(__A,__A ) def lowerCamelCase_ ( self : Optional[int] ): _lowerCamelCase : List[Any] = self.get_tokenizer() # Testing tokenization _lowerCamelCase : Optional[Any] = "こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。" _lowerCamelCase : Tuple = "こんにちは、、、、世界。こんばんは、、、、世界。" _lowerCamelCase : Any = tokenizer.encode(__A ) _lowerCamelCase : int = tokenizer.decode(__A ) self.assertEqual(__A,__A ) @slow def lowerCamelCase_ ( self : Any ): _lowerCamelCase : str = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese" ) # Testing tokenization _lowerCamelCase : Optional[Any] = "こんにちは、世界。" _lowerCamelCase : List[str] = "こんばんは、㔺界。😀" _lowerCamelCase : str = "こんにちは、世界。こんばんは、世界。😀" _lowerCamelCase : Tuple = tokenizer.encode(prefix_text + input_text ) _lowerCamelCase : List[Any] = tokenizer.encode("",prefix_text=prefix_text + input_text ) _lowerCamelCase : Optional[Any] = 
tokenizer.encode(__A,prefix_text=__A ) _lowerCamelCase : Optional[Any] = tokenizer.decode(__A ) _lowerCamelCase : List[str] = tokenizer.decode(__A ) _lowerCamelCase : Dict = tokenizer.decode(__A ) self.assertEqual(__A,__A ) self.assertEqual(__A,__A ) self.assertEqual(__A,__A ) @slow def lowerCamelCase_ ( self : Tuple ): _lowerCamelCase : List[str] = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese" ) # Testing tokenization _lowerCamelCase : Optional[int] = "こんにちは、世界。" _lowerCamelCase : Optional[Any] = "こんばんは、㔺界。😀" _lowerCamelCase : Any = len(tokenizer.encode(__A ) ) - 2 _lowerCamelCase : Dict = len(tokenizer.encode(__A ) ) - 2 _lowerCamelCase : int = [1] + [0] * (len_prefix + len_text + 1) _lowerCamelCase : Optional[Any] = [1] * (len_prefix + len_text + 1) + [0] _lowerCamelCase : List[str] = [1] + [1] * (len_prefix) + [0] * (len_text + 1) _lowerCamelCase : Optional[int] = tokenizer(prefix_text + input_text ).token_type_ids _lowerCamelCase : List[Any] = tokenizer("",prefix_text=prefix_text + input_text ).token_type_ids _lowerCamelCase : str = tokenizer(__A,prefix_text=__A ).token_type_ids self.assertListEqual(__A,__A ) self.assertListEqual(__A,__A ) self.assertListEqual(__A,__A ) @slow def lowerCamelCase_ ( self : Dict ): _lowerCamelCase : List[str] = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese" ) _lowerCamelCase : Any = tokenizer.encode("あンいワ" ) _lowerCamelCase : List[Any] = tokenizer.encode("",prefix_text="あンいワ" ) _lowerCamelCase : str = tokenizer.encode("いワ",prefix_text="あン" ) self.assertEqual(tokenizer.decode(__A ),tokenizer.decode(__A ) ) self.assertEqual(tokenizer.decode(__A ),tokenizer.decode(__A ) ) self.assertNotEqual(__A,__A ) self.assertNotEqual(__A,__A ) self.assertEqual(x_token_a[1],x_token_a[-1] ) # SEG token self.assertEqual(x_token_a[1],x_token_a[3] ) # SEG token @slow def lowerCamelCase_ ( self : Dict ): _lowerCamelCase : Optional[int] = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese" ) _lowerCamelCase : 
str = [["武田信玄", "は、"], ["織田信長", "の配下の、"]] _lowerCamelCase : List[Any] = tokenizer(__A,padding=__A ) _lowerCamelCase : str = tokenizer.batch_encode_plus(__A,padding=__A ) # fmt: off _lowerCamelCase : Dict = [[3_5_9_9_3, 8_6_4_0, 2_5_9_4_8, 3_5_9_9_8, 3_0_6_4_7, 3_5_6_7_5, 3_5_9_9_9, 3_5_9_9_9], [3_5_9_9_3, 1_0_3_8_2, 9_8_6_8, 3_5_9_9_8, 3_0_6_4_6, 9_4_5_9, 3_0_6_4_6, 3_5_6_7_5]] _lowerCamelCase : int = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]] _lowerCamelCase : Tuple = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]] # fmt: on self.assertListEqual(x_token.input_ids,__A ) self.assertListEqual(x_token.token_type_ids,__A ) self.assertListEqual(x_token.attention_mask,__A ) self.assertListEqual(x_token_a.input_ids,__A ) self.assertListEqual(x_token_a.token_type_ids,__A ) self.assertListEqual(x_token_a.attention_mask,__A ) def lowerCamelCase_ ( self : Any ): # Intentionally convert some words to accommodate character fluctuations unique to Japanese pass def lowerCamelCase_ ( self : Any ): # tokenizer has no padding token pass
44
import gc import unittest from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline from diffusers.utils import is_flax_available, load_image, slow from diffusers.utils.testing_utils import require_flax if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard @slow @require_flax class lowercase ( unittest.TestCase): """simple docstring""" def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[Any]: # clean up the VRAM after each test super().tearDown() gc.collect() def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[Any]: UpperCAmelCase_, UpperCAmelCase_= FlaxControlNetModel.from_pretrained( """lllyasviel/sd-controlnet-canny""" , from_pt=__UpperCAmelCase , dtype=jnp.bfloataa ) UpperCAmelCase_, UpperCAmelCase_= FlaxStableDiffusionControlNetPipeline.from_pretrained( """runwayml/stable-diffusion-v1-5""" , controlnet=__UpperCAmelCase , from_pt=__UpperCAmelCase , dtype=jnp.bfloataa ) UpperCAmelCase_= controlnet_params UpperCAmelCase_= """bird""" UpperCAmelCase_= jax.device_count() UpperCAmelCase_= pipe.prepare_text_inputs([prompts] * num_samples ) UpperCAmelCase_= load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png""" ) UpperCAmelCase_= pipe.prepare_image_inputs([canny_image] * num_samples ) UpperCAmelCase_= jax.random.PRNGKey(0 ) UpperCAmelCase_= jax.random.split(__UpperCAmelCase , jax.device_count() ) UpperCAmelCase_= replicate(__UpperCAmelCase ) UpperCAmelCase_= shard(__UpperCAmelCase ) UpperCAmelCase_= shard(__UpperCAmelCase ) UpperCAmelCase_= pipe( prompt_ids=__UpperCAmelCase , image=__UpperCAmelCase , params=__UpperCAmelCase , prng_seed=__UpperCAmelCase , num_inference_steps=50 , jit=__UpperCAmelCase , ).images assert images.shape == (jax.device_count(), 1, 768, 512, 3) UpperCAmelCase_= images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) 
UpperCAmelCase_= images[0, 253:256, 253:256, -1] UpperCAmelCase_= jnp.asarray(jax.device_get(image_slice.flatten() ) ) UpperCAmelCase_= jnp.array( [0.167_969, 0.116_699, 0.081_543, 0.154_297, 0.132_812, 0.108_887, 0.169_922, 0.169_922, 0.205_078] ) print(F"""output_slice: {output_slice}""" ) assert jnp.abs(output_slice - expected_slice ).max() < 1E-2 def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict: UpperCAmelCase_, UpperCAmelCase_= FlaxControlNetModel.from_pretrained( """lllyasviel/sd-controlnet-openpose""" , from_pt=__UpperCAmelCase , dtype=jnp.bfloataa ) UpperCAmelCase_, UpperCAmelCase_= FlaxStableDiffusionControlNetPipeline.from_pretrained( """runwayml/stable-diffusion-v1-5""" , controlnet=__UpperCAmelCase , from_pt=__UpperCAmelCase , dtype=jnp.bfloataa ) UpperCAmelCase_= controlnet_params UpperCAmelCase_= """Chef in the kitchen""" UpperCAmelCase_= jax.device_count() UpperCAmelCase_= pipe.prepare_text_inputs([prompts] * num_samples ) UpperCAmelCase_= load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png""" ) UpperCAmelCase_= pipe.prepare_image_inputs([pose_image] * num_samples ) UpperCAmelCase_= jax.random.PRNGKey(0 ) UpperCAmelCase_= jax.random.split(__UpperCAmelCase , jax.device_count() ) UpperCAmelCase_= replicate(__UpperCAmelCase ) UpperCAmelCase_= shard(__UpperCAmelCase ) UpperCAmelCase_= shard(__UpperCAmelCase ) UpperCAmelCase_= pipe( prompt_ids=__UpperCAmelCase , image=__UpperCAmelCase , params=__UpperCAmelCase , prng_seed=__UpperCAmelCase , num_inference_steps=50 , jit=__UpperCAmelCase , ).images assert images.shape == (jax.device_count(), 1, 768, 512, 3) UpperCAmelCase_= images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) UpperCAmelCase_= images[0, 253:256, 253:256, -1] UpperCAmelCase_= jnp.asarray(jax.device_get(image_slice.flatten() ) ) UpperCAmelCase_= jnp.array( [[0.271_484, 0.261_719, 0.275_391, 0.277_344, 0.279_297, 0.291_016, 0.294_922, 0.302_734, 
0.302_734]] ) print(F"""output_slice: {output_slice}""" ) assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
593
0
import math import time from typing import Dict, List, Optional from torch.utils.data import Dataset from transformers import SeqaSeqTrainer, is_torch_tpu_available from transformers.trainer_utils import PredictionOutput, speed_metrics if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm import torch_xla.debug.metrics as met class UpperCamelCase_ ( UpperCAmelCase__ ): '''simple docstring''' def __init__( self : List[str] , *UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : int=None , **UpperCAmelCase__ : Union[str, Any]) ->Union[str, Any]: '''simple docstring''' super().__init__(*UpperCAmelCase__ , **UpperCAmelCase__) A__ = eval_examples A__ = post_process_function def SCREAMING_SNAKE_CASE ( self : List[str] , UpperCAmelCase__ : Optional[Dataset] = None , UpperCAmelCase__ : Tuple=None , UpperCAmelCase__ : Optional[List[str]] = None , UpperCAmelCase__ : str = "eval" , **UpperCAmelCase__ : Union[str, Any] , ) ->Dict[str, float]: '''simple docstring''' A__ = gen_kwargs.copy() A__ = ( gen_kwargs['''max_length'''] if gen_kwargs.get('''max_length''') is not None else self.args.generation_max_length ) A__ = ( gen_kwargs['''num_beams'''] if gen_kwargs.get('''num_beams''') is not None else self.args.generation_num_beams ) A__ = gen_kwargs A__ = self.eval_dataset if eval_dataset is None else eval_dataset A__ = self.get_eval_dataloader(UpperCAmelCase__) A__ = self.eval_examples if eval_examples is None else eval_examples # Temporarily disable metric computation, we will do it in the loop here. 
A__ = self.compute_metrics A__ = None A__ = time.time() A__ = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: A__ = eval_loop( UpperCAmelCase__ , description='''Evaluation''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=UpperCAmelCase__ , metric_key_prefix=UpperCAmelCase__ , ) finally: A__ = compute_metrics A__ = self.args.eval_batch_size * self.args.world_size if f"""{metric_key_prefix}_jit_compilation_time""" in output.metrics: start_time += output.metrics[f"""{metric_key_prefix}_jit_compilation_time"""] output.metrics.update( speed_metrics( UpperCAmelCase__ , UpperCAmelCase__ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size) , )) if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save: # Only the main node write the results by default A__ = self.post_process_function(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__) A__ = self.compute_metrics(UpperCAmelCase__) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys()): if not key.startswith(f"""{metric_key_prefix}_"""): A__ = metrics.pop(UpperCAmelCase__) metrics.update(output.metrics) else: A__ = output.metrics if self.args.should_log: # Only the main node log the results by default self.log(UpperCAmelCase__) if self.args.tpu_metrics_debug or self.args.debug: # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) 
xm.master_print(met.metrics_report()) A__ = self.callback_handler.on_evaluate(self.args , self.state , self.control , UpperCAmelCase__) return metrics def SCREAMING_SNAKE_CASE ( self : str , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Tuple=None , UpperCAmelCase__ : str = "test" , **UpperCAmelCase__ : Optional[Any]) ->Dict: '''simple docstring''' A__ = gen_kwargs.copy() A__ = self.get_test_dataloader(UpperCAmelCase__) # Temporarily disable metric computation, we will do it in the loop here. A__ = self.compute_metrics A__ = None A__ = time.time() A__ = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: A__ = eval_loop( UpperCAmelCase__ , description='''Prediction''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=UpperCAmelCase__ , metric_key_prefix=UpperCAmelCase__ , ) finally: A__ = compute_metrics A__ = self.args.eval_batch_size * self.args.world_size if f"""{metric_key_prefix}_jit_compilation_time""" in output.metrics: start_time += output.metrics[f"""{metric_key_prefix}_jit_compilation_time"""] output.metrics.update( speed_metrics( UpperCAmelCase__ , UpperCAmelCase__ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size) , )) if self.post_process_function is None or self.compute_metrics is None: return output A__ = self.post_process_function(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , '''predict''') A__ = self.compute_metrics(UpperCAmelCase__) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys()): if not key.startswith(f"""{metric_key_prefix}_"""): A__ = metrics.pop(UpperCAmelCase__) metrics.update(output.metrics) return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=UpperCAmelCase__)
177
from collections import namedtuple

import requests
from lxml import html  # type: ignore

# Record for the three headline counters scraped from the page.
# Restored name: the original bound this to `_lowerCamelCase` while the
# function's return annotation referenced `covid_data`, crashing at import.
covid_data = namedtuple("covid_data", "cases deaths recovered")


def SCREAMING_SNAKE_CASE(
    lowercase_: str = "https://www.worldometers.info/coronavirus/",
) -> covid_data:
    """Scrape worldometers and return the global (cases, deaths, recovered) counters.

    :param lowercase_: URL of the worldometers coronavirus page.
    :return: covid_data namedtuple of the three counter strings, in page order.
    """
    # The three big counters all live in div.maincounter-number > span.
    xpath_str = '//div[@class = "maincounter-number"]/span/text()'
    return covid_data(*html.fromstring(requests.get(lowercase_).content).xpath(xpath_str))


fmt = """Total COVID-19 cases in the world: {}
Total deaths due to COVID-19 in the world: {}
Total COVID-19 patients recovered in the world: {}"""
# Restored: the original printed via undefined names `fmt` / `covid_stats`.
print(fmt.format(*SCREAMING_SNAKE_CASE()))
177
1
"""Project Euler 63: count n-digit positive integers that are also an nth power."""

# Export the dunder-prefixed name explicitly so star-imports still see it.
__all__ = ["__snake_case"]


def __snake_case(max_base: int = 10, max_power: int = 22) -> int:
    """Count base**power values whose decimal length equals the exponent.

    The original declared BOTH parameters as `lowerCAmelCase` (a SyntaxError);
    they are renamed here, keeping positional order and default values.

    :param max_base: bases 1..max_base-1 are tried (9**22 has 21 digits, so 10 suffices)
    :param max_power: powers 1..max_power-1 are tried
    :return: the number of (base, power) pairs with len(str(base**power)) == power
    """
    bases = range(1, max_base)
    powers = range(1, max_power)
    return sum(
        1 for power in powers for base in bases if len(str(base**power)) == power
    )


if __name__ == "__main__":
    # Original printed undefined `solution(...)`; call the actual function.
    print(f"{__snake_case(10, 22) = }")
396
'''simple docstring''' import json import os from functools import lru_cache from typing import TYPE_CHECKING, List, Optional, Tuple import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation _UpperCamelCase : Dict = logging.get_logger(__name__) _UpperCamelCase : Optional[int] = { 'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_config_file': 'tokenizer_config.json', } _UpperCamelCase : str = { 'vocab_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'}, 'merges_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'}, 'tokenizer_config_file': { 'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json' }, } _UpperCamelCase : int = {'facebook/blenderbot-3B': 1_28} @lru_cache() # Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode def __snake_case ( ): __UpperCAmelCase = ( list(range(ord('!' 
) , ord('~' ) + 1 ) ) + list(range(ord('¡' ) , ord('¬' ) + 1 ) ) + list(range(ord('®' ) , ord('ÿ' ) + 1 ) ) ) __UpperCAmelCase = bs[:] __UpperCAmelCase = 0 for b in range(2**8 ): if b not in bs: bs.append(lowerCAmelCase ) cs.append(2**8 + n ) n += 1 __UpperCAmelCase = [chr(lowerCAmelCase ) for n in cs] return dict(zip(lowerCAmelCase , lowerCAmelCase ) ) def __snake_case ( lowerCAmelCase : List[Any] ): __UpperCAmelCase = set() __UpperCAmelCase = word[0] for char in word[1:]: pairs.add((prev_char, char) ) __UpperCAmelCase = char return pairs class _lowercase( _lowerCamelCase ): """simple docstring""" __lowerCamelCase = VOCAB_FILES_NAMES __lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP __lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowerCamelCase = ['''input_ids''', '''attention_mask'''] def __init__( self: Union[str, Any] ,a: Tuple ,a: Dict ,a: Dict="replace" ,a: int="<s>" ,a: List[str]="</s>" ,a: Any="</s>" ,a: str="<s>" ,a: Dict="<unk>" ,a: Union[str, Any]="<pad>" ,a: Optional[int]="<mask>" ,a: int=False ,**a: int ,): __UpperCAmelCase = AddedToken(a ,lstrip=a ,rstrip=a ) if isinstance(a ,a ) else bos_token __UpperCAmelCase = AddedToken(a ,lstrip=a ,rstrip=a ) if isinstance(a ,a ) else eos_token __UpperCAmelCase = AddedToken(a ,lstrip=a ,rstrip=a ) if isinstance(a ,a ) else sep_token __UpperCAmelCase = AddedToken(a ,lstrip=a ,rstrip=a ) if isinstance(a ,a ) else cls_token __UpperCAmelCase = AddedToken(a ,lstrip=a ,rstrip=a ) if isinstance(a ,a ) else unk_token __UpperCAmelCase = AddedToken(a ,lstrip=a ,rstrip=a ) if isinstance(a ,a ) else pad_token # Mask token behave like a normal word, i.e. 
include the space before it __UpperCAmelCase = AddedToken(a ,lstrip=a ,rstrip=a ) if isinstance(a ,a ) else mask_token super().__init__( errors=a ,bos_token=a ,eos_token=a ,unk_token=a ,sep_token=a ,cls_token=a ,pad_token=a ,mask_token=a ,add_prefix_space=a ,**a ,) with open(a ,encoding='utf-8' ) as vocab_handle: __UpperCAmelCase = json.load(a ) __UpperCAmelCase = {v: k for k, v in self.encoder.items()} __UpperCAmelCase = errors # how to handle errors in decoding __UpperCAmelCase = bytes_to_unicode() __UpperCAmelCase = {v: k for k, v in self.byte_encoder.items()} with open(a ,encoding='utf-8' ) as merges_handle: __UpperCAmelCase = merges_handle.read().split('\n' )[1:-1] __UpperCAmelCase = [tuple(merge.split() ) for merge in bpe_merges] __UpperCAmelCase = dict(zip(a ,range(len(a ) ) ) ) __UpperCAmelCase = {} __UpperCAmelCase = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions __UpperCAmelCase = re.compile(r'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+' ) @property # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot def snake_case ( self: Optional[Any] ): return len(self.encoder ) def snake_case ( self: Optional[Any] ): return dict(self.encoder ,**self.added_tokens_encoder ) def snake_case ( self: Optional[int] ,a: Optional[int] ): if token in self.cache: return self.cache[token] __UpperCAmelCase = tuple(a ) __UpperCAmelCase = get_pairs(a ) if not pairs: return token while True: __UpperCAmelCase = min(a ,key=lambda a : self.bpe_ranks.get(a ,float('inf' ) ) ) if bigram not in self.bpe_ranks: break __UpperCAmelCase , __UpperCAmelCase = bigram __UpperCAmelCase = [] __UpperCAmelCase = 0 while i < len(a ): try: __UpperCAmelCase = word.index(a ,a ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) __UpperCAmelCase = j if word[i] == first and i < len(a 
) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 __UpperCAmelCase = tuple(a ) __UpperCAmelCase = new_word if len(a ) == 1: break else: __UpperCAmelCase = get_pairs(a ) __UpperCAmelCase = ' '.join(a ) __UpperCAmelCase = word return word def snake_case ( self: int ,a: str ): __UpperCAmelCase = [] for token in re.findall(self.pat ,a ): __UpperCAmelCase = ''.join( self.byte_encoder[b] for b in token.encode('utf-8' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(a ).split(' ' ) ) return bpe_tokens def snake_case ( self: Optional[Any] ,a: Union[str, Any] ): return self.encoder.get(a ,self.encoder.get(self.unk_token ) ) def snake_case ( self: Any ,a: Union[str, Any] ): return self.decoder.get(a ) def snake_case ( self: Dict ,a: Union[str, Any] ): __UpperCAmelCase = ''.join(a ) __UpperCAmelCase = bytearray([self.byte_decoder[c] for c in text] ).decode('utf-8' ,errors=self.errors ) return text def snake_case ( self: Optional[Any] ,a: str ,a: Optional[str] = None ): if not os.path.isdir(a ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return __UpperCAmelCase = os.path.join( a ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) __UpperCAmelCase = os.path.join( a ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] ) with open(a ,'w' ,encoding='utf-8' ) as f: f.write(json.dumps(self.encoder ,indent=2 ,sort_keys=a ,ensure_ascii=a ) + '\n' ) __UpperCAmelCase = 0 with open(a ,'w' ,encoding='utf-8' ) as writer: writer.write('#version: 0.2\n' ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() ,key=lambda a : kv[1] ): if index != token_index: logger.warning( f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.""" ' Please check that the tokenizer is not corrupted!' 
) __UpperCAmelCase = token_index writer.write(' '.join(a ) + '\n' ) index += 1 return vocab_file, merge_file def snake_case ( self: List[str] ,a: List[int] ,a: Optional[List[int]] = None ,a: bool = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=a ,token_ids_a=a ,already_has_special_tokens=a ) if token_ids_a is None: return [1] + ([0] * len(a )) + [1] return [1] + ([0] * len(a )) + [1, 1] + ([0] * len(a )) + [1] def snake_case ( self: Optional[int] ,a: List[int] ,a: Optional[List[int]] = None ): __UpperCAmelCase = [self.sep_token_id] __UpperCAmelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def snake_case ( self: Dict ,a: List[Any] ,a: Optional[int]=False ,**a: Optional[Any] ): __UpperCAmelCase = kwargs.pop('add_prefix_space' ,self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(a ) > 0 and not text[0].isspace()): __UpperCAmelCase = ' ' + text return (text, kwargs) def snake_case ( self: Tuple ,a: List[int] ,a: Optional[List[int]] = None ): return token_ids_a + [self.eos_token_id] def snake_case ( self: Any ,a: "Conversation" ): __UpperCAmelCase = [] for is_user, text in conversation.iter_texts(): if is_user: # We need to space prefix as it's being done within blenderbot inputs.append(' ' + text ) else: # Generated responses should contain them already. inputs.append(a ) __UpperCAmelCase = ' '.join(a ) __UpperCAmelCase = self.encode(a ) if len(a ) > self.model_max_length: __UpperCAmelCase = input_ids[-self.model_max_length :] logger.warning(f"""Trimmed input from conversation as it was longer than {self.model_max_length} tokens.""" ) return input_ids
396
1
"""simple docstring""" import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging _A = logging.get_logger(__name__) _A = { 'microsoft/unispeech-sat-base-100h-libri-ft': ( 'https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json' ), # See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat } class lowerCamelCase (_SCREAMING_SNAKE_CASE ): '''simple docstring''' a = "unispeech-sat" def __init__( self : List[str] , _snake_case : Optional[int]=32 , _snake_case : Union[str, Any]=768 , _snake_case : Optional[int]=12 , _snake_case : List[str]=12 , _snake_case : Optional[int]=3072 , _snake_case : List[str]="gelu" , _snake_case : Optional[Any]=0.1 , _snake_case : List[str]=0.1 , _snake_case : Any=0.1 , _snake_case : Optional[Any]=0.0 , _snake_case : Optional[Any]=0.0 , _snake_case : str=0.1 , _snake_case : Dict=0.1 , _snake_case : Any=0.02 , _snake_case : List[Any]=1e-5 , _snake_case : List[Any]="group" , _snake_case : Union[str, Any]="gelu" , _snake_case : Union[str, Any]=(512, 512, 512, 512, 512, 512, 512) , _snake_case : int=(5, 2, 2, 2, 2, 2, 2) , _snake_case : Union[str, Any]=(10, 3, 3, 3, 3, 2, 2) , _snake_case : Dict=False , _snake_case : Union[str, Any]=128 , _snake_case : Any=16 , _snake_case : Tuple=False , _snake_case : Optional[int]=True , _snake_case : Tuple=0.05 , _snake_case : Dict=10 , _snake_case : Tuple=2 , _snake_case : Dict=0.0 , _snake_case : Dict=10 , _snake_case : Optional[Any]=0 , _snake_case : int=320 , _snake_case : Optional[Any]=2 , _snake_case : Dict=0.1 , _snake_case : List[Any]=100 , _snake_case : str=256 , _snake_case : Dict=256 , _snake_case : Dict=0.1 , _snake_case : List[Any]="mean" , _snake_case : Tuple=False , _snake_case : List[Any]=False , _snake_case : int=256 , _snake_case : str=(512, 512, 512, 512, 1500) , _snake_case : Optional[Any]=(5, 3, 3, 1, 1) , _snake_case : Optional[int]=(1, 2, 3, 1, 1) , _snake_case : str=512 , 
_snake_case : Tuple=0 , _snake_case : Union[str, Any]=1 , _snake_case : Any=2 , _snake_case : str=504 , **_snake_case : Optional[Any] , ) -> List[str]: super().__init__(**_snake_case , pad_token_id=_snake_case , bos_token_id=_snake_case , eos_token_id=_snake_case ) SCREAMING_SNAKE_CASE__ = hidden_size SCREAMING_SNAKE_CASE__ = feat_extract_norm SCREAMING_SNAKE_CASE__ = feat_extract_activation SCREAMING_SNAKE_CASE__ = list(_snake_case ) SCREAMING_SNAKE_CASE__ = list(_snake_case ) SCREAMING_SNAKE_CASE__ = list(_snake_case ) SCREAMING_SNAKE_CASE__ = conv_bias SCREAMING_SNAKE_CASE__ = num_conv_pos_embeddings SCREAMING_SNAKE_CASE__ = num_conv_pos_embedding_groups SCREAMING_SNAKE_CASE__ = len(self.conv_dim ) SCREAMING_SNAKE_CASE__ = num_hidden_layers SCREAMING_SNAKE_CASE__ = intermediate_size SCREAMING_SNAKE_CASE__ = hidden_act SCREAMING_SNAKE_CASE__ = num_attention_heads SCREAMING_SNAKE_CASE__ = hidden_dropout SCREAMING_SNAKE_CASE__ = attention_dropout SCREAMING_SNAKE_CASE__ = activation_dropout SCREAMING_SNAKE_CASE__ = feat_proj_dropout SCREAMING_SNAKE_CASE__ = final_dropout SCREAMING_SNAKE_CASE__ = layerdrop SCREAMING_SNAKE_CASE__ = layer_norm_eps SCREAMING_SNAKE_CASE__ = initializer_range SCREAMING_SNAKE_CASE__ = vocab_size SCREAMING_SNAKE_CASE__ = num_clusters SCREAMING_SNAKE_CASE__ = do_stable_layer_norm SCREAMING_SNAKE_CASE__ = use_weighted_layer_sum if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( "Configuration for convolutional layers is incorrect. 
It is required that `len(config.conv_dim)` ==" " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =" F""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,""" F""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 SCREAMING_SNAKE_CASE__ = apply_spec_augment SCREAMING_SNAKE_CASE__ = mask_time_prob SCREAMING_SNAKE_CASE__ = mask_time_length SCREAMING_SNAKE_CASE__ = mask_time_min_masks SCREAMING_SNAKE_CASE__ = mask_feature_prob SCREAMING_SNAKE_CASE__ = mask_feature_length SCREAMING_SNAKE_CASE__ = mask_feature_min_masks # parameters for pretraining with codevector quantized representations SCREAMING_SNAKE_CASE__ = num_codevectors_per_group SCREAMING_SNAKE_CASE__ = num_codevector_groups SCREAMING_SNAKE_CASE__ = contrastive_logits_temperature SCREAMING_SNAKE_CASE__ = feat_quantizer_dropout SCREAMING_SNAKE_CASE__ = num_negatives SCREAMING_SNAKE_CASE__ = codevector_dim SCREAMING_SNAKE_CASE__ = proj_codevector_dim SCREAMING_SNAKE_CASE__ = diversity_loss_weight # ctc loss SCREAMING_SNAKE_CASE__ = ctc_loss_reduction SCREAMING_SNAKE_CASE__ = ctc_zero_infinity # SequenceClassification-specific parameter. Feel free to ignore for other classes. SCREAMING_SNAKE_CASE__ = classifier_proj_size # XVector-specific parameters. Feel free to ignore for other classes. SCREAMING_SNAKE_CASE__ = list(_snake_case ) SCREAMING_SNAKE_CASE__ = list(_snake_case ) SCREAMING_SNAKE_CASE__ = list(_snake_case ) SCREAMING_SNAKE_CASE__ = xvector_output_dim @property def lowerCAmelCase_ ( self : str ) -> Tuple: return functools.reduce(operator.mul , self.conv_stride , 1 )
538
"""simple docstring""" from typing import Callable, Optional from .. import Features from ..packaged_modules.generator.generator import Generator from .abc import AbstractDatasetInputStream class lowerCamelCase (_SCREAMING_SNAKE_CASE ): '''simple docstring''' def __init__( self : List[str] , _snake_case : Callable , _snake_case : Optional[Features] = None , _snake_case : str = None , _snake_case : bool = False , _snake_case : bool = False , _snake_case : Optional[dict] = None , _snake_case : Optional[int] = None , **_snake_case : Dict , ) -> Any: super().__init__( features=_snake_case , cache_dir=_snake_case , keep_in_memory=_snake_case , streaming=_snake_case , num_proc=_snake_case , **_snake_case , ) SCREAMING_SNAKE_CASE__ = Generator( cache_dir=_snake_case , features=_snake_case , generator=_snake_case , gen_kwargs=_snake_case , **_snake_case , ) def lowerCAmelCase_ ( self : List[str] ) -> Optional[Any]: # Build iterable dataset if self.streaming: SCREAMING_SNAKE_CASE__ = self.builder.as_streaming_dataset(split="train" ) # Build regular (map-style) dataset else: SCREAMING_SNAKE_CASE__ = None SCREAMING_SNAKE_CASE__ = None SCREAMING_SNAKE_CASE__ = None SCREAMING_SNAKE_CASE__ = None self.builder.download_and_prepare( download_config=_snake_case , download_mode=_snake_case , verification_mode=_snake_case , base_path=_snake_case , num_proc=self.num_proc , ) SCREAMING_SNAKE_CASE__ = self.builder.as_dataset( split="train" , verification_mode=_snake_case , in_memory=self.keep_in_memory ) return dataset
538
1
import warnings from .generation import TFGenerationMixin class __a ( UpperCAmelCase ): # warning at import time warnings.warn( 'Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will ' 'be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead.' , UpperCAmelCase , )
618
import logging import os import sys from dataclasses import dataclass, field from typing import Optional import torch from datasets import load_dataset from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor from torchvision.transforms.functional import InterpolationMode import transformers from transformers import ( HfArgumentParser, Trainer, TrainingArguments, ViTImageProcessor, ViTMAEConfig, ViTMAEForPreTraining, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version lowerCAmelCase__ :List[str] = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version('''4.31.0''') require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt''') @dataclass class __a : _a : Optional[str] = field( default='cifar10' , metadata={'help': 'Name of a dataset from the datasets package'} ) _a : Optional[str] = field( default=UpperCAmelCase , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} ) _a : Optional[str] = field( default=UpperCAmelCase , metadata={'help': 'The column name of the images in the files.'} ) _a : Optional[str] = field(default=UpperCAmelCase , metadata={'help': 'A folder containing the training data.'} ) _a : Optional[str] = field(default=UpperCAmelCase , metadata={'help': 'A folder containing the validation data.'} ) _a : Optional[float] = field( default=0.1_5 , metadata={'help': 'Percent to split off of train for validation.'} ) _a : Optional[int] = field( default=UpperCAmelCase , metadata={ 'help': ( 'For debugging purposes or quicker training, truncate the number of training examples to this ' 'value if set.' 
) } , ) _a : Optional[int] = field( default=UpperCAmelCase , metadata={ 'help': ( 'For debugging purposes or quicker training, truncate the number of evaluation examples to this ' 'value if set.' ) } , ) def UpperCAmelCase__ ( self ) -> List[Any]: """simple docstring""" _UpperCAmelCase = {} if self.train_dir is not None: _UpperCAmelCase = self.train_dir if self.validation_dir is not None: _UpperCAmelCase = self.validation_dir _UpperCAmelCase = data_files if data_files else None @dataclass class __a : _a : str = field( default=UpperCAmelCase , metadata={ 'help': ( 'The model checkpoint for weights initialization.Don\'t set if you want to train a model from scratch.' ) } , ) _a : Optional[str] = field( default=UpperCAmelCase , metadata={'help': 'Pretrained config name or path if not the same as model_name_or_path'} ) _a : Optional[str] = field( default=UpperCAmelCase , metadata={ 'help': ( 'Override some existing default config settings when a model is trained from scratch. Example: ' 'n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index' ) } , ) _a : Optional[str] = field( default=UpperCAmelCase , metadata={'help': 'Where do you want to store the pretrained models downloaded from s3'} ) _a : str = field( default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , ) _a : str = field(default=UpperCAmelCase , metadata={'help': 'Name or path of preprocessor config.'} ) _a : bool = field( default=UpperCAmelCase , metadata={ 'help': ( 'Will use the token generated when running `huggingface-cli login` (necessary to use this script ' 'with private models).' 
) } , ) _a : float = field( default=0.7_5 , metadata={'help': 'The ratio of the number of masked tokens in the input sequence.'} ) _a : bool = field( default=UpperCAmelCase , metadata={'help': 'Whether or not to train with normalized pixel values as target.'} ) @dataclass class __a ( UpperCAmelCase ): _a : float = field( default=1E-3 , metadata={'help': 'Base learning rate: absolute_lr = base_lr * total_batch_size / 256.'} ) def lowerCAmelCase__ ( a__: Optional[Any] ) -> Union[str, Any]: '''simple docstring''' _UpperCAmelCase = torch.stack([example['pixel_values'] for example in examples] ) return {"pixel_values": pixel_values} def lowerCAmelCase__ ( ) -> Tuple: '''simple docstring''' _UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry('run_mae' , a__ , a__ ) # Setup logging logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. 
transformers.utils.logging.set_verbosity_info() _UpperCAmelCase = training_args.get_process_log_level() logger.setLevel(a__ ) transformers.utils.logging.set_verbosity(a__ ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}''' + F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' ) logger.info(F'''Training/evaluation parameters {training_args}''' ) # Detecting last checkpoint. _UpperCAmelCase = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: _UpperCAmelCase = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F'''Output directory ({training_args.output_dir}) already exists and is not empty. ''' 'Use --overwrite_output_dir to overcome.' ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change ''' 'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' ) # Initialize our dataset. _UpperCAmelCase = load_dataset( data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) # If we don't have a validation split, split off a percentage of train as validation. 
_UpperCAmelCase = None if 'validation' in ds.keys() else data_args.train_val_split if isinstance(data_args.train_val_split , a__ ) and data_args.train_val_split > 0.0: _UpperCAmelCase = ds['train'].train_test_split(data_args.train_val_split ) _UpperCAmelCase = split['train'] _UpperCAmelCase = split['test'] # Load pretrained model and image processor # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. _UpperCAmelCase = { 'cache_dir': model_args.cache_dir, 'revision': model_args.model_revision, 'use_auth_token': True if model_args.use_auth_token else None, } if model_args.config_name: _UpperCAmelCase = ViTMAEConfig.from_pretrained(model_args.config_name , **a__ ) elif model_args.model_name_or_path: _UpperCAmelCase = ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **a__ ) else: _UpperCAmelCase = ViTMAEConfig() logger.warning('You are instantiating a new config instance from scratch.' ) if model_args.config_overrides is not None: logger.info(F'''Overriding config: {model_args.config_overrides}''' ) config.update_from_string(model_args.config_overrides ) logger.info(F'''New config: {config}''' ) # adapt config config.update( { 'mask_ratio': model_args.mask_ratio, 'norm_pix_loss': model_args.norm_pix_loss, } ) # create image processor if model_args.image_processor_name: _UpperCAmelCase = ViTImageProcessor.from_pretrained(model_args.image_processor_name , **a__ ) elif model_args.model_name_or_path: _UpperCAmelCase = ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **a__ ) else: _UpperCAmelCase = ViTImageProcessor() # create model if model_args.model_name_or_path: _UpperCAmelCase = ViTMAEForPreTraining.from_pretrained( model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=a__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) else: 
logger.info('Training new model from scratch' ) _UpperCAmelCase = ViTMAEForPreTraining(a__ ) if training_args.do_train: _UpperCAmelCase = ds['train'].column_names else: _UpperCAmelCase = ds['validation'].column_names if data_args.image_column_name is not None: _UpperCAmelCase = data_args.image_column_name elif "image" in column_names: _UpperCAmelCase = 'image' elif "img" in column_names: _UpperCAmelCase = 'img' else: _UpperCAmelCase = column_names[0] # transformations as done in original MAE paper # source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py if "shortest_edge" in image_processor.size: _UpperCAmelCase = image_processor.size['shortest_edge'] else: _UpperCAmelCase = (image_processor.size['height'], image_processor.size['width']) _UpperCAmelCase = Compose( [ Lambda(lambda a__ : img.convert('RGB' ) if img.mode != "RGB" else img ), RandomResizedCrop(a__ , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ), RandomHorizontalFlip(), ToTensor(), Normalize(mean=image_processor.image_mean , std=image_processor.image_std ), ] ) def preprocess_images(a__: Optional[int] ): _UpperCAmelCase = [transforms(a__ ) for image in examples[image_column_name]] return examples if training_args.do_train: if "train" not in ds: raise ValueError('--do_train requires a train dataset' ) if data_args.max_train_samples is not None: _UpperCAmelCase = ds['train'].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) # Set the training transforms ds["train"].set_transform(a__ ) if training_args.do_eval: if "validation" not in ds: raise ValueError('--do_eval requires a validation dataset' ) if data_args.max_eval_samples is not None: _UpperCAmelCase = ( ds['validation'].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms ds["validation"].set_transform(a__ ) # Compute absolute learning rate _UpperCAmelCase = ( training_args.train_batch_size * 
training_args.gradient_accumulation_steps * training_args.world_size ) if training_args.base_learning_rate is not None: _UpperCAmelCase = training_args.base_learning_rate * total_train_batch_size / 2_5_6 # Initialize our trainer _UpperCAmelCase = Trainer( model=a__ , args=a__ , train_dataset=ds['train'] if training_args.do_train else None , eval_dataset=ds['validation'] if training_args.do_eval else None , tokenizer=a__ , data_collator=a__ , ) # Training if training_args.do_train: _UpperCAmelCase = None if training_args.resume_from_checkpoint is not None: _UpperCAmelCase = training_args.resume_from_checkpoint elif last_checkpoint is not None: _UpperCAmelCase = last_checkpoint _UpperCAmelCase = trainer.train(resume_from_checkpoint=a__ ) trainer.save_model() trainer.log_metrics('train' , train_result.metrics ) trainer.save_metrics('train' , train_result.metrics ) trainer.save_state() # Evaluation if training_args.do_eval: _UpperCAmelCase = trainer.evaluate() trainer.log_metrics('eval' , a__ ) trainer.save_metrics('eval' , a__ ) # Write model card and (optionally) push to hub _UpperCAmelCase = { 'tasks': 'masked-auto-encoding', 'dataset': data_args.dataset_name, 'tags': ['masked-auto-encoding'], } if training_args.push_to_hub: trainer.push_to_hub(**a__ ) else: trainer.create_model_card(**a__ ) def lowerCAmelCase__ ( a__: Tuple ) -> Optional[Any]: '''simple docstring''' main() if __name__ == "__main__": main()
618
1
"""simple docstring""" from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxSeqaSeqConfigWithPast from ...utils import logging a_ = logging.get_logger(__name__) a_ = { """t5-small""": """https://huggingface.co/t5-small/resolve/main/config.json""", """t5-base""": """https://huggingface.co/t5-base/resolve/main/config.json""", """t5-large""": """https://huggingface.co/t5-large/resolve/main/config.json""", """t5-3b""": """https://huggingface.co/t5-3b/resolve/main/config.json""", """t5-11b""": """https://huggingface.co/t5-11b/resolve/main/config.json""", } class A_ (UpperCamelCase__ ): """simple docstring""" a_ : int = """t5""" a_ : Optional[int] = ["""past_key_values"""] a_ : Dict = {"""hidden_size""": """d_model""", """num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers"""} def __init__( self , A=3_2128 , A=512 , A=64 , A=2048 , A=6 , A=None , A=8 , A=32 , A=128 , A=0.1 , A=1E-6 , A=1.0 , A="relu" , A=True , A=True , A=0 , A=1 , **A , ): _lowerCamelCase : Optional[Any] = vocab_size _lowerCamelCase : Tuple = d_model _lowerCamelCase : str = d_kv _lowerCamelCase : List[str] = d_ff _lowerCamelCase : int = num_layers _lowerCamelCase : str = ( num_decoder_layers if num_decoder_layers is not None else self.num_layers ) # default = symmetry _lowerCamelCase : Any = num_heads _lowerCamelCase : Dict = relative_attention_num_buckets _lowerCamelCase : int = relative_attention_max_distance _lowerCamelCase : Optional[int] = dropout_rate _lowerCamelCase : Optional[Any] = layer_norm_epsilon _lowerCamelCase : str = initializer_factor _lowerCamelCase : Optional[int] = feed_forward_proj _lowerCamelCase : Optional[Any] = use_cache _lowerCamelCase : Any = self.feed_forward_proj.split('-' ) _lowerCamelCase : List[str] = act_info[-1] _lowerCamelCase : Optional[int] = act_info[0] == 'gated' if len(__A ) > 1 and act_info[0] != "gated" or len(__A ) > 2: raise ValueError( F"`feed_forward_proj`: {feed_forward_proj} is 
not a valid activation function of the dense layer." 'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. ' '\'gated-gelu\' or \'relu\'' ) # for backwards compatibility if feed_forward_proj == "gated-gelu": _lowerCamelCase : Any = 'gelu_new' super().__init__( pad_token_id=__A , eos_token_id=__A , is_encoder_decoder=__A , **__A , ) class A_ (UpperCamelCase__ ): """simple docstring""" @property def _lowerCAmelCase ( self ): _lowerCamelCase : Dict = { 'input_ids': {0: 'batch', 1: 'encoder_sequence'}, 'attention_mask': {0: 'batch', 1: 'encoder_sequence'}, } if self.use_past: _lowerCamelCase : Tuple = 'past_encoder_sequence + sequence' _lowerCamelCase : Union[str, Any] = {0: 'batch'} _lowerCamelCase : Optional[Any] = {0: 'batch', 1: 'past_decoder_sequence + sequence'} else: _lowerCamelCase : str = {0: 'batch', 1: 'decoder_sequence'} _lowerCamelCase : Any = {0: 'batch', 1: 'decoder_sequence'} if self.use_past: self.fill_with_past_key_values_(__A , direction='inputs' ) return common_inputs @property def _lowerCAmelCase ( self ): return 13
715
"""simple docstring""" import os import textwrap import pyarrow as pa import pytest from datasets import ClassLabel, Features, Image from datasets.packaged_modules.csv.csv import Csv from ..utils import require_pil @pytest.fixture def UpperCAmelCase_ ( __a : Any ): '''simple docstring''' _lowerCamelCase : List[str] = tmp_path / 'file.csv' _lowerCamelCase : List[str] = textwrap.dedent( '\\n header1,header2\n 1,2\n 10,20\n ' ) with open(__a , 'w' ) as f: f.write(__a ) return str(__a ) @pytest.fixture def UpperCAmelCase_ ( __a : List[str] ): '''simple docstring''' _lowerCamelCase : Optional[Any] = tmp_path / 'malformed_file.csv' _lowerCamelCase : int = textwrap.dedent( '\\n header1,header2\n 1,2\n 10,20,\n ' ) with open(__a , 'w' ) as f: f.write(__a ) return str(__a ) @pytest.fixture def UpperCAmelCase_ ( __a : Union[str, Any] , __a : Optional[Any] ): '''simple docstring''' _lowerCamelCase : Tuple = tmp_path / 'csv_with_image.csv' _lowerCamelCase : Tuple = textwrap.dedent( f"\\n image\n {image_file}\n " ) with open(__a , 'w' ) as f: f.write(__a ) return str(__a ) @pytest.fixture def UpperCAmelCase_ ( __a : Any ): '''simple docstring''' _lowerCamelCase : List[Any] = tmp_path / 'csv_with_label.csv' _lowerCamelCase : Union[str, Any] = textwrap.dedent( '\\n label\n good\n bad\n good\n ' ) with open(__a , 'w' ) as f: f.write(__a ) return str(__a ) @pytest.fixture def UpperCAmelCase_ ( __a : Tuple ): '''simple docstring''' _lowerCamelCase : str = tmp_path / 'csv_with_int_list.csv' _lowerCamelCase : int = textwrap.dedent( '\\n int_list\n 1 2 3\n 4 5 6\n 7 8 9\n ' ) with open(__a , 'w' ) as f: f.write(__a ) return str(__a ) def UpperCAmelCase_ ( __a : int , __a : Optional[Any] , __a : List[str] ): '''simple docstring''' _lowerCamelCase : int = Csv() _lowerCamelCase : List[Any] = csv._generate_tables([[csv_file, malformed_csv_file]] ) with pytest.raises(__a , match='Error tokenizing data' ): for _ in generator: pass assert any( record.levelname == 'ERROR' and 'Failed to read 
file' in record.message and os.path.basename(__a ) in record.message for record in caplog.records ) @require_pil def UpperCAmelCase_ ( __a : Optional[int] ): '''simple docstring''' with open(__a , encoding='utf-8' ) as f: _lowerCamelCase : Any = f.read().splitlines()[1] _lowerCamelCase : Tuple = Csv(encoding='utf-8' , features=Features({'image': Image()} ) ) _lowerCamelCase : int = csv._generate_tables([[csv_file_with_image]] ) _lowerCamelCase : Tuple = pa.concat_tables([table for _, table in generator] ) assert pa_table.schema.field('image' ).type == Image()() _lowerCamelCase : Union[str, Any] = pa_table.to_pydict()['image'] assert generated_content == [{"path": image_file, "bytes": None}] def UpperCAmelCase_ ( __a : Union[str, Any] ): '''simple docstring''' with open(__a , encoding='utf-8' ) as f: _lowerCamelCase : List[Any] = f.read().splitlines()[1:] _lowerCamelCase : Tuple = Csv(encoding='utf-8' , features=Features({'label': ClassLabel(names=['good', 'bad'] )} ) ) _lowerCamelCase : Any = csv._generate_tables([[csv_file_with_label]] ) _lowerCamelCase : Optional[int] = pa.concat_tables([table for _, table in generator] ) assert pa_table.schema.field('label' ).type == ClassLabel(names=['good', 'bad'] )() _lowerCamelCase : str = pa_table.to_pydict()['label'] assert generated_content == [ClassLabel(names=['good', 'bad'] ).straint(__a ) for label in labels] def UpperCAmelCase_ ( __a : Optional[int] ): '''simple docstring''' _lowerCamelCase : Any = Csv(encoding='utf-8' , sep=',' , converters={'int_list': lambda __a : [int(__a ) for i in x.split()]} ) _lowerCamelCase : Union[str, Any] = csv._generate_tables([[csv_file_with_int_list]] ) _lowerCamelCase : Any = pa.concat_tables([table for _, table in generator] ) assert pa.types.is_list(pa_table.schema.field('int_list' ).type ) _lowerCamelCase : Optional[Any] = pa_table.to_pydict()['int_list'] assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
349
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __lowerCAmelCase = { """configuration_jukebox""": [ """JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP""", """JukeboxConfig""", """JukeboxPriorConfig""", """JukeboxVQVAEConfig""", ], """tokenization_jukebox""": ["""JukeboxTokenizer"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase = [ """JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST""", """JukeboxModel""", """JukeboxPreTrainedModel""", """JukeboxVQVAE""", """JukeboxPrior""", ] if TYPE_CHECKING: from .configuration_jukebox import ( JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP, JukeboxConfig, JukeboxPriorConfig, JukeboxVQVAEConfig, ) from .tokenization_jukebox import JukeboxTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_jukebox import ( JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST, JukeboxModel, JukeboxPreTrainedModel, JukeboxPrior, JukeboxVQVAE, ) else: import sys __lowerCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
229
'''simple docstring''' from ..utils import DummyObject, requires_backends class UpperCAmelCase__ ( metaclass=lowercase__ ): """simple docstring""" __UpperCAmelCase : List[str] = ['''keras_nlp'''] def __init__( self : Union[str, Any] ,*_a : List[Any] ,**_a : int ): '''simple docstring''' requires_backends(self ,['keras_nlp'] )
229
1
"""PyTorch ResNet model.

Reconstructed with canonical identifiers: the obfuscated version defined every
class as ``_a`` (so internal references such as ``ResNetConvLayer`` were
unresolved), used duplicate parameter names (a SyntaxError), and referenced
nonexistent torch attributes (``nn.Convad`` instead of ``nn.Conv2d``).
"""

from typing import Optional

import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from ...activations import ACT2FN
from ...modeling_outputs import (
    BackboneOutput,
    BaseModelOutputWithNoAttention,
    BaseModelOutputWithPoolingAndNoAttention,
    ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
    add_code_sample_docstrings,
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    logging,
    replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig


logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "ResNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "microsoft/resnet-50"
_EXPECTED_OUTPUT_SHAPE = [1, 2048, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "microsoft/resnet-50"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tiger cat"

RESNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "microsoft/resnet-50",
    # See all resnet models at https://huggingface.co/models?filter=resnet
]


class ResNetConvLayer(nn.Module):
    """Convolution -> BatchNorm -> activation, the elementary ResNet block."""

    def __init__(
        self, in_channels: int, out_channels: int, kernel_size: int = 3, stride: int = 1, activation: str = "relu"
    ):
        super().__init__()
        # padding = kernel_size // 2 gives "same" spatial size for odd kernels.
        self.convolution = nn.Conv2d(
            in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=kernel_size // 2, bias=False
        )
        self.normalization = nn.BatchNorm2d(out_channels)
        self.activation = ACT2FN[activation] if activation is not None else nn.Identity()

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state


class ResNetEmbeddings(nn.Module):
    """ResNet "stem": a 7x7 stride-2 convolution followed by 3x3 stride-2 max pooling."""

    def __init__(self, config: ResNetConfig):
        super().__init__()
        self.embedder = ResNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=7, stride=2, activation=config.hidden_act
        )
        self.pooler = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.num_channels = config.num_channels

    def forward(self, pixel_values: Tensor) -> Tensor:
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        embedding = self.embedder(pixel_values)
        embedding = self.pooler(embedding)
        return embedding


class ResNetShortCut(nn.Module):
    """1x1 projection shortcut, used when the residual branch changes shape/stride."""

    def __init__(self, in_channels: int, out_channels: int, stride: int = 2):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        return hidden_state


class ResNetBasicLayer(nn.Module):
    """Two-convolution residual layer (ResNet-18/34 style)."""

    def __init__(self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu"):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, out_channels, stride=stride),
            # The last conv has no activation; it is applied after the residual sum.
            ResNetConvLayer(out_channels, out_channels, activation=None),
        )
        self.activation = ACT2FN[activation]

    def forward(self, hidden_state: Tensor) -> Tensor:
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class ResNetBottleNeckLayer(nn.Module):
    """Three-convolution bottleneck residual layer (ResNet-50/101/152 style).

    The first 1x1 convolution reduces the width by `reduction`, the last 1x1
    convolution restores it.
    """

    def __init__(
        self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu", reduction: int = 4
    ):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        reduces_channels = out_channels // reduction
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, reduces_channels, kernel_size=1),
            ResNetConvLayer(reduces_channels, reduces_channels, stride=stride),
            ResNetConvLayer(reduces_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[activation]

    def forward(self, hidden_state: Tensor) -> Tensor:
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class ResNetStage(nn.Module):
    """A ResNet stage: `depth` stacked layers; the first one may downsample."""

    def __init__(
        self,
        config: ResNetConfig,
        in_channels: int,
        out_channels: int,
        stride: int = 2,
        depth: int = 2,
    ):
        super().__init__()
        layer = ResNetBottleNeckLayer if config.layer_type == "bottleneck" else ResNetBasicLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(in_channels, out_channels, stride=stride, activation=config.hidden_act),
            *[layer(out_channels, out_channels, activation=config.hidden_act) for _ in range(depth - 1)],
        )

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state


class ResNetEncoder(nn.Module):
    """The full stack of ResNet stages."""

    def __init__(self, config: ResNetConfig):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
        self.stages.append(
            ResNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(ResNetStage(config, in_channels, out_channels, depth=depth))

    def forward(
        self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True
    ) -> BaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(
            last_hidden_state=hidden_state,
            hidden_states=hidden_states,
        )


class ResNetPreTrainedModel(PreTrainedModel):
    """Handles weight initialization and the pretrained-model download/load API."""

    config_class = ResNetConfig
    base_model_prefix = "resnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        # He init for convs, constant init for norm layers (standard for ResNets).
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, ResNetEncoder):
            module.gradient_checkpointing = value


RESNET_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

RESNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.

        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


@add_start_docstrings(
    "The bare ResNet model outputting raw features without any specific head on top.",
    RESNET_START_DOCSTRING,
)
class ResNetModel(ResNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Tensor,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> BaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values)
        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )


@add_start_docstrings(
    """
    ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    RESNET_START_DOCSTRING,
)
class ResNetForImageClassification(ResNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.resnet = ResNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> ImageClassifierOutputWithNoAttention:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.resnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            # Infer the problem type once (regression vs single/multi-label classification).
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)


@add_start_docstrings(
    """
    ResNet backbone, to be used with frameworks like DETR and MaskFormer.
    """,
    RESNET_START_DOCSTRING,
)
class ResNetBackbone(ResNetPreTrainedModel, BackboneMixin):
    def __init__(self, config):
        super().__init__(config)
        super()._init_backbone(config)

        self.num_features = [config.embedding_size] + config.hidden_sizes
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)

        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        pixel_values: Tensor,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> BackboneOutput:
        """
        Returns:
            The feature maps of the stages requested in `config.out_features`.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )

        embedding_output = self.embedder(pixel_values)

        # Always request hidden states internally: the backbone selects
        # per-stage feature maps from them.
        outputs = self.encoder(embedding_output, output_hidden_states=True, return_dict=True)

        hidden_states = outputs.hidden_states

        feature_maps = ()
        for idx, stage in enumerate(self.stage_names):
            if stage in self.out_features:
                feature_maps += (hidden_states[idx],)

        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output += (outputs.hidden_states,)
            return output

        return BackboneOutput(
            feature_maps=feature_maps,
            hidden_states=outputs.hidden_states if output_hidden_states else None,
            attentions=None,
        )
710
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from packaging import version

from .. import __version__
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from .doc import (
    add_code_sample_docstrings,
    add_end_docstrings,
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    copy_func,
    replace_return_docstrings,
)
from .generic import (
    ContextManagers,
    ExplicitEnum,
    ModelOutput,
    PaddingStrategy,
    TensorType,
    add_model_info_to_auto_map,
    cached_property,
    can_return_loss,
    expand_dims,
    find_labels,
    flatten_dict,
    infer_framework,
    is_jax_tensor,
    is_numpy_array,
    is_tensor,
    is_tf_symbolic_tensor,
    is_tf_tensor,
    is_torch_device,
    is_torch_dtype,
    is_torch_tensor,
    reshape,
    squeeze,
    strtobool,
    tensor_size,
    to_numpy,
    to_py_obj,
    transpose,
    working_or_temp_dir,
)
from .hub import (
    CLOUDFRONT_DISTRIB_PREFIX,
    DISABLE_TELEMETRY,
    HF_MODULES_CACHE,
    HUGGINGFACE_CO_PREFIX,
    HUGGINGFACE_CO_RESOLVE_ENDPOINT,
    PYTORCH_PRETRAINED_BERT_CACHE,
    PYTORCH_TRANSFORMERS_CACHE,
    S3_BUCKET_PREFIX,
    TRANSFORMERS_CACHE,
    TRANSFORMERS_DYNAMIC_MODULE_NAME,
    EntryNotFoundError,
    PushToHubMixin,
    RepositoryNotFoundError,
    RevisionNotFoundError,
    cached_file,
    default_cache_path,
    define_sagemaker_information,
    download_url,
    extract_commit_hash,
    get_cached_models,
    get_file_from_repo,
    get_full_repo_name,
    has_file,
    http_user_agent,
    is_offline_mode,
    is_remote_url,
    move_cache,
    send_example_telemetry,
    try_to_load_from_cache,
)
from .import_utils import (
    ENV_VARS_TRUE_AND_AUTO_VALUES,
    ENV_VARS_TRUE_VALUES,
    TORCH_FX_REQUIRED_VERSION,
    USE_JAX,
    USE_TF,
    USE_TORCH,
    DummyObject,
    OptionalDependencyNotAvailable,
    _LazyModule,
    ccl_version,
    direct_transformers_import,
    get_torch_version,
    is_accelerate_available,
    is_apex_available,
    is_bitsandbytes_available,
    is_bsa_available,
    is_coloredlogs_available,
    is_cython_available,
    is_datasets_available,
    is_decord_available,
    is_detectrona_available,
    is_faiss_available,
    is_flax_available,
    is_ftfy_available,
    is_in_notebook,
    is_ipex_available,
    is_jieba_available,
    is_jumanpp_available,
    is_kenlm_available,
    is_keras_nlp_available,
    is_librosa_available,
    is_natten_available,
    is_ninja_available,
    is_onnx_available,
    is_openai_available,
    is_optimum_available,
    is_pandas_available,
    is_peft_available,
    is_phonemizer_available,
    is_protobuf_available,
    is_psutil_available,
    is_pyanvml_available,
    is_pyctcdecode_available,
    is_pytesseract_available,
    is_pytest_available,
    is_pytorch_quantization_available,
    is_rjieba_available,
    is_sacremoses_available,
    is_safetensors_available,
    is_sagemaker_dp_enabled,
    is_sagemaker_mp_enabled,
    is_scipy_available,
    is_sentencepiece_available,
    is_seqio_available,
    is_sklearn_available,
    is_soundfile_availble,
    is_spacy_available,
    is_speech_available,
    is_sudachi_available,
    is_tensorflow_probability_available,
    is_tensorflow_text_available,
    is_tfaonnx_available,
    is_tf_available,
    is_timm_available,
    is_tokenizers_available,
    is_torch_available,
    is_torch_bfaa_available,
    is_torch_bfaa_cpu_available,
    is_torch_bfaa_gpu_available,
    is_torch_compile_available,
    is_torch_cuda_available,
    is_torch_fx_available,
    is_torch_fx_proxy,
    is_torch_mps_available,
    is_torch_neuroncore_available,
    is_torch_tensorrt_fx_available,
    is_torch_tfaa_available,
    is_torch_tpu_available,
    is_torchaudio_available,
    is_torchdistx_available,
    is_torchdynamo_available,
    is_torchvision_available,
    is_training_run_on_sagemaker,
    is_vision_available,
    requires_backends,
    torch_only_method,
)


# Canonical file names used across the library for saving/loading artifacts.
# (The obfuscated version bound them all to a single reused name, which made
# every constant except the last unreachable.)
WEIGHTS_NAME = "pytorch_model.bin"
WEIGHTS_INDEX_NAME = "pytorch_model.bin.index.json"
ADAPTER_CONFIG_NAME = "adapter_config.json"
ADAPTER_WEIGHTS_NAME = "adapter_model.bin"
ADAPTER_SAFE_WEIGHTS_NAME = "adapter_model.safetensors"
TF2_WEIGHTS_NAME = "tf_model.h5"
TF2_WEIGHTS_INDEX_NAME = "tf_model.h5.index.json"
TF_WEIGHTS_NAME = "model.ckpt"
FLAX_WEIGHTS_NAME = "flax_model.msgpack"
FLAX_WEIGHTS_INDEX_NAME = "flax_model.msgpack.index.json"
SAFE_WEIGHTS_NAME = "model.safetensors"
SAFE_WEIGHTS_INDEX_NAME = "model.safetensors.index.json"
CONFIG_NAME = "config.json"
FEATURE_EXTRACTOR_NAME = "preprocessor_config.json"
IMAGE_PROCESSOR_NAME = FEATURE_EXTRACTOR_NAME
GENERATION_CONFIG_NAME = "generation_config.json"
MODEL_CARD_NAME = "modelcard.json"

SENTENCEPIECE_UNDERLINE = "▁"
SPIECE_UNDERLINE = SENTENCEPIECE_UNDERLINE  # Kept for backward compatibility

MULTIPLE_CHOICE_DUMMY_INPUTS = [
    [[0, 1, 0, 1], [1, 0, 0, 1]]
] * 2  # Needs to have 0s and 1s only since XLM uses it for langs too.
DUMMY_INPUTS = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
DUMMY_MASK = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]


def check_min_version(min_version):
    """Raise an ImportError when the installed ``transformers`` is older than *min_version*.

    Bug fix: the obfuscated version compared ``min_version`` against itself
    (always false), so the check could never fire.
    """
    if version.parse(__version__) < version.parse(min_version):
        if "dev" in min_version:
            error_message = (
                "This example requires a source install from HuggingFace Transformers (see "
                "`https://huggingface.co/docs/transformers/installation#install-from-source`),"
            )
        else:
            error_message = f"""This example requires a minimum version of {min_version},"""
        error_message += f""" but the version found is {__version__}.\n"""
        raise ImportError(
            error_message
            + "Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other "
            "versions of HuggingFace Transformers."
        )
220
0
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


# Lazy-import structure consumed by `_LazyModule` below.
# Bug fix: the obfuscated version bound this dict to a throwaway name while
# still referencing `_import_structure`, and it discarded the torch-gated
# modeling entries instead of adding them to the structure.
_import_structure = {
    "configuration_cpmant": ["CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CpmAntConfig"],
    "tokenization_cpmant": ["CpmAntTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Only expose the PyTorch modeling classes when torch is installed.
    _import_structure["modeling_cpmant"] = [
        "CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CpmAntForCausalLM",
        "CpmAntModel",
        "CpmAntPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
    from .tokenization_cpmant import CpmAntTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_cpmant import (
            CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
            CpmAntForCausalLM,
            CpmAntModel,
            CpmAntPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy deps load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
79
from __future__ import annotations from dataclasses import dataclass @dataclass class a : """simple docstring""" UpperCamelCase_ : float UpperCamelCase_ : TreeNode | None = None UpperCamelCase_ : TreeNode | None = None def _A( UpperCamelCase__ : TreeNode | None ) -> bool: '''simple docstring''' def is_valid_tree(UpperCamelCase__ : TreeNode | None ) -> bool: if node is None: return True if not isinstance(UpperCamelCase__ , UpperCamelCase__ ): return False try: float(node.data ) except (TypeError, ValueError): return False return is_valid_tree(node.left ) and is_valid_tree(node.right ) if not is_valid_tree(UpperCamelCase__ ): raise ValueError( '''Each node should be type of TreeNode and data should be float.''' ) def is_binary_search_tree_recursive_check( UpperCamelCase__ : TreeNode | None , UpperCamelCase__ : float , UpperCamelCase__ : float ) -> bool: if node is None: return True return ( left_bound < node.data < right_bound and is_binary_search_tree_recursive_check(node.left , UpperCamelCase__ , node.data ) and is_binary_search_tree_recursive_check( node.right , node.data , UpperCamelCase__ ) ) return is_binary_search_tree_recursive_check(UpperCamelCase__ , -float('''inf''' ) , float('''inf''' ) ) if __name__ == "__main__": import doctest doctest.testmod()
332
0
"""DeBERTa-v2 model configuration.

Reconstructed with canonical identifiers: the obfuscated version declared
duplicate parameter names (a SyntaxError), referenced the undefined name
``_UpperCamelCase`` inside ``__init__``, and defined two classes under the
same name so the config class was shadowed.
"""

from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


if TYPE_CHECKING:
    from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType


logger = logging.get_logger(__name__)

DEBERTA_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/deberta-v2-xlarge": "https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json",
    "microsoft/deberta-v2-xxlarge": "https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json",
    "microsoft/deberta-v2-xlarge-mnli": (
        "https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json"
    ),
    "microsoft/deberta-v2-xxlarge-mnli": (
        "https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json"
    ),
}


class DebertaV2Config(PretrainedConfig):
    """Configuration for a DeBERTa-v2 model.

    Instantiating with the defaults yields a configuration similar to
    `microsoft/deberta-v2-xlarge`.
    """

    model_type = "deberta-v2"

    def __init__(
        self,
        vocab_size=128_100,
        hidden_size=1_536,
        num_hidden_layers=24,
        num_attention_heads=24,
        intermediate_size=6_144,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=0,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        relative_attention=False,
        max_relative_positions=-1,
        pad_token_id=0,
        position_biased_input=True,
        pos_att_type=None,
        pooler_dropout=0,
        pooler_hidden_act="gelu",
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.relative_attention = relative_attention
        self.max_relative_positions = max_relative_positions
        self.pad_token_id = pad_token_id
        self.position_biased_input = position_biased_input

        # Backwards compatibility: accept "p2c|c2p"-style pipe-separated strings.
        if isinstance(pos_att_type, str):
            pos_att_type = [x.strip() for x in pos_att_type.lower().split("|")]
        self.pos_att_type = pos_att_type
        self.vocab_size = vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Pooler width defaults to the model hidden size when not given.
        self.pooler_hidden_size = kwargs.get("pooler_hidden_size", hidden_size)
        self.pooler_dropout = pooler_dropout
        self.pooler_hidden_act = pooler_hidden_act


class DebertaV2OnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Dynamic axes for ONNX export; the extra "choice" axis only exists for
        # multiple-choice tasks.
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        if self._config.type_vocab_size > 0:
            return OrderedDict(
                [("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)]
            )
        else:
            return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)])

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
        tokenizer: "PreTrainedTokenizerBase" = None,
    ) -> Mapping[str, Any]:
        dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor, framework=framework)
        # token_type_ids are not a model input when type_vocab_size is 0.
        if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
            del dummy_inputs["token_type_ids"]
        return dummy_inputs
715
"""Filesystem helpers for the `datasets` library.

Bug fix: the obfuscated version gave all four public functions the same name
(each definition shadowed the previous one) and referenced ``_has_safs``
under a different name, so most of this module was unreachable.
"""

import importlib
import shutil
import threading
import warnings
from typing import List

import fsspec
import fsspec.asyn

from . import compression
from .hffilesystem import HfFileSystem


_has_s3fs = importlib.util.find_spec("s3fs") is not None

if _has_s3fs:
    from .s3filesystem import S3FileSystem  # noqa: F401  # (re-exported)

COMPRESSION_FILESYSTEMS: List[compression.BaseCompressedFileFileSystem] = [
    compression.Bz2FileSystem,
    compression.GzipFileSystem,
    compression.Lz4FileSystem,
    compression.XzFileSystem,
    compression.ZstdFileSystem,
]

# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
    if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
        warnings.warn(f"A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.")
    fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)


def extract_path_from_uri(dataset_path: str) -> str:
    """Strip any filesystem protocol prefix (e.g. ``s3://``) from *dataset_path*."""
    if "://" in dataset_path:
        dataset_path = dataset_path.split("://")[1]
    return dataset_path


def is_remote_filesystem(fs) -> bool:
    """Return True when *fs* is a non-local (remote) filesystem instance."""
    if fs is not None and fs.protocol != "file":
        return True
    else:
        return False


def rename(fs, src: str, dst: str):
    """Move *src* to *dst* on *fs*."""
    is_local = not is_remote_filesystem(fs)
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
    else:
        fs.mv(src, dst, recursive=True)


def _reset_fsspec_lock() -> None:
    """Clear fsspec's event-loop references/locks (needed after forking)."""
    if hasattr(fsspec.asyn, "reset_lock"):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
151
0
"""simple docstring""" import re import string import numpy as np import datasets _lowercase = ''' Returns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list. ''' _lowercase = ''' Args: predictions: List of predicted texts. references: List of reference texts. regexes_to_ignore: List, defaults to None. Regex expressions of characters to ignore when calculating the exact matches. Note: these regexes are removed from the input data before the changes based on the options below (e.g. ignore_case, ignore_punctuation, ignore_numbers) are applied. ignore_case: Boolean, defaults to False. If true, turns everything to lowercase so that capitalization differences are ignored. ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before comparing predictions and references. ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before comparing predictions and references. Returns: exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive. 
Examples: >>> exact_match = datasets.load_metric("exact_match") >>> refs = ["the cat", "theater", "YELLING", "agent007"] >>> preds = ["cat?", "theater", "yelling", "agent"] >>> results = exact_match.compute(references=refs, predictions=preds) >>> print(round(results["exact_match"], 1)) 25.0 >>> exact_match = datasets.load_metric("exact_match") >>> refs = ["the cat", "theater", "YELLING", "agent007"] >>> preds = ["cat?", "theater", "yelling", "agent"] >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True) >>> print(round(results["exact_match"], 1)) 50.0 >>> exact_match = datasets.load_metric("exact_match") >>> refs = ["the cat", "theater", "YELLING", "agent007"] >>> preds = ["cat?", "theater", "yelling", "agent"] >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True) >>> print(round(results["exact_match"], 1)) 75.0 >>> exact_match = datasets.load_metric("exact_match") >>> refs = ["the cat", "theater", "YELLING", "agent007"] >>> preds = ["cat?", "theater", "yelling", "agent"] >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True) >>> print(round(results["exact_match"], 1)) 100.0 >>> exact_match = datasets.load_metric("exact_match") >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."] >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."] >>> results = exact_match.compute(references=refs, predictions=preds) >>> print(round(results["exact_match"], 1)) 33.3 ''' _lowercase = ''' ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCAmelCase_ ( datasets.Metric ): '''simple docstring''' def 
_SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[int]: return datasets.MetricInfo( description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features( { 'predictions': datasets.Value('string' ,id='sequence' ), 'references': datasets.Value('string' ,id='sequence' ), } ) ,reference_urls=[] ,) def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : str ,A_ : List[Any] ,A_ : int=None ,A_ : List[Any]=False ,A_ : str=False ,A_ : Dict=False ,) -> List[Any]: if regexes_to_ignore is not None: for s in regexes_to_ignore: A = np.array([re.sub(A_ ,'' ,A_ ) for x in predictions] ) A = np.array([re.sub(A_ ,'' ,A_ ) for x in references] ) else: A = np.asarray(A_ ) A = np.asarray(A_ ) if ignore_case: A = np.char.lower(A_ ) A = np.char.lower(A_ ) if ignore_punctuation: A = string.punctuation.maketrans('' ,'' ,string.punctuation ) A = np.char.translate(A_ ,table=A_ ) A = np.char.translate(A_ ,table=A_ ) if ignore_numbers: A = string.digits.maketrans('' ,'' ,string.digits ) A = np.char.translate(A_ ,table=A_ ) A = np.char.translate(A_ ,table=A_ ) A = predictions == references return {"exact_match": np.mean(A_ ) * 100}
91
import os # Precomputes a list of the 100 first triangular numbers UpperCamelCase = [int(0.5 * n * (n + 1)) for n in range(1, 101)] def _A ( ): """simple docstring""" lowerCAmelCase__ = os.path.dirname(os.path.realpath(lowerCAmelCase_ ) ) lowerCAmelCase__ = os.path.join(lowerCAmelCase_ , "words.txt" ) lowerCAmelCase__ = "" with open(lowerCAmelCase_ ) as f: lowerCAmelCase__ = f.readline() lowerCAmelCase__ = [word.strip("\"" ) for word in words.strip("\r\n" ).split("," )] lowerCAmelCase__ = [ word for word in [sum(ord(lowerCAmelCase_ ) - 64 for x in word ) for word in words] if word in TRIANGULAR_NUMBERS ] return len(lowerCAmelCase_ ) if __name__ == "__main__": print(solution())
61
0
"""simple docstring""" from unittest import TestCase from datasets import Sequence, Value from datasets.arrow_dataset import Dataset class a ( __lowerCAmelCase ): """simple docstring""" def __A ( self ) -> List[str]: return [ {"col_1": 3, "col_2": "a"}, {"col_1": 2, "col_2": "b"}, {"col_1": 1, "col_2": "c"}, {"col_1": 0, "col_2": "d"}, ] def __A ( self ) -> Optional[int]: _UpperCAmelCase = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]} return Dataset.from_dict(lowerCAmelCase_ ) def __A ( self ) -> Dict: _UpperCAmelCase = self._create_example_records() _UpperCAmelCase = Dataset.from_list(lowerCAmelCase_ ) self.assertListEqual(dset.column_names , ["col_1", "col_2"] ) for i, r in enumerate(lowerCAmelCase_ ): self.assertDictEqual(lowerCAmelCase_ , example_records[i] ) def __A ( self ) -> str: _UpperCAmelCase = self._create_example_records() _UpperCAmelCase = Dataset.from_list(lowerCAmelCase_ ) _UpperCAmelCase = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]} ) self.assertEqual(dset.info , dset_from_dict.info ) def __A ( self ) -> Any: # checks what happens with missing columns _UpperCAmelCase = [{"col_1": 1}, {"col_2": "x"}] _UpperCAmelCase = Dataset.from_list(lowerCAmelCase_ ) self.assertDictEqual(dset[0] , {"col_1": 1} ) self.assertDictEqual(dset[1] , {"col_1": None} ) # NB: first record is used for columns def __A ( self ) -> Tuple: # checks if the type can be inferred from the second record _UpperCAmelCase = [{"col_1": []}, {"col_1": [1, 2]}] _UpperCAmelCase = Dataset.from_list(lowerCAmelCase_ ) self.assertEqual(dset.info.features["col_1"] , Sequence(Value("int64" ) ) ) def __A ( self ) -> Any: _UpperCAmelCase = Dataset.from_list([] ) self.assertEqual(len(lowerCAmelCase_ ) , 0 ) self.assertListEqual(dset.column_names , [] )
717
"""simple docstring""" import argparse import json from collections import OrderedDict from functools import partial from pathlib import Path import timm import torch from huggingface_hub import hf_hub_download from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor from transformers.utils import logging logging.set_verbosity_info() SCREAMING_SNAKE_CASE_ = logging.get_logger() def A__ ( A__ , A__ , A__ , A__ , A__ = True ) -> Any: '''simple docstring''' print(F"""Converting {name}...""" ) with torch.no_grad(): if hidden_sizes == 128: if name[-1] == "S": _UpperCAmelCase = timm.create_model("levit_128s" , pretrained=A__ ) else: _UpperCAmelCase = timm.create_model("levit_128" , pretrained=A__ ) if hidden_sizes == 192: _UpperCAmelCase = timm.create_model("levit_192" , pretrained=A__ ) if hidden_sizes == 256: _UpperCAmelCase = timm.create_model("levit_256" , pretrained=A__ ) if hidden_sizes == 384: _UpperCAmelCase = timm.create_model("levit_384" , pretrained=A__ ) from_model.eval() _UpperCAmelCase = LevitForImageClassificationWithTeacher(A__ ).eval() _UpperCAmelCase = OrderedDict() _UpperCAmelCase = from_model.state_dict() _UpperCAmelCase = list(from_model.state_dict().keys() ) _UpperCAmelCase = list(our_model.state_dict().keys() ) print(len(A__ ) , len(A__ ) ) for i in range(len(A__ ) ): _UpperCAmelCase = weights[og_keys[i]] our_model.load_state_dict(A__ ) _UpperCAmelCase = torch.randn((2, 3, 224, 224) ) _UpperCAmelCase = from_model(A__ ) _UpperCAmelCase = our_model(A__ ).logits assert torch.allclose(A__ , A__ ), "The model logits don't match the original one." 
_UpperCAmelCase = name print(A__ ) if push_to_hub: our_model.save_pretrained(save_directory / checkpoint_name ) _UpperCAmelCase = LevitImageProcessor() image_processor.save_pretrained(save_directory / checkpoint_name ) print(F"""Pushed {checkpoint_name}""" ) def A__ ( A__ , A__ = None , A__ = True ) -> Dict: '''simple docstring''' _UpperCAmelCase = "imagenet-1k-id2label.json" _UpperCAmelCase = 1000 _UpperCAmelCase = (1, num_labels) _UpperCAmelCase = "huggingface/label-files" _UpperCAmelCase = num_labels _UpperCAmelCase = json.load(open(hf_hub_download(A__ , A__ , repo_type="dataset" ) , "r" ) ) _UpperCAmelCase = {int(A__ ): v for k, v in idalabel.items()} _UpperCAmelCase = idalabel _UpperCAmelCase = {v: k for k, v in idalabel.items()} _UpperCAmelCase = partial(A__ , num_labels=A__ , idalabel=A__ , labelaid=A__ ) _UpperCAmelCase = { "levit-128S": 128, "levit-128": 128, "levit-192": 192, "levit-256": 256, "levit-384": 384, } _UpperCAmelCase = { "levit-128S": ImageNetPreTrainedConfig( hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ), "levit-128": ImageNetPreTrainedConfig( hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ), "levit-192": ImageNetPreTrainedConfig( hidden_sizes=[192, 288, 384] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ), "levit-256": ImageNetPreTrainedConfig( hidden_sizes=[256, 384, 512] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ), "levit-384": ImageNetPreTrainedConfig( hidden_sizes=[384, 512, 768] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ), } if model_name: convert_weight_and_push( names_to_hidden_sizes[model_name] , A__ , names_to_config[model_name] , A__ , A__ ) else: for model_name, config in names_to_config.items(): 
convert_weight_and_push(names_to_hidden_sizes[model_name] , A__ , A__ , A__ , A__ ) return config, expected_shape if __name__ == "__main__": SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--model_name''', default=None, type=str, help='''The name of the model you wish to convert, it must be one of the supported Levit* architecture,''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default='''levit-dump-folder/''', type=Path, required=False, help='''Path to the output PyTorch model directory.''', ) parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''') parser.add_argument( '''--no-push_to_hub''', dest='''push_to_hub''', action='''store_false''', help='''Do not push model and image processor to the hub''', ) SCREAMING_SNAKE_CASE_ = parser.parse_args() SCREAMING_SNAKE_CASE_ = args.pytorch_dump_folder_path pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True) convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
579
0
import inspect import os import re from transformers.configuration_utils import PretrainedConfig from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_config_docstrings.py lowerCamelCase__ : str = """src/transformers""" # This is to make sure the transformers module imported is the one in the repo. lowerCamelCase__ : Union[str, Any] = direct_transformers_import(PATH_TO_TRANSFORMERS) lowerCamelCase__ : List[str] = transformers.models.auto.configuration_auto.CONFIG_MAPPING lowerCamelCase__ : int = { # used to compute the property `self.chunk_length` """EncodecConfig""": ["""overlap"""], # used as `self.bert_model = BertModel(config, ...)` """DPRConfig""": True, # not used in modeling files, but it's an important information """FSMTConfig""": ["""langs"""], # used internally in the configuration class file """GPTNeoConfig""": ["""attention_types"""], # used internally in the configuration class file """EsmConfig""": ["""is_folding_model"""], # used during training (despite we don't have training script for these models yet) """Mask2FormerConfig""": ["""ignore_value"""], # `ignore_value` used during training (despite we don't have training script for these models yet) # `norm` used in conversion script (despite not using in the modeling file) """OneFormerConfig""": ["""ignore_value""", """norm"""], # used during preprocessing and collation, see `collating_graphormer.py` """GraphormerConfig""": ["""spatial_pos_max"""], # used internally in the configuration class file """T5Config""": ["""feed_forward_proj"""], # used internally in the configuration class file # `tokenizer_class` get default value `T5Tokenizer` intentionally """MT5Config""": ["""feed_forward_proj""", """tokenizer_class"""], """UMT5Config""": ["""feed_forward_proj""", """tokenizer_class"""], # used internally in the configuration class file """LongT5Config""": 
["""feed_forward_proj"""], # used internally in the configuration class file """SwitchTransformersConfig""": ["""feed_forward_proj"""], # having default values other than `1e-5` - we can't fix them without breaking """BioGptConfig""": ["""layer_norm_eps"""], # having default values other than `1e-5` - we can't fix them without breaking """GLPNConfig""": ["""layer_norm_eps"""], # having default values other than `1e-5` - we can't fix them without breaking """SegformerConfig""": ["""layer_norm_eps"""], # having default values other than `1e-5` - we can't fix them without breaking """CvtConfig""": ["""layer_norm_eps"""], # having default values other than `1e-5` - we can't fix them without breaking """PerceiverConfig""": ["""layer_norm_eps"""], # used internally to calculate the feature size """InformerConfig""": ["""num_static_real_features""", """num_time_features"""], # used internally to calculate the feature size """TimeSeriesTransformerConfig""": ["""num_static_real_features""", """num_time_features"""], # used internally to calculate the feature size """AutoformerConfig""": ["""num_static_real_features""", """num_time_features"""], # used internally to calculate `mlp_dim` """SamVisionConfig""": ["""mlp_ratio"""], # For (head) training, but so far not implemented """ClapAudioConfig""": ["""num_classes"""], # Not used, but providing useful information to users """SpeechT5HifiGanConfig""": ["""sampling_rate"""], } # TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure SPECIAL_CASES_TO_ALLOW.update( { """CLIPSegConfig""": True, """DeformableDetrConfig""": True, """DetaConfig""": True, """DinatConfig""": True, """DonutSwinConfig""": True, """EfficientFormerConfig""": True, """FSMTConfig""": True, """JukeboxConfig""": True, """LayoutLMv2Config""": True, """MaskFormerSwinConfig""": True, """MT5Config""": True, """NatConfig""": True, """OneFormerConfig""": True, """PerceiverConfig""": True, """RagConfig""": 
True, """SpeechT5Config""": True, """SwinConfig""": True, """Swin2SRConfig""": True, """Swinv2Config""": True, """SwitchTransformersConfig""": True, """TableTransformerConfig""": True, """TapasConfig""": True, """TransfoXLConfig""": True, """UniSpeechConfig""": True, """UniSpeechSatConfig""": True, """WavLMConfig""": True, """WhisperConfig""": True, # TODO: @Arthur (for `alignment_head` and `alignment_layer`) """JukeboxPriorConfig""": True, # TODO: @Younes (for `is_decoder`) """Pix2StructTextConfig""": True, } ) def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Dict: snake_case__ = False for attribute in attributes: for modeling_source in source_strings: # check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)` if ( F"""config.{attribute}""" in modeling_source or F"""getattr(config, \"{attribute}\"""" in modeling_source or F"""getattr(self.config, \"{attribute}\"""" in modeling_source ): snake_case__ = True # Deal with multi-line cases elif ( re.search( rF"""getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*\"{attribute}\"""" , __lowerCAmelCase , ) is not None ): snake_case__ = True # `SequenceSummary` is called with `SequenceSummary(config)` elif attribute in [ "summary_type", "summary_use_proj", "summary_activation", "summary_last_dropout", "summary_proj_to_labels", "summary_first_dropout", ]: if "SequenceSummary" in modeling_source: snake_case__ = True if attribute_used: break if attribute_used: break # common and important attributes, even if they do not always appear in the modeling files snake_case__ = [ '''bos_index''', '''eos_index''', '''pad_index''', '''unk_index''', '''mask_index''', '''image_size''', '''use_cache''', '''out_features''', '''out_indices''', ] snake_case__ = ['''encoder_no_repeat_ngram_size'''] # Special cases to be allowed snake_case__ = True if not attribute_used: snake_case__ = False for attribute in attributes: # 
Allow if the default value in the configuration class is different from the one in `PretrainedConfig` if attribute in ["is_encoder_decoder"] and default_value is True: snake_case__ = True elif attribute in ["tie_word_embeddings"] and default_value is False: snake_case__ = True # Allow cases without checking the default value in the configuration class elif attribute in attributes_to_allow + attributes_used_in_generation: snake_case__ = True elif attribute.endswith('''_token_id''' ): snake_case__ = True # configuration class specific cases if not case_allowed: snake_case__ = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__ , [] ) snake_case__ = allowed_cases is True or attribute in allowed_cases return attribute_used or case_allowed def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> List[str]: snake_case__ = dict(inspect.signature(config_class.__init__ ).parameters ) snake_case__ = [x for x in list(signature.keys() ) if x not in ['''self''', '''kwargs''']] snake_case__ = [signature[param].default for param in parameter_names] # If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long # as one variant is used, the test should pass snake_case__ = {} if len(config_class.attribute_map ) > 0: snake_case__ = {v: k for k, v in config_class.attribute_map.items()} # Get the path to modeling source files snake_case__ = inspect.getsourcefile(__lowerCAmelCase ) snake_case__ = os.path.dirname(__lowerCAmelCase ) # Let's check against all frameworks: as long as one framework uses an attribute, we are good. 
snake_case__ = [os.path.join(__lowerCAmelCase , __lowerCAmelCase ) for fn in os.listdir(__lowerCAmelCase ) if fn.startswith('''modeling_''' )] # Get the source code strings snake_case__ = [] for path in modeling_paths: if os.path.isfile(__lowerCAmelCase ): with open(__lowerCAmelCase ) as fp: modeling_sources.append(fp.read() ) snake_case__ = [] for config_param, default_value in zip(__lowerCAmelCase , __lowerCAmelCase ): # `attributes` here is all the variant names for `config_param` snake_case__ = [config_param] # some configuration classes have non-empty `attribute_map`, and both names could be used in the # corresponding modeling files. As long as one of them appears, it is fine. if config_param in reversed_attribute_map: attributes.append(reversed_attribute_map[config_param] ) if not check_attribute_being_used(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): unused_attributes.append(attributes[0] ) return sorted(__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( ) -> int: snake_case__ = {} for _config_class in list(CONFIG_MAPPING.values() ): # Skip deprecated models if "models.deprecated" in _config_class.__module__: continue # Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.) 
snake_case__ = [ cls for name, cls in inspect.getmembers( inspect.getmodule(_config_class ) , lambda __lowerCAmelCase : inspect.isclass(__lowerCAmelCase ) and issubclass(__lowerCAmelCase , __lowerCAmelCase ) and inspect.getmodule(__lowerCAmelCase ) == inspect.getmodule(_config_class ) , ) ] for config_class in config_classes_in_module: snake_case__ = check_config_attributes_being_used(__lowerCAmelCase ) if len(__lowerCAmelCase ) > 0: snake_case__ = unused_attributes if len(__lowerCAmelCase ) > 0: snake_case__ = '''The following configuration classes contain unused attributes in the corresponding modeling files:\n''' for name, attributes in configs_with_unused_attributes.items(): error += F"""{name}: {attributes}\n""" raise ValueError(__lowerCAmelCase ) if __name__ == "__main__": check_config_attributes()
33
"""simple docstring""" import warnings from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class a ( lowercase ): UpperCamelCase : Tuple = ["""image_processor""", """tokenizer"""] UpperCamelCase : List[str] = """LayoutLMv2ImageProcessor""" UpperCamelCase : Optional[int] = ("""LayoutXLMTokenizer""", """LayoutXLMTokenizerFast""") def __init__( self , UpperCamelCase_=None , UpperCamelCase_=None , **UpperCamelCase_ ): if "feature_extractor" in kwargs: warnings.warn( 'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`' ' instead.' , UpperCamelCase_ , ) UpperCAmelCase__ : Tuple = kwargs.pop('feature_extractor' ) UpperCAmelCase__ : Optional[Any] = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('You need to specify an `image_processor`.' ) if tokenizer is None: raise ValueError('You need to specify a `tokenizer`.' ) super().__init__(UpperCamelCase_ , UpperCamelCase_ ) def __call__( self , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = True , UpperCamelCase_ = False , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = 0 , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = False , UpperCamelCase_ = False , UpperCamelCase_ = False , UpperCamelCase_ = False , UpperCamelCase_ = True , UpperCamelCase_ = None , **UpperCamelCase_ , ): # verify input if self.image_processor.apply_ocr and (boxes is not None): raise ValueError( 'You cannot provide bounding boxes ' 'if you initialized the image processor with apply_ocr set to True.' 
) if self.image_processor.apply_ocr and (word_labels is not None): raise ValueError( 'You cannot provide word labels if you initialized the image processor with apply_ocr set to True.' ) if return_overflowing_tokens is True and return_offsets_mapping is False: raise ValueError('You cannot return overflowing tokens without returning the offsets mapping.' ) # first, apply the image processor UpperCAmelCase__ : List[Any] = self.image_processor(images=UpperCamelCase_ , return_tensors=UpperCamelCase_ ) # second, apply the tokenizer if text is not None and self.image_processor.apply_ocr and text_pair is None: if isinstance(UpperCamelCase_ , UpperCamelCase_ ): UpperCAmelCase__ : Any = [text] # add batch dimension (as the image processor always adds a batch dimension) UpperCAmelCase__ : Union[str, Any] = features['words'] UpperCAmelCase__ : List[Any] = self.tokenizer( text=text if text is not None else features['words'] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['boxes'] , word_labels=UpperCamelCase_ , add_special_tokens=UpperCamelCase_ , padding=UpperCamelCase_ , truncation=UpperCamelCase_ , max_length=UpperCamelCase_ , stride=UpperCamelCase_ , pad_to_multiple_of=UpperCamelCase_ , return_token_type_ids=UpperCamelCase_ , return_attention_mask=UpperCamelCase_ , return_overflowing_tokens=UpperCamelCase_ , return_special_tokens_mask=UpperCamelCase_ , return_offsets_mapping=UpperCamelCase_ , return_length=UpperCamelCase_ , verbose=UpperCamelCase_ , return_tensors=UpperCamelCase_ , **UpperCamelCase_ , ) # add pixel values UpperCAmelCase__ : List[Any] = features.pop('pixel_values' ) if return_overflowing_tokens is True: UpperCAmelCase__ : int = self.get_overflowing_images(UpperCamelCase_ , encoded_inputs['overflow_to_sample_mapping'] ) UpperCAmelCase__ : Dict = images return encoded_inputs def __snake_case ( self , UpperCamelCase_ , UpperCamelCase_ ): # in case there's an overflow, ensure each `input_ids` sample is 
mapped to its corresponding image UpperCAmelCase__ : Dict = [] for sample_idx in overflow_to_sample_mapping: images_with_overflow.append(images[sample_idx] ) if len(UpperCamelCase_ ) != len(UpperCamelCase_ ): raise ValueError( 'Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got' F''' {len(UpperCamelCase_ )} and {len(UpperCamelCase_ )}''' ) return images_with_overflow def __snake_case ( self , *UpperCamelCase_ , **UpperCamelCase_ ): return self.tokenizer.batch_decode(*UpperCamelCase_ , **UpperCamelCase_ ) def __snake_case ( self , *UpperCamelCase_ , **UpperCamelCase_ ): return self.tokenizer.decode(*UpperCamelCase_ , **UpperCamelCase_ ) @property def __snake_case ( self ): return ["input_ids", "bbox", "attention_mask", "image"] @property def __snake_case ( self ): warnings.warn( '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , UpperCamelCase_ , ) return self.image_processor_class @property def __snake_case ( self ): warnings.warn( '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , UpperCamelCase_ , ) return self.image_processor
110
0
import logging import math import os from dataclasses import dataclass, field from glob import glob from typing import Optional from torch.utils.data import ConcatDataset import transformers from transformers import ( CONFIG_MAPPING, MODEL_WITH_LM_HEAD_MAPPING, AutoConfig, AutoModelWithLMHead, AutoTokenizer, DataCollatorForLanguageModeling, DataCollatorForPermutationLanguageModeling, DataCollatorForWholeWordMask, HfArgumentParser, LineByLineTextDataset, LineByLineWithRefDataset, PreTrainedTokenizer, TextDataset, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import is_main_process _UpperCAmelCase = logging.getLogger(__name__) _UpperCAmelCase = list(MODEL_WITH_LM_HEAD_MAPPING.keys()) _UpperCAmelCase = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class __magic_name__ : """simple docstring""" _UpperCamelCase = field( default=lowercase_ ,metadata={ "help": ( "The model checkpoint for weights initialization. Leave None if you want to train a model from" " scratch." ) } ,) _UpperCamelCase = field( default=lowercase_ ,metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(lowercase_ )} ,) _UpperCamelCase = field( default=lowercase_ ,metadata={"help": "Pretrained config name or path if not the same as model_name"} ) _UpperCamelCase = field( default=lowercase_ ,metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) _UpperCamelCase = field( default=lowercase_ ,metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} ,) @dataclass class __magic_name__ : """simple docstring""" _UpperCamelCase = field( default=lowercase_ ,metadata={"help": "The input training data file (a text file)."} ) _UpperCamelCase = field( default=lowercase_ ,metadata={ "help": ( "The input training data files (multiple files in glob format). 
" "Very often splitting large files to smaller files can prevent tokenizer going out of memory" ) } ,) _UpperCamelCase = field( default=lowercase_ ,metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."} ,) _UpperCamelCase = field( default=lowercase_ ,metadata={"help": "An optional input train ref data file for whole word mask in Chinese."} ,) _UpperCamelCase = field( default=lowercase_ ,metadata={"help": "An optional input eval ref data file for whole word mask in Chinese."} ,) _UpperCamelCase = field( default=lowercase_ ,metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."} ,) _UpperCamelCase = field( default=lowercase_ ,metadata={"help": "Train with masked-language modeling loss instead of language modeling."} ) _UpperCamelCase = field(default=lowercase_ ,metadata={"help": "Whether ot not to use whole word mask."} ) _UpperCamelCase = field( default=0.1_5 ,metadata={"help": "Ratio of tokens to mask for masked language modeling loss"} ) _UpperCamelCase = field( default=1 / 6 ,metadata={ "help": ( "Ratio of length of a span of masked tokens to surrounding context length for permutation language" " modeling." ) } ,) _UpperCamelCase = field( default=5 ,metadata={"help": "Maximum length of a span of masked tokens for permutation language modeling."} ) _UpperCamelCase = field( default=-1 ,metadata={ "help": ( "Optional input sequence length after tokenization." "The training dataset will be truncated in block of this size for training." "Default to the model max input length for single sentence inputs (take into account special tokens)." 
) } ,) _UpperCamelCase = field( default=lowercase_ ,metadata={"help": "Overwrite the cached training and evaluation sets"} ) def _lowerCamelCase ( _a , _a , _a = False , _a = None , ): """simple docstring""" def _dataset(_a , _a=None ): if args.line_by_line: if ref_path is not None: if not args.whole_word_mask or not args.mlm: raise ValueError('''You need to set world whole masking and mlm to True for Chinese Whole Word Mask''' ) return LineByLineWithRefDataset( tokenizer=_a , file_path=_a , block_size=args.block_size , ref_path=_a , ) return LineByLineTextDataset(tokenizer=_a , file_path=_a , block_size=args.block_size ) else: return TextDataset( tokenizer=_a , file_path=_a , block_size=args.block_size , overwrite_cache=args.overwrite_cache , cache_dir=_a , ) if evaluate: return _dataset(args.eval_data_file , args.eval_ref_file ) elif args.train_data_files: return ConcatDataset([_dataset(_a ) for f in glob(args.train_data_files )] ) else: return _dataset(args.train_data_file , args.train_ref_file ) def _lowerCamelCase ( ): """simple docstring""" _lowerCamelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = parser.parse_args_into_dataclasses() if data_args.eval_data_file is None and training_args.do_eval: raise ValueError( '''Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file ''' '''or remove the --do_eval argument.''' ) if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( F'''Output directory ({training_args.output_dir}) already exists and is not empty. 
Use''' ''' --overwrite_output_dir to overcome.''' ) # Setup logging logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info('''Training/evaluation parameters %s''' , _a ) # Set seed set_seed(training_args.seed ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. if model_args.config_name: _lowerCamelCase = AutoConfig.from_pretrained(model_args.config_name , cache_dir=model_args.cache_dir ) elif model_args.model_name_or_path: _lowerCamelCase = AutoConfig.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir ) else: _lowerCamelCase = CONFIG_MAPPING[model_args.model_type]() logger.warning('''You are instantiating a new config instance from scratch.''' ) if model_args.tokenizer_name: _lowerCamelCase = AutoTokenizer.from_pretrained(model_args.tokenizer_name , cache_dir=model_args.cache_dir ) elif model_args.model_name_or_path: _lowerCamelCase = AutoTokenizer.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir ) else: raise ValueError( '''You are instantiating a new tokenizer from scratch. 
This is not supported, but you can do it from another''' ''' script, save it,and load it from here, using --tokenizer_name''' ) if model_args.model_name_or_path: _lowerCamelCase = AutoModelWithLMHead.from_pretrained( model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=_a , cache_dir=model_args.cache_dir , ) else: logger.info('''Training new model from scratch''' ) _lowerCamelCase = AutoModelWithLMHead.from_config(_a ) model.resize_token_embeddings(len(_a ) ) if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm: raise ValueError( '''BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the''' '''--mlm flag (masked language modeling).''' ) if data_args.block_size <= 0: _lowerCamelCase = tokenizer.max_len # Our input block size will be the max possible for the model else: _lowerCamelCase = min(data_args.block_size , tokenizer.max_len ) # Get datasets _lowerCamelCase = ( get_dataset(_a , tokenizer=_a , cache_dir=model_args.cache_dir ) if training_args.do_train else None ) _lowerCamelCase = ( get_dataset(_a , tokenizer=_a , evaluate=_a , cache_dir=model_args.cache_dir ) if training_args.do_eval else None ) if config.model_type == "xlnet": _lowerCamelCase = DataCollatorForPermutationLanguageModeling( tokenizer=_a , plm_probability=data_args.plm_probability , max_span_length=data_args.max_span_length , ) else: if data_args.mlm and data_args.whole_word_mask: _lowerCamelCase = DataCollatorForWholeWordMask( tokenizer=_a , mlm_probability=data_args.mlm_probability ) else: _lowerCamelCase = DataCollatorForLanguageModeling( tokenizer=_a , mlm=data_args.mlm , mlm_probability=data_args.mlm_probability ) # Initialize our Trainer _lowerCamelCase = Trainer( model=_a , args=_a , data_collator=_a , train_dataset=_a , eval_dataset=_a , prediction_loss_only=_a , ) # Training if training_args.do_train: _lowerCamelCase = ( model_args.model_name_or_path if 
model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path ) else None ) trainer.train(model_path=_a ) trainer.save_model() # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) if trainer.is_world_master(): tokenizer.save_pretrained(training_args.output_dir ) # Evaluation _lowerCamelCase = {} if training_args.do_eval: logger.info('''*** Evaluate ***''' ) _lowerCamelCase = trainer.evaluate() _lowerCamelCase = math.exp(eval_output['''eval_loss'''] ) _lowerCamelCase = {'''perplexity''': perplexity} _lowerCamelCase = os.path.join(training_args.output_dir , '''eval_results_lm.txt''' ) if trainer.is_world_master(): with open(_a , '''w''' ) as writer: logger.info('''***** Eval results *****''' ) for key in sorted(result.keys() ): logger.info(''' %s = %s''' , _a , str(result[key] ) ) writer.write('''%s = %s\n''' % (key, str(result[key] )) ) results.update(_a ) return results def _lowerCamelCase ( _a ): """simple docstring""" main() if __name__ == "__main__": main()
297
import warnings from typing import List, Optional, Union from ...image_utils import ImageInput from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class __magic_name__ ( lowercase_ ): """simple docstring""" _UpperCamelCase = ["image_processor", "tokenizer"] _UpperCamelCase = "FlavaImageProcessor" _UpperCamelCase = ("BertTokenizer", "BertTokenizerFast") def __init__( self , a__=None , a__=None , **a__ ): _lowerCamelCase = None if "feature_extractor" in kwargs: warnings.warn( '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`''' ''' instead.''' , a__ , ) _lowerCamelCase = kwargs.pop('''feature_extractor''' ) _lowerCamelCase = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('''You need to specify an `image_processor`.''' ) if tokenizer is None: raise ValueError('''You need to specify a `tokenizer`.''' ) super().__init__(a__ , a__ ) _lowerCamelCase = self.image_processor def __call__( self , a__ = None , a__ = None , a__ = True , a__ = False , a__ = False , a__ = None , a__ = 0 , a__ = None , a__ = None , a__ = None , a__ = None , a__ = None , a__ = False , a__ = False , a__ = False , a__ = False , a__ = True , a__ = None , **a__ , ): if text is None and images is None: raise ValueError('''You have to specify either text or images. 
Both cannot be none.''' ) if text is not None: _lowerCamelCase = self.tokenizer( text=a__ , add_special_tokens=a__ , padding=a__ , truncation=a__ , max_length=a__ , stride=a__ , pad_to_multiple_of=a__ , return_token_type_ids=a__ , return_attention_mask=a__ , return_overflowing_tokens=a__ , return_special_tokens_mask=a__ , return_offsets_mapping=a__ , return_length=a__ , verbose=a__ , return_tensors=a__ , **a__ , ) if images is not None: _lowerCamelCase = self.image_processor( a__ , return_image_mask=a__ , return_codebook_pixels=a__ , return_tensors=a__ , **a__ , ) if text is not None and images is not None: encoding.update(a__ ) return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**a__ ) , tensor_type=a__ ) def _UpperCAmelCase ( self , *a__ , **a__ ): return self.tokenizer.batch_decode(*a__ , **a__ ) def _UpperCAmelCase ( self , *a__ , **a__ ): return self.tokenizer.decode(*a__ , **a__ ) @property def _UpperCAmelCase ( self ): _lowerCamelCase = self.tokenizer.model_input_names _lowerCamelCase = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) @property def _UpperCAmelCase ( self ): warnings.warn( '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , a__ , ) return self.image_processor_class @property def _UpperCAmelCase ( self ): warnings.warn( '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , a__ , ) return self.image_processor
297
1
'''simple docstring''' from math import factorial class snake_case__ : def __init__( self : List[Any] , __a : List[Any] , __a : Any ) -> str: '''simple docstring''' __snake_case : Optional[Any] = real if isinstance(__a , __a ): __snake_case : Union[str, Any] = [1] * rank else: __snake_case : List[str] = rank def __repr__( self : List[Any] ) -> Dict: '''simple docstring''' return ( f'''{self.real}+''' f'''{"+".join(str(__a )+"E"+str(n+1 )for n,dual in enumerate(self.duals ) )}''' ) def A_ ( self : Optional[int] ) -> int: '''simple docstring''' __snake_case : Union[str, Any] = self.duals.copy() while cur[-1] == 0: cur.pop(-1 ) return Dual(self.real , __a ) def __add__( self : List[str] , __a : Dict ) -> str: '''simple docstring''' if not isinstance(__a , __a ): return Dual(self.real + other , self.duals ) __snake_case : Optional[Any] = self.duals.copy() __snake_case : Optional[Any] = other.duals.copy() if len(__a ) > len(__a ): o_dual.extend([1] * (len(__a ) - len(__a )) ) elif len(__a ) < len(__a ): s_dual.extend([1] * (len(__a ) - len(__a )) ) __snake_case : Dict = [] for i in range(len(__a ) ): new_duals.append(s_dual[i] + o_dual[i] ) return Dual(self.real + other.real , __a ) A__ = __add__ def __sub__( self : Optional[int] , __a : Tuple ) -> List[Any]: '''simple docstring''' return self + other * -1 def __mul__( self : int , __a : List[Any] ) -> Dict: '''simple docstring''' if not isinstance(__a , __a ): __snake_case : List[str] = [] for i in self.duals: new_duals.append(i * other ) return Dual(self.real * other , __a ) __snake_case : List[Any] = [0] * (len(self.duals ) + len(other.duals ) + 1) for i, item in enumerate(self.duals ): for j, jtem in enumerate(other.duals ): new_duals[i + j + 1] += item * jtem for k in range(len(self.duals ) ): new_duals[k] += self.duals[k] * other.real for index in range(len(other.duals ) ): new_duals[index] += other.duals[index] * self.real return Dual(self.real * other.real , __a ) A__ = __mul__ def __truediv__( self : 
Optional[int] , __a : Any ) -> int: '''simple docstring''' if not isinstance(__a , __a ): __snake_case : Any = [] for i in self.duals: new_duals.append(i / other ) return Dual(self.real / other , __a ) raise ValueError def __floordiv__( self : Optional[int] , __a : Union[str, Any] ) -> Any: '''simple docstring''' if not isinstance(__a , __a ): __snake_case : Tuple = [] for i in self.duals: new_duals.append(i // other ) return Dual(self.real // other , __a ) raise ValueError def __pow__( self : List[Any] , __a : List[Any] ) -> Union[str, Any]: '''simple docstring''' if n < 0 or isinstance(__a , __a ): raise ValueError('power must be a positive integer' ) if n == 0: return 1 if n == 1: return self __snake_case : List[str] = self for _ in range(n - 1 ): x *= self return x def a_ ( _UpperCAmelCase : Union[str, Any] ,_UpperCAmelCase : Optional[int] ,_UpperCAmelCase : List[Any] ) -> Tuple: if not callable(_UpperCAmelCase ): raise ValueError('differentiate() requires a function as input for func' ) if not isinstance(_UpperCAmelCase ,(float, int) ): raise ValueError('differentiate() requires a float as input for position' ) if not isinstance(_UpperCAmelCase ,_UpperCAmelCase ): raise ValueError('differentiate() requires an int as input for order' ) __snake_case : Optional[int] = Dual(_UpperCAmelCase ,1 ) __snake_case : List[str] = func(_UpperCAmelCase ) if order == 0: return result.real return result.duals[order - 1] * factorial(_UpperCAmelCase ) if __name__ == "__main__": import doctest doctest.testmod() def a_ ( _UpperCAmelCase : Dict ) -> Any: return y**2 * y**4 print(differentiate(f, 9, 2))
286
'''simple docstring''' import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import XLMRobertaTokenizerFast from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class snake_case__ ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ): A__ = KandinskyInpaintPipeline A__ = ['''prompt''', '''image_embeds''', '''negative_image_embeds''', '''image''', '''mask_image'''] A__ = [ '''prompt''', '''negative_prompt''', '''image_embeds''', '''negative_image_embeds''', '''image''', '''mask_image''', ] A__ = [ '''generator''', '''height''', '''width''', '''latents''', '''guidance_scale''', '''negative_prompt''', '''num_inference_steps''', '''return_dict''', '''guidance_scale''', '''num_images_per_prompt''', '''output_type''', '''return_dict''', ] A__ = False @property def A_ ( self : Dict ) -> Union[str, Any]: '''simple docstring''' return 32 @property def A_ ( self : Optional[int] ) -> Tuple: '''simple docstring''' return 32 @property def A_ ( self : Dict ) -> str: '''simple docstring''' return self.time_input_dim @property def A_ ( self : Dict ) -> Any: '''simple docstring''' return self.time_input_dim * 4 @property def A_ ( self : List[Any] ) -> List[str]: '''simple docstring''' return 100 @property def A_ ( self : int ) -> Union[str, Any]: '''simple docstring''' __snake_case : List[str] = XLMRobertaTokenizerFast.from_pretrained('YiYiXu/tiny-random-mclip-base' ) return tokenizer @property def A_ ( self : Tuple ) -> str: '''simple docstring''' torch.manual_seed(0 ) __snake_case : str = MCLIPConfig( 
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1005 , ) __snake_case : List[Any] = MultilingualCLIP(__a ) __snake_case : List[str] = text_encoder.eval() return text_encoder @property def A_ ( self : Dict ) -> List[str]: '''simple docstring''' torch.manual_seed(0 ) __snake_case : Tuple = { 'in_channels': 9, # Out channels is double in channels because predicts mean and variance 'out_channels': 8, 'addition_embed_type': 'text_image', 'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'), 'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'), 'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn', 'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2), 'layers_per_block': 1, 'encoder_hid_dim': self.text_embedder_hidden_size, 'encoder_hid_dim_type': 'text_image_proj', 'cross_attention_dim': self.cross_attention_dim, 'attention_head_dim': 4, 'resnet_time_scale_shift': 'scale_shift', 'class_embed_type': None, } __snake_case : Any = UNetaDConditionModel(**__a ) return model @property def A_ ( self : List[str] ) -> List[Any]: '''simple docstring''' return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def A_ ( self : int ) -> List[str]: '''simple docstring''' torch.manual_seed(0 ) __snake_case : Optional[int] = VQModel(**self.dummy_movq_kwargs ) return model def A_ ( self : Dict ) -> Tuple: '''simple docstring''' __snake_case : Tuple = self.dummy_text_encoder __snake_case : str = self.dummy_tokenizer __snake_case : Any = self.dummy_unet 
__snake_case : Optional[Any] = self.dummy_movq __snake_case : Optional[Any] = DDIMScheduler( num_train_timesteps=1000 , beta_schedule='linear' , beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , clip_sample=__a , set_alpha_to_one=__a , steps_offset=1 , prediction_type='epsilon' , thresholding=__a , ) __snake_case : List[str] = { 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'unet': unet, 'scheduler': scheduler, 'movq': movq, } return components def A_ ( self : List[str] , __a : Dict , __a : int=0 ) -> Optional[Any]: '''simple docstring''' __snake_case : int = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(__a ) ).to(__a ) __snake_case : str = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(__a ) # create init_image __snake_case : Any = floats_tensor((1, 3, 64, 64) , rng=random.Random(__a ) ).to(__a ) __snake_case : Optional[int] = image.cpu().permute(0 , 2 , 3 , 1 )[0] __snake_case : List[str] = Image.fromarray(np.uinta(__a ) ).convert('RGB' ).resize((256, 256) ) # create mask __snake_case : Optional[int] = np.ones((64, 64) , dtype=np.floataa ) __snake_case : List[Any] = 0 if str(__a ).startswith('mps' ): __snake_case : Any = torch.manual_seed(__a ) else: __snake_case : Optional[int] = torch.Generator(device=__a ).manual_seed(__a ) __snake_case : int = { 'prompt': 'horse', 'image': init_image, 'mask_image': mask, 'image_embeds': image_embeds, 'negative_image_embeds': negative_image_embeds, 'generator': generator, 'height': 64, 'width': 64, 'num_inference_steps': 2, 'guidance_scale': 4.0, 'output_type': 'np', } return inputs def A_ ( self : Any ) -> int: '''simple docstring''' __snake_case : List[str] = 'cpu' __snake_case : Tuple = self.get_dummy_components() __snake_case : Optional[int] = self.pipeline_class(**__a ) __snake_case : Union[str, Any] = pipe.to(__a ) pipe.set_progress_bar_config(disable=__a ) __snake_case : Optional[Any] = pipe(**self.get_dummy_inputs(__a ) ) __snake_case : Dict = output.images 
__snake_case : Union[str, Any] = pipe( **self.get_dummy_inputs(__a ) , return_dict=__a , )[0] __snake_case : int = image[0, -3:, -3:, -1] __snake_case : Optional[int] = image_from_tuple[0, -3:, -3:, -1] print(f'''image.shape {image.shape}''' ) assert image.shape == (1, 64, 64, 3) __snake_case : List[Any] = np.array( [0.8_3_2_6_9_1_9, 0.7_3_7_9_0_4_6_7, 0.2_0_9_1_8_5_8_1, 0.9_3_0_9_6_1_2, 0.5_5_1_1_7_9_1, 0.4_3_7_1_3_3_2_8, 0.5_5_1_3_3_2_1, 0.4_9_9_2_2_9_3_4, 0.5_9_4_9_7_7_8_6] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 ), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}''' assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 ), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}''' def A_ ( self : List[Any] ) -> List[str]: '''simple docstring''' super().test_inference_batch_single_identical(expected_max_diff=3e-3 ) @slow @require_torch_gpu class snake_case__ ( unittest.TestCase ): def A_ ( self : Dict ) -> List[str]: '''simple docstring''' # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def A_ ( self : Optional[int] ) -> List[Any]: '''simple docstring''' __snake_case : Union[str, Any] = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy' ) __snake_case : Tuple = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' ) __snake_case : Tuple = np.ones((768, 768) , dtype=np.floataa ) __snake_case : Tuple = 0 __snake_case : Tuple = 'a hat' __snake_case : Optional[Any] = KandinskyPriorPipeline.from_pretrained( 'kandinsky-community/kandinsky-2-1-prior' , torch_dtype=torch.floataa ) pipe_prior.to(__a ) __snake_case : int = KandinskyInpaintPipeline.from_pretrained( 'kandinsky-community/kandinsky-2-1-inpaint' , torch_dtype=torch.floataa ) __snake_case : Optional[Any] = 
pipeline.to(__a ) pipeline.set_progress_bar_config(disable=__a ) __snake_case : Any = torch.Generator(device='cpu' ).manual_seed(0 ) __snake_case , __snake_case : Union[str, Any] = pipe_prior( __a , generator=__a , num_inference_steps=5 , negative_prompt='' , ).to_tuple() __snake_case : List[str] = pipeline( __a , image=__a , mask_image=__a , image_embeds=__a , negative_image_embeds=__a , generator=__a , num_inference_steps=100 , height=768 , width=768 , output_type='np' , ) __snake_case : int = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(__a , __a )
286
1
def a(lowercase__ ): '''simple docstring''' snake_case_ = 1 for i in range(1 , num + 1 ): fact *= i return fact def a(lowercase__ ): '''simple docstring''' snake_case_ = 0 while number > 0: snake_case_ = number % 10 sum_of_digits += last_digit snake_case_ = number // 10 # Removing the last_digit from the given number return sum_of_digits def a(lowercase__ = 100 ): '''simple docstring''' snake_case_ = factorial(lowercase__ ) snake_case_ = split_and_add(lowercase__ ) return result if __name__ == "__main__": print(solution(int(input('Enter the Number: ').strip())))
721
import unittest from transformers import MraConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_torch_available(): import torch from transformers import ( MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, MraModel, ) from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST class SCREAMING_SNAKE_CASE : """simple docstring""" def __init__( self , __UpperCamelCase , __UpperCamelCase=2 , __UpperCamelCase=8 , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=99 , __UpperCamelCase=16 , __UpperCamelCase=5 , __UpperCamelCase=2 , __UpperCamelCase=36 , __UpperCamelCase="gelu" , __UpperCamelCase=0.0 , __UpperCamelCase=0.0 , __UpperCamelCase=5_12 , __UpperCamelCase=16 , __UpperCamelCase=2 , __UpperCamelCase=0.02 , __UpperCamelCase=3 , __UpperCamelCase=4 , __UpperCamelCase=None , ): """simple docstring""" snake_case_ = parent snake_case_ = batch_size snake_case_ = seq_length snake_case_ = is_training snake_case_ = use_input_mask snake_case_ = use_token_type_ids snake_case_ = use_labels snake_case_ = vocab_size snake_case_ = hidden_size snake_case_ = num_hidden_layers snake_case_ = num_attention_heads snake_case_ = intermediate_size snake_case_ = hidden_act snake_case_ = hidden_dropout_prob snake_case_ = attention_probs_dropout_prob snake_case_ = max_position_embeddings snake_case_ = type_vocab_size snake_case_ = type_sequence_label_size snake_case_ = initializer_range snake_case_ = num_labels snake_case_ = num_choices snake_case_ = scope def __lowerCAmelCase ( self ): """simple docstring""" snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) snake_case_ = None if self.use_input_mask: 
snake_case_ = random_attention_mask([self.batch_size, self.seq_length] ) snake_case_ = None if self.use_token_type_ids: snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) snake_case_ = None snake_case_ = None snake_case_ = None if self.use_labels: snake_case_ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) snake_case_ = ids_tensor([self.batch_size] , self.num_choices ) snake_case_ = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __lowerCAmelCase ( self ): """simple docstring""" return MraConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__UpperCamelCase , initializer_range=self.initializer_range , ) def __lowerCAmelCase ( self ): """simple docstring""" snake_case_ = self.get_config() snake_case_ = 3_00 return config def __lowerCAmelCase ( self ): """simple docstring""" ( ( snake_case_ ) , ( snake_case_ ) , ( snake_case_ ) , ( snake_case_ ) , ( snake_case_ ) , ( snake_case_ ) , ( snake_case_ ) , ) = self.prepare_config_and_inputs() snake_case_ = True snake_case_ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) snake_case_ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def __lowerCAmelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase 
, __UpperCamelCase , __UpperCamelCase ): """simple docstring""" snake_case_ = MraModel(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() snake_case_ = model(__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase ) snake_case_ = model(__UpperCamelCase , token_type_ids=__UpperCamelCase ) snake_case_ = model(__UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __lowerCAmelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , ): """simple docstring""" snake_case_ = True snake_case_ = MraModel(__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() snake_case_ = model( __UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase , encoder_hidden_states=__UpperCamelCase , encoder_attention_mask=__UpperCamelCase , ) snake_case_ = model( __UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase , encoder_hidden_states=__UpperCamelCase , ) snake_case_ = model(__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __lowerCAmelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): """simple docstring""" snake_case_ = MraForMaskedLM(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() snake_case_ = model(__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase , labels=__UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __lowerCAmelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , 
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): """simple docstring""" snake_case_ = MraForQuestionAnswering(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() snake_case_ = model( __UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase , start_positions=__UpperCamelCase , end_positions=__UpperCamelCase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __lowerCAmelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): """simple docstring""" snake_case_ = self.num_labels snake_case_ = MraForSequenceClassification(__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() snake_case_ = model(__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase , labels=__UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __lowerCAmelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): """simple docstring""" snake_case_ = self.num_labels snake_case_ = MraForTokenClassification(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() snake_case_ = model(__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase , labels=__UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __lowerCAmelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): """simple docstring""" snake_case_ = self.num_choices snake_case_ = MraForMultipleChoice(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() snake_case_ = input_ids.unsqueeze(1 
).expand(-1 , self.num_choices , -1 ).contiguous() snake_case_ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() snake_case_ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() snake_case_ = model( __UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase , labels=__UpperCamelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __lowerCAmelCase ( self ): """simple docstring""" snake_case_ = self.prepare_config_and_inputs() ( ( snake_case_ ) , ( snake_case_ ) , ( snake_case_ ) , ( snake_case_ ) , ( snake_case_ ) , ( snake_case_ ) , ( snake_case_ ) , ) = config_and_inputs snake_case_ = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE ( __snake_case , unittest.TestCase ): """simple docstring""" __A = ( ( MraModel, MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, ) if is_torch_available() else () ) __A = False __A = False __A = False __A = False __A = () def __lowerCAmelCase ( self ): """simple docstring""" snake_case_ = MraModelTester(self ) snake_case_ = ConfigTester(self , config_class=__UpperCamelCase , hidden_size=37 ) def __lowerCAmelCase ( self ): """simple docstring""" self.config_tester.run_common_tests() def __lowerCAmelCase ( self ): """simple docstring""" snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCamelCase ) def __lowerCAmelCase ( self ): """simple docstring""" snake_case_ = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: snake_case_ = type self.model_tester.create_and_check_model(*__UpperCamelCase ) def __lowerCAmelCase ( self ): """simple docstring""" snake_case_ = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_for_masked_lm(*__UpperCamelCase ) def __lowerCAmelCase ( self ): """simple docstring""" snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*__UpperCamelCase ) def __lowerCAmelCase ( self ): """simple docstring""" snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*__UpperCamelCase ) def __lowerCAmelCase ( self ): """simple docstring""" snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*__UpperCamelCase ) def __lowerCAmelCase ( self ): """simple docstring""" snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*__UpperCamelCase ) @slow def __lowerCAmelCase ( self ): """simple docstring""" for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: snake_case_ = MraModel.from_pretrained(__UpperCamelCase ) self.assertIsNotNone(__UpperCamelCase ) @unittest.skip(reason='MRA does not output attentions' ) def __lowerCAmelCase ( self ): """simple docstring""" return @require_torch class SCREAMING_SNAKE_CASE ( unittest.TestCase ): """simple docstring""" @slow def __lowerCAmelCase ( self ): """simple docstring""" snake_case_ = MraModel.from_pretrained('uw-madison/mra-base-512-4' ) snake_case_ = torch.arange(2_56 ).unsqueeze(0 ) with torch.no_grad(): snake_case_ = model(__UpperCamelCase )[0] snake_case_ = torch.Size((1, 2_56, 7_68) ) self.assertEqual(output.shape , __UpperCamelCase ) snake_case_ = torch.tensor( [[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCamelCase , atol=1E-4 ) ) @slow def __lowerCAmelCase ( self ): """simple docstring""" snake_case_ = MraForMaskedLM.from_pretrained('uw-madison/mra-base-512-4' ) snake_case_ = torch.arange(2_56 ).unsqueeze(0 ) with torch.no_grad(): 
snake_case_ = model(__UpperCamelCase )[0] snake_case_ = 5_02_65 snake_case_ = torch.Size((1, 2_56, vocab_size) ) self.assertEqual(output.shape , __UpperCamelCase ) snake_case_ = torch.tensor( [[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCamelCase , atol=1E-4 ) ) @slow def __lowerCAmelCase ( self ): """simple docstring""" snake_case_ = MraForMaskedLM.from_pretrained('uw-madison/mra-base-4096-8-d3' ) snake_case_ = torch.arange(40_96 ).unsqueeze(0 ) with torch.no_grad(): snake_case_ = model(__UpperCamelCase )[0] snake_case_ = 5_02_65 snake_case_ = torch.Size((1, 40_96, vocab_size) ) self.assertEqual(output.shape , __UpperCamelCase ) snake_case_ = torch.tensor( [[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCamelCase , atol=1E-4 ) )
46
0
"""Convert an original latent-diffusion (CompVis LDM) UNet checkpoint to the diffusers format.

NOTE(review): an automated rename had collapsed every function in this file to
``a_`` (with duplicated parameter names — a SyntaxError) and clobbered every
assignment target, while the right-hand sides still referenced the original
variable names. Names below are restored from the call sites and the upstream
diffusers conversion script.
"""
import argparse
import json

import torch

from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel


def shave_segments(path, n_shave_prefix_segments=1):
    """Remove (n >= 0) or keep only (n < 0) the first dot-separated segments of `path`."""
    if n_shave_prefix_segments >= 0:
        return '.'.join(path.split('.')[n_shave_prefix_segments:])
    else:
        return '.'.join(path.split('.')[:n_shave_prefix_segments])


def renew_resnet_paths(old_list, n_shave_prefix_segments=0):
    """Map original LDM resnet parameter names to their diffusers equivalents.

    Returns a list of ``{'old': ..., 'new': ...}`` dicts, one per input name.
    """
    mapping = []
    for old_item in old_list:
        new_item = old_item.replace('in_layers.0', 'norm1')
        new_item = new_item.replace('in_layers.2', 'conv1')
        new_item = new_item.replace('out_layers.0', 'norm2')
        new_item = new_item.replace('out_layers.3', 'conv2')
        new_item = new_item.replace('emb_layers.1', 'time_emb_proj')
        new_item = new_item.replace('skip_connection', 'conv_shortcut')
        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)
        mapping.append({'old': old_item, 'new': new_item})
    return mapping


def renew_attention_paths(old_list, n_shave_prefix_segments=0):
    """Map original LDM attention parameter names to their diffusers equivalents."""
    mapping = []
    for old_item in old_list:
        new_item = old_item
        new_item = new_item.replace('norm.weight', 'group_norm.weight')
        new_item = new_item.replace('norm.bias', 'group_norm.bias')
        new_item = new_item.replace('proj_out.weight', 'proj_attn.weight')
        new_item = new_item.replace('proj_out.bias', 'proj_attn.bias')
        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)
        mapping.append({'old': old_item, 'new': new_item})
    return mapping


def assign_to_checkpoint(paths, checkpoint, old_checkpoint, attention_paths_to_split=None, additional_replacements=None, config=None):
    """Copy tensors from `old_checkpoint` into `checkpoint` under their renamed keys.

    `attention_paths_to_split` maps fused qkv parameter names to a dict of the
    three per-projection target names; those tensors are split head-wise into
    query/key/value before assignment.
    """
    assert isinstance(paths, list), "Paths should be a list of dicts containing 'old' and 'new' keys."

    # Splits the attention layers into three variables.
    if attention_paths_to_split is not None:
        for path, path_map in attention_paths_to_split.items():
            old_tensor = old_checkpoint[path]
            channels = old_tensor.shape[0] // 3

            target_shape = (-1, channels) if len(old_tensor.shape) == 3 else (-1)

            num_heads = old_tensor.shape[0] // config['num_head_channels'] // 3

            old_tensor = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:])
            query, key, value = old_tensor.split(channels // num_heads, dim=1)

            checkpoint[path_map['query']] = query.reshape(target_shape)
            checkpoint[path_map['key']] = key.reshape(target_shape)
            checkpoint[path_map['value']] = value.reshape(target_shape)

    for path in paths:
        new_path = path['new']

        # These have already been assigned
        if attention_paths_to_split is not None and new_path in attention_paths_to_split:
            continue

        # Global renaming happens here
        new_path = new_path.replace('middle_block.0', 'mid_block.resnets.0')
        new_path = new_path.replace('middle_block.1', 'mid_block.attentions.0')
        new_path = new_path.replace('middle_block.2', 'mid_block.resnets.1')

        if additional_replacements is not None:
            for replacement in additional_replacements:
                new_path = new_path.replace(replacement['old'], replacement['new'])

        # proj_attn.weight has to be converted from conv 1D to linear
        if "proj_attn.weight" in new_path:
            checkpoint[new_path] = old_checkpoint[path['old']][:, :, 0]
        else:
            checkpoint[new_path] = old_checkpoint[path['old']]


def convert_ldm_checkpoint(checkpoint, config):
    """Convert an original LDM UNet state dict to the diffusers UNet layout.

    NOTE(review): the destination key names (time_embedding / conv_in /
    downsamplers / upsamplers) were clobbered in this file and are restored
    from the upstream diffusers conversion script.
    """
    new_checkpoint = {}

    new_checkpoint['time_embedding.linear_1.weight'] = checkpoint['time_embed.0.weight']
    new_checkpoint['time_embedding.linear_1.bias'] = checkpoint['time_embed.0.bias']
    new_checkpoint['time_embedding.linear_2.weight'] = checkpoint['time_embed.2.weight']
    new_checkpoint['time_embedding.linear_2.bias'] = checkpoint['time_embed.2.bias']

    new_checkpoint['conv_in.weight'] = checkpoint['input_blocks.0.0.weight']
    new_checkpoint['conv_in.bias'] = checkpoint['input_blocks.0.0.bias']

    new_checkpoint['conv_norm_out.weight'] = checkpoint['out.0.weight']
    new_checkpoint['conv_norm_out.bias'] = checkpoint['out.0.bias']
    new_checkpoint['conv_out.weight'] = checkpoint['out.2.weight']
    new_checkpoint['conv_out.bias'] = checkpoint['out.2.bias']

    # Retrieves the keys for the input blocks only
    num_input_blocks = len({'.'.join(layer.split('.')[:2]) for layer in checkpoint if 'input_blocks' in layer})
    input_blocks = {
        layer_id: [key for key in checkpoint if f'input_blocks.{layer_id}' in key]
        for layer_id in range(num_input_blocks)
    }

    # Retrieves the keys for the middle blocks only
    num_middle_blocks = len({'.'.join(layer.split('.')[:2]) for layer in checkpoint if 'middle_block' in layer})
    middle_blocks = {
        layer_id: [key for key in checkpoint if f'middle_block.{layer_id}' in key]
        for layer_id in range(num_middle_blocks)
    }

    # Retrieves the keys for the output blocks only
    num_output_blocks = len({'.'.join(layer.split('.')[:2]) for layer in checkpoint if 'output_blocks' in layer})
    output_blocks = {
        layer_id: [key for key in checkpoint if f'output_blocks.{layer_id}' in key]
        for layer_id in range(num_output_blocks)
    }

    for i in range(1, num_input_blocks):
        block_id = (i - 1) // (config['num_res_blocks'] + 1)
        layer_in_block_id = (i - 1) % (config['num_res_blocks'] + 1)

        resnets = [key for key in input_blocks[i] if f'input_blocks.{i}.0' in key]
        attentions = [key for key in input_blocks[i] if f'input_blocks.{i}.1' in key]

        if f'input_blocks.{i}.0.op.weight' in checkpoint:
            # This is a downsampler block, not a resnet.
            new_checkpoint[f'down_blocks.{block_id}.downsamplers.0.conv.weight'] = checkpoint[
                f'input_blocks.{i}.0.op.weight'
            ]
            new_checkpoint[f'down_blocks.{block_id}.downsamplers.0.conv.bias'] = checkpoint[
                f'input_blocks.{i}.0.op.bias'
            ]
            continue

        paths = renew_resnet_paths(resnets)
        meta_path = {'old': f'input_blocks.{i}.0', 'new': f'down_blocks.{block_id}.resnets.{layer_in_block_id}'}
        resnet_op = {'old': 'resnets.2.op', 'new': 'downsamplers.0.op'}
        assign_to_checkpoint(
            paths, new_checkpoint, checkpoint, additional_replacements=[meta_path, resnet_op], config=config
        )

        if len(attentions):
            paths = renew_attention_paths(attentions)
            meta_path = {
                'old': f'input_blocks.{i}.1',
                'new': f'down_blocks.{block_id}.attentions.{layer_in_block_id}',
            }
            to_split = {
                f'input_blocks.{i}.1.qkv.bias': {
                    'key': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias',
                    'query': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias',
                    'value': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias',
                },
                f'input_blocks.{i}.1.qkv.weight': {
                    'key': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight',
                    'query': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight',
                    'value': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight',
                },
            }
            assign_to_checkpoint(
                paths,
                new_checkpoint,
                checkpoint,
                additional_replacements=[meta_path],
                attention_paths_to_split=to_split,
                config=config,
            )

    resnet_0 = middle_blocks[0]
    attentions = middle_blocks[1]
    resnet_1 = middle_blocks[2]

    resnet_0_paths = renew_resnet_paths(resnet_0)
    assign_to_checkpoint(resnet_0_paths, new_checkpoint, checkpoint, config=config)

    resnet_1_paths = renew_resnet_paths(resnet_1)
    assign_to_checkpoint(resnet_1_paths, new_checkpoint, checkpoint, config=config)

    attentions_paths = renew_attention_paths(attentions)
    to_split = {
        'middle_block.1.qkv.bias': {
            'key': 'mid_block.attentions.0.key.bias',
            'query': 'mid_block.attentions.0.query.bias',
            'value': 'mid_block.attentions.0.value.bias',
        },
        'middle_block.1.qkv.weight': {
            'key': 'mid_block.attentions.0.key.weight',
            'query': 'mid_block.attentions.0.query.weight',
            'value': 'mid_block.attentions.0.value.weight',
        },
    }
    assign_to_checkpoint(
        attentions_paths, new_checkpoint, checkpoint, attention_paths_to_split=to_split, config=config
    )

    for i in range(num_output_blocks):
        block_id = i // (config['num_res_blocks'] + 1)
        layer_in_block_id = i % (config['num_res_blocks'] + 1)
        output_block_layers = [shave_segments(name, 2) for name in output_blocks[i]]
        output_block_list = {}

        for layer in output_block_layers:
            layer_id, layer_name = layer.split('.')[0], shave_segments(layer, 1)
            if layer_id in output_block_list:
                output_block_list[layer_id].append(layer_name)
            else:
                output_block_list[layer_id] = [layer_name]

        if len(output_block_list) > 1:
            resnets = [key for key in output_blocks[i] if f'output_blocks.{i}.0' in key]
            attentions = [key for key in output_blocks[i] if f'output_blocks.{i}.1' in key]

            resnet_0_paths = renew_resnet_paths(resnets)
            paths = renew_resnet_paths(resnets)

            meta_path = {'old': f'output_blocks.{i}.0', 'new': f'up_blocks.{block_id}.resnets.{layer_in_block_id}'}
            assign_to_checkpoint(paths, new_checkpoint, checkpoint, additional_replacements=[meta_path], config=config)

            if ['conv.weight', 'conv.bias'] in output_block_list.values():
                index = list(output_block_list.values()).index(['conv.weight', 'conv.bias'])
                new_checkpoint[f'up_blocks.{block_id}.upsamplers.0.conv.weight'] = checkpoint[
                    f'output_blocks.{i}.{index}.conv.weight'
                ]
                new_checkpoint[f'up_blocks.{block_id}.upsamplers.0.conv.bias'] = checkpoint[
                    f'output_blocks.{i}.{index}.conv.bias'
                ]

                # Clear attentions as they have been attributed above.
                if len(attentions) == 2:
                    attentions = []

            if len(attentions):
                paths = renew_attention_paths(attentions)
                meta_path = {
                    'old': f'output_blocks.{i}.1',
                    'new': f'up_blocks.{block_id}.attentions.{layer_in_block_id}',
                }
                to_split = {
                    f'output_blocks.{i}.1.qkv.bias': {
                        'key': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias',
                        'query': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias',
                        'value': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias',
                    },
                    f'output_blocks.{i}.1.qkv.weight': {
                        'key': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight',
                        'query': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight',
                        'value': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight',
                    },
                }
                assign_to_checkpoint(
                    paths,
                    new_checkpoint,
                    checkpoint,
                    additional_replacements=[meta_path],
                    attention_paths_to_split=to_split if any('qkv' in key for key in attentions) else None,
                    config=config,
                )
        else:
            resnet_0_paths = renew_resnet_paths(output_blocks[i], n_shave_prefix_segments=1)
            for path in resnet_0_paths:
                old_path = '.'.join(['output_blocks', str(i), path['old']])
                new_path = '.'.join(['up_blocks', str(block_id), 'resnets', str(layer_in_block_id), path['new']])

                new_checkpoint[new_path] = checkpoint[old_path]

    return new_checkpoint


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        '--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
    )
    parser.add_argument(
        '--config_file',
        default=None,
        type=str,
        required=True,
        help='The config json file corresponding to the architecture.',
    )
    parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')

    args = parser.parse_args()

    checkpoint = torch.load(args.checkpoint_path)

    with open(args.config_file) as f:
        config = json.loads(f.read())

    converted_checkpoint = convert_ldm_checkpoint(checkpoint, config)

    if "ldm" in config:
        del config["ldm"]

    model = UNetaDModel(**config)
    model.load_state_dict(converted_checkpoint)

    try:
        scheduler = DDPMScheduler.from_config('/'.join(args.checkpoint_path.split('/')[:-1]))
        vqvae = VQModel.from_pretrained('/'.join(args.checkpoint_path.split('/')[:-1]))

        pipe = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
        pipe.save_pretrained(args.dump_path)
    except:  # noqa: E722
        model.save_pretrained(args.dump_path)
350
"""Convert a Music Spectrogram Diffusion (t5x) checkpoint into a diffusers
SpectrogramDiffusionPipeline.

NOTE(review): automated renaming had collapsed every function to ``a_`` with
duplicated parameter names (a SyntaxError) and clobbered every assignment
target, while the bottom of the file still called ``main(args)``. Function
and variable names — and in particular the torch module attribute paths in
the three ``load_*`` helpers — are restored from the upstream diffusers
conversion script; verify the attribute paths against the current
SpectrogramNotesEncoder / SpectrogramContEncoder / TaFilmDecoder definitions.
"""
import argparse
import os

import jax as jnp  # NOTE(review): misleading alias; only jnp.tree_util is used, which exists on `jax`
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from tax import checkpoints

from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, TaFilmDecoder


MODEL = 'base_with_context'


def load_notes_encoder(weights, model):
    """Copy t5x note-encoder weights into the torch SpectrogramNotesEncoder."""
    model.token_embedder.weight = nn.Parameter(torch.FloatTensor(weights['token_embedder']['embedding']))
    # Position encodings are fixed, not trained.
    model.position_encoding.weight = nn.Parameter(
        torch.FloatTensor(weights['Embed_0']['embedding']), requires_grad=False
    )
    for lyr_num, lyr in enumerate(model.encoders):
        ly_weight = weights[f'layers_{lyr_num}']
        lyr.layer[0].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight['pre_attention_layer_norm']['scale'])
        )

        attention_weights = ly_weight['attention']
        # Flax stores kernels transposed relative to torch Linear weights, hence .T
        lyr.layer[0].SelfAttention.q.weight = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T))
        lyr.layer[0].SelfAttention.k.weight = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T))
        lyr.layer[0].SelfAttention.v.weight = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T))
        lyr.layer[0].SelfAttention.o.weight = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T))

        lyr.layer[1].layer_norm.weight = nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale']))

        lyr.layer[1].DenseReluDense.wi_0.weight = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T))
        lyr.layer[1].DenseReluDense.wi_1.weight = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T))
        lyr.layer[1].DenseReluDense.wo.weight = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T))

    model.layer_norm.weight = nn.Parameter(torch.FloatTensor(weights['encoder_norm']['scale']))
    return model


def load_continuous_encoder(weights, model):
    """Copy t5x continuous (context) encoder weights into SpectrogramContEncoder."""
    model.input_proj.weight = nn.Parameter(torch.FloatTensor(weights['input_proj']['kernel'].T))
    model.position_encoding.weight = nn.Parameter(
        torch.FloatTensor(weights['Embed_0']['embedding']), requires_grad=False
    )
    for lyr_num, lyr in enumerate(model.encoders):
        ly_weight = weights[f'layers_{lyr_num}']
        attention_weights = ly_weight['attention']
        lyr.layer[0].SelfAttention.q.weight = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T))
        lyr.layer[0].SelfAttention.k.weight = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T))
        lyr.layer[0].SelfAttention.v.weight = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T))
        lyr.layer[0].SelfAttention.o.weight = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T))
        lyr.layer[0].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight['pre_attention_layer_norm']['scale'])
        )

        lyr.layer[1].DenseReluDense.wi_0.weight = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T))
        lyr.layer[1].DenseReluDense.wi_1.weight = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T))
        lyr.layer[1].DenseReluDense.wo.weight = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T))
        lyr.layer[1].layer_norm.weight = nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale']))

    model.layer_norm.weight = nn.Parameter(torch.FloatTensor(weights['encoder_norm']['scale']))
    return model


def load_decoder(weights, model):
    """Copy t5x FiLM decoder weights into the torch TaFilmDecoder."""
    model.conditioning_emb[0].weight = nn.Parameter(torch.FloatTensor(weights['time_emb_dense0']['kernel'].T))
    model.conditioning_emb[2].weight = nn.Parameter(torch.FloatTensor(weights['time_emb_dense1']['kernel'].T))

    model.position_encoding.weight = nn.Parameter(
        torch.FloatTensor(weights['Embed_0']['embedding']), requires_grad=False
    )

    model.continuous_inputs_projection.weight = nn.Parameter(
        torch.FloatTensor(weights['continuous_inputs_projection']['kernel'].T)
    )

    for lyr_num, lyr in enumerate(model.decoders):
        ly_weight = weights[f'layers_{lyr_num}']
        lyr.layer[0].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight['pre_self_attention_layer_norm']['scale'])
        )

        lyr.layer[0].FiLMLayer.scale_bias.weight = nn.Parameter(
            torch.FloatTensor(ly_weight['FiLMLayer_0']['DenseGeneral_0']['kernel'].T)
        )

        attention_weights = ly_weight['self_attention']
        lyr.layer[0].attention.to_q.weight = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T))
        lyr.layer[0].attention.to_k.weight = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T))
        lyr.layer[0].attention.to_v.weight = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T))
        lyr.layer[0].attention.to_out[0].weight = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T))

        attention_weights = ly_weight['MultiHeadDotProductAttention_0']
        lyr.layer[1].attention.to_q.weight = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T))
        lyr.layer[1].attention.to_k.weight = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T))
        lyr.layer[1].attention.to_v.weight = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T))
        lyr.layer[1].attention.to_out[0].weight = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T))

        lyr.layer[1].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight['pre_cross_attention_layer_norm']['scale'])
        )

        lyr.layer[2].layer_norm.weight = nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale']))
        lyr.layer[2].film.scale_bias.weight = nn.Parameter(
            torch.FloatTensor(ly_weight['FiLMLayer_1']['DenseGeneral_0']['kernel'].T)
        )
        lyr.layer[2].DenseReluDense.wi_0.weight = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T))
        lyr.layer[2].DenseReluDense.wi_1.weight = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T))
        lyr.layer[2].DenseReluDense.wo.weight = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T))

    model.decoder_norm.weight = nn.Parameter(torch.FloatTensor(weights['decoder_norm']['scale']))
    model.spec_out.weight = nn.Parameter(torch.FloatTensor(weights['spec_out_dense']['kernel'].T))

    return model


def main(args):
    """Load the t5x checkpoint, port every sub-model, and assemble the pipeline."""
    ta_checkpoint = checkpoints.load_tax_checkpoint(args.checkpoint_path)
    # Convert jax arrays to plain numpy so they can feed torch.FloatTensor.
    ta_checkpoint = jnp.tree_util.tree_map(onp.array, ta_checkpoint)

    gin_overrides = [
        'from __gin__ import dynamic_registration',
        'from music_spectrogram_diffusion.models.diffusion import diffusion_utils',
        'diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0',
        'diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()',
    ]

    gin_file = os.path.join(args.checkpoint_path, '..', 'config.gin')
    gin_config = inference.parse_training_gin_file(gin_file, gin_overrides)
    synth_model = inference.InferenceModel(args.checkpoint_path, gin_config)

    scheduler = DDPMScheduler(beta_schedule='squaredcos_cap_v2', variance_type='fixed_large')

    notes_encoder = SpectrogramNotesEncoder(
        max_length=synth_model.sequence_length['inputs'],
        vocab_size=synth_model.model.module.config.vocab_size,
        d_model=synth_model.model.module.config.emb_dim,
        dropout_rate=synth_model.model.module.config.dropout_rate,
        num_layers=synth_model.model.module.config.num_encoder_layers,
        num_heads=synth_model.model.module.config.num_heads,
        d_kv=synth_model.model.module.config.head_dim,
        d_ff=synth_model.model.module.config.mlp_dim,
        feed_forward_proj='gated-gelu',
    )

    continuous_encoder = SpectrogramContEncoder(
        input_dims=synth_model.audio_codec.n_dims,
        targets_context_length=synth_model.sequence_length['targets_context'],
        d_model=synth_model.model.module.config.emb_dim,
        dropout_rate=synth_model.model.module.config.dropout_rate,
        num_layers=synth_model.model.module.config.num_encoder_layers,
        num_heads=synth_model.model.module.config.num_heads,
        d_kv=synth_model.model.module.config.head_dim,
        d_ff=synth_model.model.module.config.mlp_dim,
        feed_forward_proj='gated-gelu',
    )

    decoder = TaFilmDecoder(
        input_dims=synth_model.audio_codec.n_dims,
        targets_length=synth_model.sequence_length['targets_context'],
        max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time,
        d_model=synth_model.model.module.config.emb_dim,
        num_layers=synth_model.model.module.config.num_decoder_layers,
        num_heads=synth_model.model.module.config.num_heads,
        d_kv=synth_model.model.module.config.head_dim,
        d_ff=synth_model.model.module.config.mlp_dim,
        dropout_rate=synth_model.model.module.config.dropout_rate,
    )

    notes_encoder = load_notes_encoder(ta_checkpoint['target']['token_encoder'], notes_encoder)
    continuous_encoder = load_continuous_encoder(ta_checkpoint['target']['continuous_encoder'], continuous_encoder)
    decoder = load_decoder(ta_checkpoint['target']['decoder'], decoder)

    melgan = OnnxRuntimeModel.from_pretrained('kashif/soundstream_mel_decoder')

    pipe = SpectrogramDiffusionPipeline(
        notes_encoder=notes_encoder,
        continuous_encoder=continuous_encoder,
        decoder=decoder,
        scheduler=scheduler,
        melgan=melgan,
    )
    if args.save:
        pipe.save_pretrained(args.output_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--output_path', default=None, type=str, required=True, help='Path to the converted model.')
    parser.add_argument(
        '--save', default=True, type=bool, required=False, help='Whether to save the converted model or not.'
    )
    parser.add_argument(
        '--checkpoint_path',
        default=f"{MODEL}/checkpoint_500000",
        type=str,
        required=False,
        help='Path to the original jax model checkpoint.',
    )
    args = parser.parse_args()

    main(args)
350
1
"""simple docstring""" import argparse import json from pathlib import Path import torch import torchaudio from datasets import load_dataset from huggingface_hub import hf_hub_download from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification from transformers.utils import logging logging.set_verbosity_info() _lowercase : Optional[Any] = logging.get_logger(__name__) def lowercase__ ( snake_case_ :int ): __UpperCAmelCase = ASTConfig() if "10-10" in model_name: pass elif "speech-commands" in model_name: __UpperCAmelCase = 128 elif "12-12" in model_name: __UpperCAmelCase = 12 __UpperCAmelCase = 12 elif "14-14" in model_name: __UpperCAmelCase = 14 __UpperCAmelCase = 14 elif "16-16" in model_name: __UpperCAmelCase = 16 __UpperCAmelCase = 16 else: raise ValueError('''Model not supported''' ) __UpperCAmelCase = '''huggingface/label-files''' if "speech-commands" in model_name: __UpperCAmelCase = 35 __UpperCAmelCase = '''speech-commands-v2-id2label.json''' else: __UpperCAmelCase = 527 __UpperCAmelCase = '''audioset-id2label.json''' __UpperCAmelCase = json.load(open(hf_hub_download(snake_case_ , snake_case_ , repo_type='''dataset''' ) , '''r''' ) ) __UpperCAmelCase = {int(snake_case_ ): v for k, v in idalabel.items()} __UpperCAmelCase = idalabel __UpperCAmelCase = {v: k for k, v in idalabel.items()} return config def lowercase__ ( snake_case_ :Optional[int] ): if "module.v" in name: __UpperCAmelCase = name.replace('''module.v''' , '''audio_spectrogram_transformer''' ) if "cls_token" in name: __UpperCAmelCase = name.replace('''cls_token''' , '''embeddings.cls_token''' ) if "dist_token" in name: __UpperCAmelCase = name.replace('''dist_token''' , '''embeddings.distillation_token''' ) if "pos_embed" in name: __UpperCAmelCase = name.replace('''pos_embed''' , '''embeddings.position_embeddings''' ) if "patch_embed.proj" in name: __UpperCAmelCase = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' ) # transformer blocks if 
"blocks" in name: __UpperCAmelCase = name.replace('''blocks''' , '''encoder.layer''' ) if "attn.proj" in name: __UpperCAmelCase = name.replace('''attn.proj''' , '''attention.output.dense''' ) if "attn" in name: __UpperCAmelCase = name.replace('''attn''' , '''attention.self''' ) if "norm1" in name: __UpperCAmelCase = name.replace('''norm1''' , '''layernorm_before''' ) if "norm2" in name: __UpperCAmelCase = name.replace('''norm2''' , '''layernorm_after''' ) if "mlp.fc1" in name: __UpperCAmelCase = name.replace('''mlp.fc1''' , '''intermediate.dense''' ) if "mlp.fc2" in name: __UpperCAmelCase = name.replace('''mlp.fc2''' , '''output.dense''' ) # final layernorm if "audio_spectrogram_transformer.norm" in name: __UpperCAmelCase = name.replace('''audio_spectrogram_transformer.norm''' , '''audio_spectrogram_transformer.layernorm''' ) # classifier head if "module.mlp_head.0" in name: __UpperCAmelCase = name.replace('''module.mlp_head.0''' , '''classifier.layernorm''' ) if "module.mlp_head.1" in name: __UpperCAmelCase = name.replace('''module.mlp_head.1''' , '''classifier.dense''' ) return name def lowercase__ ( snake_case_ :str , snake_case_ :int ): for key in orig_state_dict.copy().keys(): __UpperCAmelCase = orig_state_dict.pop(snake_case_ ) if "qkv" in key: __UpperCAmelCase = key.split('''.''' ) __UpperCAmelCase = int(key_split[3] ) __UpperCAmelCase = config.hidden_size if "weight" in key: __UpperCAmelCase = val[:dim, :] __UpperCAmelCase = val[dim : dim * 2, :] __UpperCAmelCase = val[-dim:, :] else: __UpperCAmelCase = val[:dim] __UpperCAmelCase = val[dim : dim * 2] __UpperCAmelCase = val[-dim:] else: __UpperCAmelCase = val return orig_state_dict def lowercase__ ( snake_case_ :Optional[Any] ): __UpperCAmelCase = [ '''module.v.head.weight''', '''module.v.head.bias''', '''module.v.head_dist.weight''', '''module.v.head_dist.bias''', ] for k in ignore_keys: state_dict.pop(snake_case_ , snake_case_ ) @torch.no_grad() def lowercase__ ( snake_case_ :Any , snake_case_ :Tuple , 
snake_case_ :Tuple=False ): __UpperCAmelCase = get_audio_spectrogram_transformer_config(snake_case_ ) __UpperCAmelCase = { '''ast-finetuned-audioset-10-10-0.4593''': ( '''https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1''' ), '''ast-finetuned-audioset-10-10-0.450''': ( '''https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1''' ), '''ast-finetuned-audioset-10-10-0.448''': ( '''https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1''' ), '''ast-finetuned-audioset-10-10-0.448-v2''': ( '''https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1''' ), '''ast-finetuned-audioset-12-12-0.447''': ( '''https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1''' ), '''ast-finetuned-audioset-14-14-0.443''': ( '''https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1''' ), '''ast-finetuned-audioset-16-16-0.442''': ( '''https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1''' ), '''ast-finetuned-speech-commands-v2''': ( '''https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1''' ), } # load original state_dict __UpperCAmelCase = model_name_to_url[model_name] __UpperCAmelCase = torch.hub.load_state_dict_from_url(snake_case_ , map_location='''cpu''' ) # remove some keys remove_keys(snake_case_ ) # rename some keys __UpperCAmelCase = convert_state_dict(snake_case_ , snake_case_ ) # load 🤗 model __UpperCAmelCase = ASTForAudioClassification(snake_case_ ) model.eval() model.load_state_dict(snake_case_ ) # verify outputs on dummy input # source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62 __UpperCAmelCase = -4.2677393 if '''speech-commands''' not in model_name else -6.845978 __UpperCAmelCase = 4.5689974 if '''speech-commands''' not in model_name else 5.5654526 __UpperCAmelCase = 1_024 if '''speech-commands''' not in model_name else 128 __UpperCAmelCase = 
ASTFeatureExtractor(mean=snake_case_ , std=snake_case_ , max_length=snake_case_ ) if "speech-commands" in model_name: __UpperCAmelCase = load_dataset('''speech_commands''' , '''v0.02''' , split='''validation''' ) __UpperCAmelCase = dataset[0]['''audio''']['''array'''] else: __UpperCAmelCase = hf_hub_download( repo_id='''nielsr/audio-spectogram-transformer-checkpoint''' , filename='''sample_audio.flac''' , repo_type='''dataset''' , ) __UpperCAmelCase , __UpperCAmelCase = torchaudio.load(snake_case_ ) __UpperCAmelCase = waveform.squeeze().numpy() __UpperCAmelCase = feature_extractor(snake_case_ , sampling_rate=16_000 , return_tensors='''pt''' ) # forward pass __UpperCAmelCase = model(**snake_case_ ) __UpperCAmelCase = outputs.logits if model_name == "ast-finetuned-audioset-10-10-0.4593": __UpperCAmelCase = torch.tensor([-0.8760, -7.0042, -8.6602] ) elif model_name == "ast-finetuned-audioset-10-10-0.450": __UpperCAmelCase = torch.tensor([-1.1986, -7.0903, -8.2718] ) elif model_name == "ast-finetuned-audioset-10-10-0.448": __UpperCAmelCase = torch.tensor([-2.6128, -8.0080, -9.4344] ) elif model_name == "ast-finetuned-audioset-10-10-0.448-v2": __UpperCAmelCase = torch.tensor([-1.5080, -7.4534, -8.8917] ) elif model_name == "ast-finetuned-audioset-12-12-0.447": __UpperCAmelCase = torch.tensor([-0.5050, -6.5833, -8.0843] ) elif model_name == "ast-finetuned-audioset-14-14-0.443": __UpperCAmelCase = torch.tensor([-0.3826, -7.0336, -8.2413] ) elif model_name == "ast-finetuned-audioset-16-16-0.442": __UpperCAmelCase = torch.tensor([-1.2113, -6.9101, -8.3470] ) elif model_name == "ast-finetuned-speech-commands-v2": __UpperCAmelCase = torch.tensor([6.1589, -8.0566, -8.7984] ) else: raise ValueError('''Unknown model name''' ) if not torch.allclose(logits[0, :3] , snake_case_ , atol=1E-4 ): raise ValueError('''Logits don\'t match''' ) print('''Looks ok!''' ) if pytorch_dump_folder_path is not None: Path(snake_case_ ).mkdir(exist_ok=snake_case_ ) print(F'''Saving model 
{model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(snake_case_ ) print(F'''Saving feature extractor to {pytorch_dump_folder_path}''' ) feature_extractor.save_pretrained(snake_case_ ) if push_to_hub: print('''Pushing model and feature extractor to the hub...''' ) model.push_to_hub(F'''MIT/{model_name}''' ) feature_extractor.push_to_hub(F'''MIT/{model_name}''' ) if __name__ == "__main__": _lowercase : Optional[int] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='ast-finetuned-audioset-10-10-0.4593', type=str, help='Name of the Audio Spectrogram Transformer model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.' ) _lowercase : Dict = parser.parse_args() convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
397
"""simple docstring""" def lowercase__ ( snake_case_ :int = 600_851_475_143 ): try: __UpperCAmelCase = int(snake_case_ ) except (TypeError, ValueError): raise TypeError('''Parameter n must be int or castable to int.''' ) if n <= 0: raise ValueError('''Parameter n must be greater than or equal to one.''' ) __UpperCAmelCase = 2 __UpperCAmelCase = 0 if n == 2: return 2 while n > 2: while n % i != 0: i += 1 __UpperCAmelCase = i while n % i == 0: __UpperCAmelCase = n // i i += 1 return int(snake_case_ ) if __name__ == "__main__": print(f"""{solution() = }""")
397
1
from .constants import ( MODEL_NAME, OPTIMIZER_NAME, RNG_STATE_NAME, SAFE_WEIGHTS_INDEX_NAME, SAFE_WEIGHTS_NAME, SCALER_NAME, SCHEDULER_NAME, TORCH_LAUNCH_PARAMS, WEIGHTS_INDEX_NAME, WEIGHTS_NAME, ) from .dataclasses import ( BnbQuantizationConfig, ComputeEnvironment, CustomDtype, DeepSpeedPlugin, DistributedDataParallelKwargs, DistributedType, DynamoBackend, FPaRecipeKwargs, FullyShardedDataParallelPlugin, GradientAccumulationPlugin, GradScalerKwargs, InitProcessGroupKwargs, KwargsHandler, LoggerType, MegatronLMPlugin, PrecisionType, ProjectConfiguration, RNGType, SageMakerDistributedType, TensorInformation, TorchDynamoPlugin, ) from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env from .imports import ( get_ccl_version, is_abit_bnb_available, is_abit_bnb_available, is_aim_available, is_bfaa_available, is_bnb_available, is_botoa_available, is_ccl_available, is_comet_ml_available, is_datasets_available, is_deepspeed_available, is_fpa_available, is_ipex_available, is_megatron_lm_available, is_mlflow_available, is_mps_available, is_npu_available, is_rich_available, is_safetensors_available, is_sagemaker_available, is_tensorboard_available, is_tpu_available, is_transformers_available, is_wandb_available, is_xpu_available, ) from .modeling import ( check_device_map, check_tied_parameters_in_config, check_tied_parameters_on_same_device, compute_module_sizes, convert_file_size_to_int, dtype_byte_size, find_tied_parameters, get_balanced_memory, get_max_layer_size, get_max_memory, get_mixed_precision_context_manager, id_tensor_storage, infer_auto_device_map, load_checkpoint_in_model, load_offloaded_weights, load_state_dict, named_module_tensors, retie_parameters, set_module_tensor_to_device, shard_checkpoint, ) from .offload import ( OffloadedWeightsLoader, PrefixedDataset, extract_submodules_state_dict, load_offloaded_weight, offload_state_dict, offload_weight, save_offload_index, ) from .operations import ( broadcast, 
broadcast_object_list, concatenate, convert_outputs_to_fpaa, convert_to_fpaa, find_batch_size, find_device, gather, gather_object, get_data_structure, honor_type, initialize_tensors, is_namedtuple, is_tensor_information, is_torch_tensor, listify, pad_across_processes, recursively_apply, reduce, send_to_device, slice_tensors, ) from .versions import compare_versions, is_torch_version if is_deepspeed_available(): from .deepspeed import ( DeepSpeedEngineWrapper, DeepSpeedOptimizerWrapper, DeepSpeedSchedulerWrapper, DummyOptim, DummyScheduler, HfDeepSpeedConfig, ) from .bnb import has_abit_bnb_layers, load_and_quantize_model from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer from .launch import ( PrepareForLaunch, _filter_args, prepare_deepspeed_cmd_env, prepare_multi_gpu_env, prepare_sagemager_args_inputs, prepare_simple_launcher_cmd_env, prepare_tpu, ) from .megatron_lm import ( AbstractTrainStep, BertTrainStep, GPTTrainStep, MegatronEngine, MegatronLMDummyDataLoader, MegatronLMDummyScheduler, MegatronLMOptimizerWrapper, MegatronLMSchedulerWrapper, TaTrainStep, avg_losses_across_data_parallel_group, gather_across_data_parallel_groups, ) from .megatron_lm import initialize as megatron_lm_initialize from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader from .megatron_lm import prepare_model as megatron_lm_prepare_model from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler from .memory import find_executable_batch_size, release_memory from .other import ( extract_model_from_parallel, get_pretty_name, is_port_in_use, merge_dicts, patch_environment, save, wait_for_everyone, write_basic_config, ) from .random import set_seed, synchronize_rng_state, synchronize_rng_states from .torch_xla import install_xla from .tqdm import tqdm from .transformer_engine import convert_model, has_transformer_engine_layers
276
# coding=utf-8 # Copyright 2020 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # this script dumps information about the environment import os import sys import transformers __magic_name__ = '''3''' print('''Python version:''', sys.version) print('''transformers version:''', transformers.__version__) try: import torch print('''Torch version:''', torch.__version__) print('''Cuda available:''', torch.cuda.is_available()) print('''Cuda version:''', torch.version.cuda) print('''CuDNN version:''', torch.backends.cudnn.version()) print('''Number of GPUs available:''', torch.cuda.device_count()) print('''NCCL version:''', torch.cuda.nccl.version()) except ImportError: print('''Torch version:''', None) try: import deepspeed print('''DeepSpeed version:''', deepspeed.__version__) except ImportError: print('''DeepSpeed version:''', None) try: import tensorflow as tf print('''TensorFlow version:''', tf.__version__) print('''TF GPUs available:''', bool(tf.config.list_physical_devices('''GPU'''))) print('''Number of TF GPUs available:''', len(tf.config.list_physical_devices('''GPU'''))) except ImportError: print('''TensorFlow version:''', None)
276
1
import os
from typing import Dict, List, Union

import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs

from .tokenization_gpta import GPTaTokenizer


class TFGPTaTokenizer(tf.keras.layers.Layer):
    """In-graph GPT-2 tokenizer built on keras-nlp's BytePairTokenizer.

    Runs tokenization inside the TF graph so it can be exported as part of a
    SavedModel. NOTE(review): the original had every ``__init__`` parameter
    named identically (a SyntaxError) and method bodies referencing ``__A``;
    names below are restored from the assignment order.
    """

    def __init__(self, vocab: Dict[str, int], merges: List[str], max_length: int = None, pad_token_id: int = None):
        """
        Args:
            vocab: token -> id mapping of the BPE tokenizer.
            merges: BPE merge rules, one "a b" pair per entry.
            max_length: optional length to pad/truncate to in `call`.
            pad_token_id: id used for padding when `max_length` is set.
        """
        super().__init__()
        self.pad_token_id = pad_token_id
        self.max_length = max_length
        self.vocab = vocab
        self.merges = merges
        self.tf_tokenizer = BytePairTokenizer(vocab, merges, sequence_length=max_length)

    @classmethod
    def from_tokenizer(cls, tokenizer: GPTaTokenizer, *args, **kwargs):
        """Build from an existing slow GPT-2 tokenizer instance."""
        merges = [" ".join(m) for m in tokenizer.bpe_ranks.keys()]
        vocab = tokenizer.get_vocab()
        return cls(vocab, merges, *args, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], *init_inputs, **kwargs):
        """Build from a pretrained checkpoint via the slow tokenizer."""
        tokenizer = GPTaTokenizer.from_pretrained(pretrained_model_name_or_path, *init_inputs, **kwargs)
        return cls.from_tokenizer(tokenizer, *init_inputs, **kwargs)

    @classmethod
    def from_config(cls, config):
        """Re-create the layer from the dict produced by `get_config`."""
        return cls(**config)

    def get_config(self):
        return {
            "vocab": self.vocab,
            "merges": self.merges,
            "max_length": self.max_length,
            "pad_token_id": self.pad_token_id,
        }

    def call(self, x, max_length: int = None):
        """Tokenize `x`; returns dict with `input_ids` and `attention_mask`."""
        input_ids = self.tf_tokenizer(x)
        attention_mask = tf.ones_like(input_ids)

        if self.pad_token_id is not None:
            # pad the tokens up to max length
            max_length = max_length if max_length is not None else self.max_length

            if max_length is not None:
                input_ids, attention_mask = pad_model_inputs(
                    input_ids, max_seq_length=max_length, pad_value=self.pad_token_id
                )

        return {"attention_mask": attention_mask, "input_ids": input_ids}
717
'''simple docstring''' from __future__ import annotations import string from itertools import cycle, product from pathlib import Path UpperCamelCase : str = ( string.ascii_letters + string.digits + string.punctuation + string.whitespace ) UpperCamelCase : list[int] = [ord(letter) for letter in string.ascii_lowercase] UpperCamelCase : set[int] = {ord(char) for char in VALID_CHARS} UpperCamelCase : list[str] = ["the", "be", "to", "of", "and", "in", "that", "have"] def A__ ( __lowerCAmelCase : list[int] , __lowerCAmelCase : tuple[int, ...] ): lowerCamelCase__ = "" lowerCamelCase__ = 42 lowerCamelCase__ = 42 lowerCamelCase__ = 42 for keychar, cipherchar in zip(cycle(__lowerCAmelCase ) , __lowerCAmelCase ): lowerCamelCase__ = cipherchar ^ keychar if decodedchar not in VALID_INTS: return None decoded += chr(__lowerCAmelCase ) return decoded def A__ ( __lowerCAmelCase : list[int] ): lowerCamelCase__ = [] for key in product(__lowerCAmelCase , repeat=3 ): lowerCamelCase__ = try_key(__lowerCAmelCase , __lowerCAmelCase ) if encoded is not None: possibles.append(__lowerCAmelCase ) return possibles def A__ ( __lowerCAmelCase : list[str] , __lowerCAmelCase : str ): return [possible for possible in possibles if common_word in possible.lower()] def A__ ( __lowerCAmelCase : str = "p059_cipher.txt" ): lowerCamelCase__ = 42 lowerCamelCase__ = 42 lowerCamelCase__ = 42 lowerCamelCase__ = 42 lowerCamelCase__ = Path(__lowerCAmelCase ).parent.joinpath(__lowerCAmelCase ).read_text(encoding="""utf-8""" ) lowerCamelCase__ = [int(__lowerCAmelCase ) for number in data.strip().split(""",""" )] lowerCamelCase__ = filter_valid_chars(__lowerCAmelCase ) for common_word in COMMON_WORDS: lowerCamelCase__ = filter_common_word(__lowerCAmelCase , __lowerCAmelCase ) if len(__lowerCAmelCase ) == 1: break lowerCamelCase__ = possibles[0] return sum(ord(__lowerCAmelCase ) for char in decoded_text ) if __name__ == "__main__": print(F'{solution() = }')
9
0
'''simple docstring''' from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline else: from .camera import create_pan_cameras from .pipeline_shap_e import ShapEPipeline from .pipeline_shap_e_img2img import ShapEImgaImgPipeline from .renderer import ( BoundingBoxVolume, ImportanceRaySampler, MLPNeRFModelOutput, MLPNeRSTFModel, ShapEParamsProjModel, ShapERenderer, StratifiedRaySampler, VoidNeRFModel, )
664
"""LLaMA model configuration."""
from ...configuration_utils import PretrainedConfig
from ...utils import logging

logger = logging.get_logger(__name__)

LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {}


class LlamaConfig(PretrainedConfig):
    """Configuration class for LLaMA models.

    Stores architecture hyper-parameters; defaults match LLaMA-7B.
    NOTE(review): parameter names restored from the assignment order of the
    obfuscated original, which used undefined placeholder names.
    """

    model_type = "llama"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=32_000,
        hidden_size=4_096,
        intermediate_size=11_008,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=None,
        hidden_act="silu",
        max_position_embeddings=2_048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        pretraining_tp=1,
        tie_word_embeddings=False,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads

        # for backward compatibility: MHA checkpoints predate GQA support
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads

        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` dict: {"type": "linear"|"dynamic", "factor": float > 1}."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            # Error message fixed: the code reads `type`/`factor`, and the
            # original said "with with ... `name` and `factor`".
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}")
664
1
"""Flax MT5 model: thin subclasses of the Flax T5 implementation."""
import jax.numpy as jnp

from ...utils import logging
from ..ta.modeling_flax_ta import FlaxTaEncoderModel, FlaxTaForConditionalGeneration, FlaxTaModel
from .configuration_mta import MTaConfig

logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "T5Config"


def shift_tokens_right(input_ids, pad_token_id, decoder_start_token_id) -> jnp.ndarray:
    """Shift input ids one token to the right for teacher-forced decoding.

    Prepends `decoder_start_token_id` and replaces any -100 label sentinel
    with `pad_token_id`.
    """
    shifted_input_ids = jnp.zeros_like(input_ids)
    shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1])
    shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id)

    shifted_input_ids = jnp.where(shifted_input_ids == -100, pad_token_id, shifted_input_ids)
    return shifted_input_ids


class FlaxMTaModel(FlaxTaModel):
    # Same architecture as T5; only the model type and config class differ.
    model_type = "mt5"
    config_class = MTaConfig


class FlaxMTaEncoderModel(FlaxTaEncoderModel):
    model_type = "mt5"
    config_class = MTaConfig


class FlaxMTaForConditionalGeneration(FlaxTaForConditionalGeneration):
    model_type = "mt5"
    config_class = MTaConfig
175
"""AC power calculations from apparent power and power factor."""
import math


def real_power(apparent_power: float, power_factor: float) -> float:
    """Return real (active) power: P = S * pf.

    Raises:
        ValueError: if `power_factor` is not a number in [-1, 1].
    """
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * power_factor


def reactive_power(apparent_power: float, power_factor: float) -> float:
    """Return reactive power: Q = S * sqrt(1 - pf^2).

    Raises:
        ValueError: if `power_factor` is not a number in [-1, 1].
    """
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * math.sqrt(1 - power_factor**2)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
175
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) __lowerCamelCase = { 'configuration_funnel': ['FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FunnelConfig'], 'convert_funnel_original_tf_checkpoint_to_pytorch': [], 'tokenization_funnel': ['FunnelTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase = ['FunnelTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase = [ 'FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST', 'FunnelBaseModel', 'FunnelForMaskedLM', 'FunnelForMultipleChoice', 'FunnelForPreTraining', 'FunnelForQuestionAnswering', 'FunnelForSequenceClassification', 'FunnelForTokenClassification', 'FunnelModel', 'FunnelPreTrainedModel', 'load_tf_weights_in_funnel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase = [ 'TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFFunnelBaseModel', 'TFFunnelForMaskedLM', 'TFFunnelForMultipleChoice', 'TFFunnelForPreTraining', 'TFFunnelForQuestionAnswering', 'TFFunnelForSequenceClassification', 'TFFunnelForTokenClassification', 'TFFunnelModel', 'TFFunnelPreTrainedModel', ] if TYPE_CHECKING: from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig from .tokenization_funnel import FunnelTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_funnel_fast import FunnelTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_funnel import ( FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST, FunnelBaseModel, 
FunnelForMaskedLM, FunnelForMultipleChoice, FunnelForPreTraining, FunnelForQuestionAnswering, FunnelForSequenceClassification, FunnelForTokenClassification, FunnelModel, FunnelPreTrainedModel, load_tf_weights_in_funnel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_funnel import ( TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST, TFFunnelBaseModel, TFFunnelForMaskedLM, TFFunnelForMultipleChoice, TFFunnelForPreTraining, TFFunnelForQuestionAnswering, TFFunnelForSequenceClassification, TFFunnelForTokenClassification, TFFunnelModel, TFFunnelPreTrainedModel, ) else: import sys __lowerCamelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
96
"""simple docstring""" from collections import OrderedDict from typing import List, Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __lowerCamelCase = logging.get_logger(__name__) __lowerCamelCase = { 'google/efficientnet-b7': 'https://huggingface.co/google/efficientnet-b7/resolve/main/config.json', } class __A ( SCREAMING_SNAKE_CASE_ ): UpperCAmelCase__ = "efficientnet" def __init__( self : Optional[Any] , __snake_case : int = 3 , __snake_case : int = 6_0_0 , __snake_case : float = 2.0 , __snake_case : float = 3.1 , __snake_case : int = 8 , __snake_case : List[int] = [3, 3, 5, 3, 5, 5, 3] , __snake_case : List[int] = [3_2, 1_6, 2_4, 4_0, 8_0, 1_1_2, 1_9_2] , __snake_case : List[int] = [1_6, 2_4, 4_0, 8_0, 1_1_2, 1_9_2, 3_2_0] , __snake_case : List[int] = [] , __snake_case : List[int] = [1, 2, 2, 2, 1, 2, 1] , __snake_case : List[int] = [1, 2, 2, 3, 3, 4, 1] , __snake_case : List[int] = [1, 6, 6, 6, 6, 6, 6] , __snake_case : float = 0.25 , __snake_case : str = "swish" , __snake_case : int = 2_5_6_0 , __snake_case : str = "mean" , __snake_case : float = 0.02 , __snake_case : float = 0.001 , __snake_case : float = 0.99 , __snake_case : float = 0.5 , __snake_case : float = 0.2 , **__snake_case : List[Any] , ) -> List[Any]: super().__init__(**__snake_case ) __magic_name__: str = num_channels __magic_name__: List[str] = image_size __magic_name__: List[str] = width_coefficient __magic_name__: Optional[Any] = depth_coefficient __magic_name__: Tuple = depth_divisor __magic_name__: Dict = kernel_sizes __magic_name__: int = in_channels __magic_name__: str = out_channels __magic_name__: Dict = depthwise_padding __magic_name__: Union[str, Any] = strides __magic_name__: Dict = num_block_repeats __magic_name__: Tuple = expand_ratios __magic_name__: List[str] = squeeze_expansion_ratio __magic_name__: Any = hidden_act __magic_name__: Tuple = hidden_dim __magic_name__: int = 
pooling_type __magic_name__: int = initializer_range __magic_name__: List[str] = batch_norm_eps __magic_name__: str = batch_norm_momentum __magic_name__: List[str] = dropout_rate __magic_name__: Dict = drop_connect_rate __magic_name__: Optional[Any] = sum(__snake_case ) * 4 class __A ( SCREAMING_SNAKE_CASE_ ): UpperCAmelCase__ = version.parse("1.11" ) @property def lowerCamelCase__ ( self : List[str] ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) @property def lowerCamelCase__ ( self : List[Any] ) -> float: return 1E-5
96
1
from __future__ import annotations from fractions import Fraction def A ( UpperCAmelCase , UpperCAmelCase ): return ( num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den ) def A ( UpperCAmelCase ): _snake_case : Any = [] _snake_case : List[Any] = 11 _snake_case : List[str] = int("1" + "0" * digit_len ) for num in range(UpperCAmelCase , UpperCAmelCase ): while den <= 99: if (num != den) and (num % 10 == den // 10) and (den % 10 != 0): if is_digit_cancelling(UpperCAmelCase , UpperCAmelCase ): solutions.append(F"""{num}/{den}""" ) den += 1 num += 1 _snake_case : Union[str, Any] = 10 return solutions def A ( UpperCAmelCase = 2 ): _snake_case : Any = 1.0 for fraction in fraction_list(UpperCAmelCase ): _snake_case : Any = Fraction(UpperCAmelCase ) result *= frac.denominator / frac.numerator return int(UpperCAmelCase ) if __name__ == "__main__": print(solution())
701
"""Draw a Sierpinski triangle of a given recursion depth with turtle graphics."""
import sys
import turtle


def get_mid(p1: tuple[float, float], p2: tuple[float, float]) -> tuple[float, float]:
    """Return the midpoint of two points.

    (The obfuscated original averaged p1 with itself — fixed to average p1 and p2.)
    """
    return (p1[0] + p2[0]) / 2, (p1[1] + p2[1]) / 2


def triangle(
    vertex1: tuple[float, float],
    vertex2: tuple[float, float],
    vertex3: tuple[float, float],
    depth: int,
) -> None:
    """Draw one triangle, then recurse on its three corner sub-triangles."""
    my_pen.up()
    my_pen.goto(vertex1[0], vertex1[1])
    my_pen.down()
    my_pen.goto(vertex2[0], vertex2[1])
    my_pen.goto(vertex3[0], vertex3[1])
    my_pen.goto(vertex1[0], vertex1[1])

    if depth == 0:
        return

    triangle(vertex1, get_mid(vertex1, vertex2), get_mid(vertex1, vertex3), depth - 1)
    triangle(vertex2, get_mid(vertex1, vertex2), get_mid(vertex2, vertex3), depth - 1)
    triangle(vertex3, get_mid(vertex3, vertex2), get_mid(vertex1, vertex3), depth - 1)


if __name__ == "__main__":
    if len(sys.argv) != 2:
        raise ValueError(
            "Correct format for using this script: "
            "python fractals.py <int:depth_for_fractal>"
        )
    my_pen = turtle.Turtle()
    my_pen.ht()
    my_pen.speed(5)
    my_pen.pencolor("red")

    vertices = [(-175, -125), (0, 175), (175, -125)]  # vertices of triangle
    triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
278
0
from __future__ import annotations from random import choice def __lowerCamelCase ( A__ : Dict ) -> Optional[int]: return choice(A__ ) def __lowerCamelCase ( A__ : list[int] , A__ : int ) -> int: lowerCamelCase_ : Optional[int] = random_pivot(A__ ) # partition based on pivot # linear time lowerCamelCase_ : Optional[Any] = [e for e in lst if e < pivot] lowerCamelCase_ : List[Any] = [e for e in lst if e > pivot] # if we get lucky, pivot might be the element we want. # we can easily see this: # small (elements smaller than k) # + pivot (kth element) # + big (elements larger than k) if len(A__ ) == k - 1: return pivot # pivot is in elements bigger than k elif len(A__ ) < k - 1: return kth_number(A__ , k - len(A__ ) - 1 ) # pivot is in elements smaller than k else: return kth_number(A__ , A__ ) if __name__ == "__main__": import doctest doctest.testmod()
278
"""Convert BiT checkpoints from the timm library to the HuggingFace format."""
import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform

from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging

logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_config(model_name):
    """Build a BitConfig with ImageNet-1k labels for the given timm model name."""
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    conv_layer = "std_conv" if "bit" in model_name else False

    # note that when using BiT as backbone for ViT-hybrid checkpoints,
    # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
    # config.conv_layer = "std_conv_same"
    config = BitConfig(
        conv_layer=conv_layer,
        num_labels=1000,
        id2label=id2label,
        label2id=label2id,
    )

    return config


def rename_key(name):
    """Map a timm state-dict key to the corresponding HF BiT key."""
    if "stem.conv" in name:
        name = name.replace("stem.conv", "bit.embedder.convolution")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "head.fc" in name:
        name = name.replace("head.fc", "classifier.1")
    if name.startswith("norm"):
        name = "bit." + name
    if "bit" not in name and "classifier" not in name:
        name = "bit.encoder." + name

    return name


def prepare_img():
    """Download the standard COCO cats image used to verify conversions."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_bit_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    """
    Copy/paste/tweak a timm BiT model's weights to our BiT structure and
    verify outputs match before optionally saving/pushing.
    """
    config = get_config(model_name)

    # load original model from timm
    timm_model = create_model(model_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model
    state_dict = timm_model.state_dict()
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val.squeeze() if "head" in key else val

    # load HuggingFace model
    model = BitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )

    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)

    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print("Logits:", logits[0, :3])
    print("Predicted class:", model.config.id2label[logits.argmax(-1).item()])
    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {model_name} and processor to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model {model_name} and processor to the hub")
        model.push_to_hub(f"ybelkada/{model_name}")
        processor.push_to_hub(f"ybelkada/{model_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="resnetv2_50x1_bitm",
        type=str,
        help="Name of the BiT timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model to the hub.",
    )

    args = parser.parse_args()
    convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
278
1
"""simple docstring""" from __future__ import annotations from collections.abc import Callable from typing import Any, Generic, TypeVar UpperCAmelCase : Dict = TypeVar('T') class lowerCamelCase__ ( Generic[T] ): """simple docstring""" def __init__( self : Any , UpperCamelCase : list[T] , UpperCamelCase : Callable[[T, T], T] ): '''simple docstring''' __UpperCAmelCase : Any | T = None __UpperCAmelCase : int = len(UpperCamelCase ) __UpperCAmelCase : list[T] = [any_type for _ in range(self.N )] + arr __UpperCAmelCase : Dict = fnc self.build() def lowerCamelCase__ ( self : Any ): '''simple docstring''' for p in range(self.N - 1 , 0 , -1 ): __UpperCAmelCase : Tuple = self.fn(self.st[p * 2] , self.st[p * 2 + 1] ) def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase : int , UpperCamelCase : T ): '''simple docstring''' p += self.N __UpperCAmelCase : Tuple = v while p > 1: __UpperCAmelCase : Dict = p // 2 __UpperCAmelCase : List[str] = self.fn(self.st[p * 2] , self.st[p * 2 + 1] ) def lowerCamelCase__ ( self : Dict , UpperCamelCase : int , UpperCamelCase : int ): # noqa: E741 '''simple docstring''' __UpperCAmelCase ,__UpperCAmelCase : Any = l + self.N, r + self.N __UpperCAmelCase : T | None = None while l <= r: if l % 2 == 1: __UpperCAmelCase : int = self.st[l] if res is None else self.fn(UpperCamelCase , self.st[l] ) if r % 2 == 0: __UpperCAmelCase : Optional[int] = self.st[r] if res is None else self.fn(UpperCamelCase , self.st[r] ) __UpperCAmelCase ,__UpperCAmelCase : Any = (l + 1) // 2, (r - 1) // 2 return res if __name__ == "__main__": from functools import reduce UpperCAmelCase : Dict = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12] UpperCAmelCase : Optional[Any] = { 0: 7, 1: 2, 2: 6, 3: -14, 4: 5, 5: 4, 6: 7, 7: -10, 8: 9, 9: 10, 10: 12, 11: 1, } UpperCAmelCase : Any = SegmentTree(test_array, min) UpperCAmelCase : Dict = SegmentTree(test_array, max) UpperCAmelCase : Optional[int] = SegmentTree(test_array, lambda a, b: a + b) def lowerCamelCase ( ) -> None: 
'''simple docstring''' for i in range(len(_UpperCamelCase ) ): for j in range(_UpperCamelCase , len(_UpperCamelCase ) ): __UpperCAmelCase : int = reduce(_UpperCamelCase , test_array[i : j + 1] ) __UpperCAmelCase : Optional[int] = reduce(_UpperCamelCase , test_array[i : j + 1] ) __UpperCAmelCase : List[Any] = reduce(lambda _UpperCamelCase , _UpperCamelCase : a + b , test_array[i : j + 1] ) assert min_range == min_segment_tree.query(_UpperCamelCase , _UpperCamelCase ) assert max_range == max_segment_tree.query(_UpperCamelCase , _UpperCamelCase ) assert sum_range == sum_segment_tree.query(_UpperCamelCase , _UpperCamelCase ) test_all_segments() for index, value in test_updates.items(): UpperCAmelCase : Optional[int] = value min_segment_tree.update(index, value) max_segment_tree.update(index, value) sum_segment_tree.update(index, value) test_all_segments()
299
"""simple docstring""" from typing import Optional, Union import torch from torch import nn from ...configuration_utils import ConfigMixin, register_to_config from ...models.modeling_utils import ModelMixin class lowerCamelCase__ ( A , A ): """simple docstring""" @register_to_config def __init__( self : Dict , UpperCamelCase : int = 768 , ): '''simple docstring''' super().__init__() __UpperCAmelCase : Dict = nn.Parameter(torch.zeros(1 , UpperCamelCase ) ) __UpperCAmelCase : Any = nn.Parameter(torch.ones(1 , UpperCamelCase ) ) def lowerCamelCase__ ( self : Any , UpperCamelCase : Optional[Union[str, torch.device]] = None , UpperCamelCase : Optional[torch.dtype] = None , ): '''simple docstring''' __UpperCAmelCase : Any = nn.Parameter(self.mean.to(UpperCamelCase ).to(UpperCamelCase ) ) __UpperCAmelCase : Optional[Any] = nn.Parameter(self.std.to(UpperCamelCase ).to(UpperCamelCase ) ) return self def lowerCamelCase__ ( self : str , UpperCamelCase : List[Any] ): '''simple docstring''' __UpperCAmelCase : int = (embeds - self.mean) * 1.0 / self.std return embeds def lowerCamelCase__ ( self : Dict , UpperCamelCase : int ): '''simple docstring''' __UpperCAmelCase : Dict = (embeds * self.std) + self.mean return embeds
299
1
"""simple docstring""" import os import tempfile import unittest from transformers import NezhaConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, NezhaForMaskedLM, NezhaForMultipleChoice, NezhaForNextSentencePrediction, NezhaForPreTraining, NezhaForQuestionAnswering, NezhaForSequenceClassification, NezhaForTokenClassification, NezhaModel, ) from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST class _UpperCAmelCase: def __init__( self , __a , __a=13 , __a=7 , __a=True , __a=True , __a=True , __a=True , __a=99 , __a=32 , __a=5 , __a=4 , __a=37 , __a="gelu" , __a=0.1 , __a=0.1 , __a=1_28 , __a=32 , __a=16 , __a=2 , __a=0.02 , __a=3 , __a=4 , __a=None , ) -> List[str]: '''simple docstring''' _UpperCamelCase = parent _UpperCamelCase = batch_size _UpperCamelCase = seq_length _UpperCamelCase = is_training _UpperCamelCase = use_input_mask _UpperCamelCase = use_token_type_ids _UpperCamelCase = use_labels _UpperCamelCase = vocab_size _UpperCamelCase = hidden_size _UpperCamelCase = num_hidden_layers _UpperCamelCase = num_attention_heads _UpperCamelCase = intermediate_size _UpperCamelCase = hidden_act _UpperCamelCase = hidden_dropout_prob _UpperCamelCase = attention_probs_dropout_prob _UpperCamelCase = max_position_embeddings _UpperCamelCase = type_vocab_size _UpperCamelCase = type_sequence_label_size _UpperCamelCase = initializer_range _UpperCamelCase = num_labels _UpperCamelCase = num_choices _UpperCamelCase = scope def UpperCAmelCase ( self) -> Optional[Any]: 
'''simple docstring''' _UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) _UpperCamelCase = None if self.use_input_mask: _UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length]) _UpperCamelCase = None if self.use_token_type_ids: _UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size) _UpperCamelCase = None _UpperCamelCase = None _UpperCamelCase = None if self.use_labels: _UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size) _UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels) _UpperCamelCase = ids_tensor([self.batch_size] , self.num_choices) _UpperCamelCase = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCAmelCase ( self) -> Any: '''simple docstring''' return NezhaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__a , initializer_range=self.initializer_range , ) def UpperCAmelCase ( self) -> Optional[int]: '''simple docstring''' ( ( _UpperCamelCase ) , ( _UpperCamelCase ) , ( _UpperCamelCase ) , ( _UpperCamelCase ) , ( _UpperCamelCase ) , ( _UpperCamelCase ) , ( _UpperCamelCase ) , ) = self.prepare_config_and_inputs() _UpperCamelCase = True _UpperCamelCase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size]) _UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def 
# NOTE(review): this chunk is the latter half of a Nezha test module (transformers-style).
# It opens mid-definition: the `def` keyword and the earlier part of the enclosing
# model-tester class sit before this excerpt, so the first fragment below is incomplete.
# NOTE(review): assignment targets throughout appear machine-mangled -- values are bound
# to the placeholder `_UpperCamelCase` while later statements read the original names
# (`model`, `result`, `config_and_inputs`, ...). As written this raises NameError at run
# time; restore the real target names before use.
UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a) -> Union[str, Any]:
        """Build a bare NezhaModel, run it with and without masks, and check output shapes."""
        _UpperCamelCase = NezhaModel(config=__a)
        model.to(__a)
        model.eval()
        _UpperCamelCase = model(__a , attention_mask=__a , token_type_ids=__a)
        _UpperCamelCase = model(__a , token_type_ids=__a)
        _UpperCamelCase = model(__a)
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size))

    def UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a , __a , __a , ) -> Dict:
        """Run NezhaModel as a decoder with encoder hidden states / encoder attention mask."""
        _UpperCamelCase = True
        _UpperCamelCase = NezhaModel(__a)
        model.to(__a)
        model.eval()
        _UpperCamelCase = model(
            __a , attention_mask=__a , token_type_ids=__a , encoder_hidden_states=__a , encoder_attention_mask=__a , )
        _UpperCamelCase = model(
            __a , attention_mask=__a , token_type_ids=__a , encoder_hidden_states=__a , )
        _UpperCamelCase = model(__a , attention_mask=__a , token_type_ids=__a)
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size))

    def UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a) -> Dict:
        """Check NezhaForMaskedLM logits shape (batch, seq, vocab)."""
        _UpperCamelCase = NezhaForMaskedLM(config=__a)
        model.to(__a)
        model.eval()
        _UpperCamelCase = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))

    def UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a) -> Any:
        """Check NezhaForNextSentencePrediction logits shape (batch, 2)."""
        _UpperCamelCase = NezhaForNextSentencePrediction(config=__a)
        model.to(__a)
        model.eval()
        _UpperCamelCase = model(
            __a , attention_mask=__a , token_type_ids=__a , labels=__a , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, 2))

    def UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a) -> Dict:
        """Check NezhaForPreTraining MLM and seq-relationship logits shapes."""
        _UpperCamelCase = NezhaForPreTraining(config=__a)
        model.to(__a)
        model.eval()
        _UpperCamelCase = model(
            __a , attention_mask=__a , token_type_ids=__a , labels=__a , next_sentence_label=__a , )
        self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2))

    def UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a) -> List[Any]:
        """Check NezhaForQuestionAnswering start/end logits shapes (batch, seq)."""
        _UpperCamelCase = NezhaForQuestionAnswering(config=__a)
        model.to(__a)
        model.eval()
        _UpperCamelCase = model(
            __a , attention_mask=__a , token_type_ids=__a , start_positions=__a , end_positions=__a , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))

    def UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a) -> Dict:
        """Check NezhaForSequenceClassification logits shape (batch, num_labels)."""
        _UpperCamelCase = self.num_labels
        _UpperCamelCase = NezhaForSequenceClassification(__a)
        model.to(__a)
        model.eval()
        _UpperCamelCase = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))

    def UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a) -> List[str]:
        """Check NezhaForTokenClassification logits shape (batch, seq, num_labels)."""
        _UpperCamelCase = self.num_labels
        _UpperCamelCase = NezhaForTokenClassification(config=__a)
        model.to(__a)
        model.eval()
        _UpperCamelCase = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))

    def UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a) -> Union[str, Any]:
        """Check NezhaForMultipleChoice logits shape (batch, num_choices)."""
        _UpperCamelCase = self.num_choices
        _UpperCamelCase = NezhaForMultipleChoice(config=__a)
        model.to(__a)
        model.eval()
        # inputs are tiled per choice: (batch, seq) -> (batch, num_choices, seq)
        _UpperCamelCase = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
        _UpperCamelCase = token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
        _UpperCamelCase = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
        _UpperCamelCase = model(
            __a , attention_mask=__a , token_type_ids=__a , labels=__a , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))

    def UpperCAmelCase ( self) -> Tuple:
        """Split prepare_config_and_inputs() into (config, inputs_dict) for the common suite."""
        _UpperCamelCase = self.prepare_config_and_inputs()
        (
            ( _UpperCamelCase ) , ( _UpperCamelCase ) , ( _UpperCamelCase ) , ( _UpperCamelCase ) ,
            ( _UpperCamelCase ) , ( _UpperCamelCase ) , ( _UpperCamelCase ) ,
        ) = config_and_inputs
        _UpperCamelCase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
        return config, inputs_dict


@require_torch
class _UpperCAmelCase( lowerCamelCase , lowerCamelCase , lowerCamelCase , unittest.TestCase ):
    """Common-suite tests for the Nezha model family."""
    # NOTE(review): the base-class names `lowerCamelCase` and the repeated attribute name
    # `lowercase__` also look mangled (upstream these are the tester mixins and
    # all_model_classes / pipeline_model_mapping / fx_compatible) -- confirm before running.

    lowercase__ = (
        (
            NezhaModel,
            NezhaForMaskedLM,
            NezhaForMultipleChoice,
            NezhaForNextSentencePrediction,
            NezhaForPreTraining,
            NezhaForQuestionAnswering,
            NezhaForSequenceClassification,
            NezhaForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    # pipeline-task -> model-class mapping used by the pipeline test mixin
    lowercase__ = (
        {
            'feature-extraction': NezhaModel,
            'fill-mask': NezhaForMaskedLM,
            'question-answering': NezhaForQuestionAnswering,
            'text-classification': NezhaForSequenceClassification,
            'token-classification': NezhaForTokenClassification,
            'zero-shot': NezhaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    lowercase__ = True

    def UpperCAmelCase ( self , __a , __a , __a=False) -> str:
        """Add dummy label tensors when the common suite asks for labelled inputs."""
        _UpperCamelCase = super()._prepare_for_class(__a , __a , return_labels=__a)
        if return_labels:
            if model_class in get_values(__a):
                _UpperCamelCase = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__a)
                _UpperCamelCase = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=__a)
        return inputs_dict

    def UpperCAmelCase ( self) -> int:
        """Create the shared model tester and config tester."""
        _UpperCamelCase = NezhaModelTester(self)
        _UpperCamelCase = ConfigTester(self , config_class=__a , hidden_size=37)

    def UpperCAmelCase ( self) -> Any:
        """Run the common configuration checks."""
        self.config_tester.run_common_tests()

    def UpperCAmelCase ( self) -> str:
        """Exercise the bare model check."""
        _UpperCamelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*__a)

    def UpperCAmelCase ( self) -> Optional[int]:
        """Exercise the decoder-mode model check."""
        _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*__a)

    def UpperCAmelCase ( self) -> Any:
        """Decoder check with a default (None) encoder attention mask."""
        # This regression test was failing with PyTorch < 1.3
        (
            ( _UpperCamelCase ) , ( _UpperCamelCase ) , ( _UpperCamelCase ) , ( _UpperCamelCase ) ,
            ( _UpperCamelCase ) , ( _UpperCamelCase ) , ( _UpperCamelCase ) , ( _UpperCamelCase ) ,
            ( _UpperCamelCase ) ,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()
        _UpperCamelCase = None
        self.model_tester.create_and_check_model_as_decoder(
            __a , __a , __a , __a , __a , __a , __a , __a , __a , )

    def UpperCAmelCase ( self) -> Any:
        """Exercise the masked-LM head check."""
        _UpperCamelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*__a)

    def UpperCAmelCase ( self) -> Optional[int]:
        """Exercise the multiple-choice head check."""
        _UpperCamelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*__a)

    def UpperCAmelCase ( self) -> Tuple:
        """Exercise the next-sentence-prediction head check."""
        _UpperCamelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_next_sequence_prediction(*__a)

    def UpperCAmelCase ( self) -> Tuple:
        """Exercise the pretraining heads check."""
        _UpperCamelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*__a)

    def UpperCAmelCase ( self) -> str:
        """Exercise the question-answering head check."""
        _UpperCamelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*__a)

    def UpperCAmelCase ( self) -> Any:
        """Exercise the sequence-classification head check."""
        _UpperCamelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*__a)

    def UpperCAmelCase ( self) -> List[str]:
        """Exercise the token-classification head check."""
        _UpperCamelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*__a)

    @slow
    def UpperCAmelCase ( self) -> Union[str, Any]:
        """Smoke-test loading the first published checkpoint."""
        for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            _UpperCamelCase = NezhaModel.from_pretrained(__a)
            self.assertIsNotNone(__a)

    @slow
    @require_torch_gpu
    def UpperCAmelCase ( self) -> Tuple:
        """TorchScript round-trip: trace, save, reload and run each model class."""
        _UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # NezhaForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == NezhaForMultipleChoice:
                return
            _UpperCamelCase = True
            _UpperCamelCase = model_class(config=__a)
            _UpperCamelCase = self._prepare_for_class(__a , __a)
            _UpperCamelCase = torch.jit.trace(
                __a , (inputs_dict['''input_ids'''].to('''cpu'''), inputs_dict['''attention_mask'''].to('''cpu''')))
            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(__a , os.path.join(__a , '''bert.pt'''))
                _UpperCamelCase = torch.jit.load(os.path.join(__a , '''bert.pt''') , map_location=__a)
                loaded(inputs_dict['''input_ids'''].to(__a) , inputs_dict['''attention_mask'''].to(__a))


@require_torch
class _UpperCAmelCase( unittest.TestCase ):
    """Slow integration tests against the released `sijunhe/nezha-cn-base` checkpoint."""

    @slow
    def UpperCAmelCase ( self) -> Union[str, Any]:
        """Bare-model forward: check output shape and a 3x3 slice of hidden states."""
        _UpperCamelCase = NezhaModel.from_pretrained('''sijunhe/nezha-cn-base''')
        _UpperCamelCase = torch.tensor([[0, 1, 2, 3, 4, 5]])
        _UpperCamelCase = torch.tensor([[0, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            _UpperCamelCase = model(__a , attention_mask=__a)[0]
        _UpperCamelCase = torch.Size((1, 6, 7_68))
        self.assertEqual(output.shape , __a)
        _UpperCamelCase = torch.tensor([[[0.0685, 0.2441, 0.1102], [0.0600, 0.1906, 0.1349], [0.0221, 0.0819, 0.0586]]])
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __a , atol=1e-4))

    @slow
    def UpperCAmelCase ( self) -> Tuple:
        """Masked-LM forward: check logits shape and a 3x3 slice of the logits."""
        _UpperCamelCase = NezhaForMaskedLM.from_pretrained('''sijunhe/nezha-cn-base''')
        _UpperCamelCase = torch.tensor([[0, 1, 2, 3, 4, 5]])
        _UpperCamelCase = torch.tensor([[1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            _UpperCamelCase = model(__a , attention_mask=__a)[0]
        _UpperCamelCase = torch.Size((1, 6, 2_11_28))
        self.assertEqual(output.shape , __a)
        _UpperCamelCase = torch.tensor(
            [[-2.7939, -1.7902, -2.2189], [-2.8585, -1.8908, -2.3723], [-2.6499, -1.7750, -2.2558]])
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __a , atol=1e-4))
19
"""Convert a fairseq Speech2Text checkpoint into a Transformers checkpoint.

Usage:
    python <script>.py --fairseq_path model.pt --pytorch_dump_folder_path out/
"""
import argparse

import torch
from torch import nn

# NOTE(review): upstream these classes are named Speech2TextConfig /
# Speech2TextForConditionalGeneration; the `a`-for-`2` spelling matches this repo's
# import names and is kept unchanged.
from transformers import SpeechaTextConfig, SpeechaTextForConditionalGeneration


def remove_ignore_keys_(state_dict):
    """Drop fairseq bookkeeping entries that have no Transformers counterpart (in place)."""
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_keys(s_dict):
    """Rename fairseq parameter names to the Transformers scheme (in place).

    ``transformer_layers`` -> ``layers`` and ``subsample`` -> ``conv``.
    """
    # snapshot the keys: the dict is mutated while we iterate
    keys = list(s_dict.keys())
    for key in keys:
        if "transformer_layers" in key:
            # e.g. encoder.transformer_layers.0.* -> encoder.layers.0.*
            s_dict[key.replace("transformer_layers", "layers")] = s_dict.pop(key)
        elif "subsample" in key:
            s_dict[key.replace("subsample", "conv")] = s_dict.pop(key)


def make_linear_from_emb(emb):
    """Return a bias-free ``nn.Linear`` whose weight is the embedding weight (weight tying)."""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_sat_checkpoint_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    """Load a fairseq checkpoint, translate its config/weights and save a Transformers model.

    Args:
        checkpoint_path: path to the fairseq ``.pt`` checkpoint.
        pytorch_dump_folder_path: directory to write the converted model to.

    Raises:
        ValueError: if weights other than the positional-embedding buffers are missing.
    """
    mam_aaa = torch.load(checkpoint_path, map_location="cpu")
    args = mam_aaa["args"]
    state_dict = mam_aaa["model"]
    lm_head_weights = state_dict["decoder.output_projection.weight"]

    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)

    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]

    tie_embeds = args.share_decoder_input_output_embed

    conv_kernel_sizes = [int(i) for i in args.conv_kernel_sizes.split(",")]
    config = SpeechaTextConfig(
        vocab_size=vocab_size,
        max_source_positions=args.max_source_positions,
        max_target_positions=args.max_target_positions,
        encoder_layers=args.encoder_layers,
        decoder_layers=args.decoder_layers,
        encoder_attention_heads=args.encoder_attention_heads,
        decoder_attention_heads=args.decoder_attention_heads,
        encoder_ffn_dim=args.encoder_ffn_embed_dim,
        decoder_ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.encoder_embed_dim,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="relu",
        num_conv_layers=len(conv_kernel_sizes),
        conv_channels=args.conv_channels,
        conv_kernel_sizes=conv_kernel_sizes,
        input_feat_per_channel=args.input_feat_per_channel,
        input_channels=args.input_channels,
        tie_word_embeddings=tie_embeds,
        num_beams=5,
        max_length=2_00,
        use_cache=True,
        decoder_start_token_id=2,
        early_stopping=True,
    )

    model = SpeechaTextForConditionalGeneration(config)
    # strict=False: the sinusoidal positional-embedding buffers are recomputed, not loaded
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            f" but all the following weights are missing {missing}"
        )

    if tie_embeds:
        # tie the LM head to the decoder input embeddings
        model.lm_head = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.lm_head.weight.data = lm_head_weights

    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("""--fairseq_path""", type=str, help="""Path to the fairseq model (.pt) file.""")
    parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
    args = parser.parse_args()
    convert_fairseq_sat_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
451
0
"""Run-time dependency version checks (transformers-style)."""
import sys

from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core

# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = 'python tqdm regex requests packaging filelock numpy tokenizers'.split()
if sys.version_info < (3, 7):
    pkgs_to_check_at_runtime.append('dataclasses')
if sys.version_info < (3, 8):
    pkgs_to_check_at_runtime.append('importlib_metadata')

for pkg in pkgs_to_check_at_runtime:
    if pkg in deps:
        if pkg == "tokenizers":
            # must be loaded here, or else tqdm check may fail
            from .utils import is_tokenizers_available

            if not is_tokenizers_available():
                continue  # not required, check version only if installed
        require_version_core(deps[pkg])
    else:
        raise ValueError(F'can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py')


def __lowerCamelCase(requirement, hint=None):
    """Check that the pinned requirement for *requirement* is satisfied.

    Args:
        requirement: package name to look up in the ``deps`` table.
        hint: optional extra message passed through to ``require_version``.
    """
    # fixed: the mangled version read the leaked loop variable `pkg` and declared
    # two parameters with the same name (a SyntaxError)
    require_version(deps[requirement], hint)
719
"""Configuration for VAN (Visual Attention Network) models."""
from ....configuration_utils import PretrainedConfig
from ....utils import logging


logger = logging.get_logger(__name__)

# fixed: both module-level names were mangled to the same `snake_case__` identifier
VAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'Visual-Attention-Network/van-base': (
        'https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json'
    ),
}


class SCREAMING_SNAKE_CASE_(PretrainedConfig):  # base was the undefined name `a__`; the import provides PretrainedConfig
    """Stores the configuration of a VAN model (image size, stage layout, regularisation)."""

    # NOTE(review): upstream PretrainedConfig subclasses expose this as `model_type`;
    # `_a` looks like a mangled attribute name -- confirm against callers before renaming.
    _a = "van"

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        hidden_sizes=[64, 128, 320, 512],
        depths=[3, 3, 12, 3],
        mlp_ratios=[8, 8, 4, 4],
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        layer_scale_init_value=1e-2,
        drop_path_rate=0.0,
        dropout_rate=0.0,
        **kwargs,
    ):
        """Build the config; fixed so every argument is stored on the instance.

        The mangled version declared every parameter as `__a` (duplicate names, a
        SyntaxError) and bound each value to a throwaway local instead of `self`.
        """
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.mlp_ratios = mlp_ratios
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.dropout_rate = dropout_rate
171
0
"""Lazy-import module init for MobileViT (transformers-style)."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)


# fixed: the structure dict and every optional extension below were bound to mangled
# `SCREAMING_SNAKE_CASE__` names while `_import_structure` was referenced at the end,
# and the `sys.modules[__name__]` registration was lost.
_import_structure = {
    'configuration_mobilevit': ['MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MobileViTConfig', 'MobileViTOnnxConfig'],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['feature_extraction_mobilevit'] = ['MobileViTFeatureExtractor']
    _import_structure['image_processing_mobilevit'] = ['MobileViTImageProcessor']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_mobilevit'] = [
        'MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'MobileViTForImageClassification',
        'MobileViTForSemanticSegmentation',
        'MobileViTModel',
        'MobileViTPreTrainedModel',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_tf_mobilevit'] = [
        'TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFMobileViTForImageClassification',
        'TFMobileViTForSemanticSegmentation',
        'TFMobileViTModel',
        'TFMobileViTPreTrainedModel',
    ]

if TYPE_CHECKING:
    # mirror the lazy structure for static type checkers
    from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_mobilevit import MobileViTFeatureExtractor
        from .image_processing_mobilevit import MobileViTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mobilevit import (
            MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileViTForImageClassification,
            MobileViTForSemanticSegmentation,
            MobileViTModel,
            MobileViTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mobilevit import (
            TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFMobileViTForImageClassification,
            TFMobileViTForSemanticSegmentation,
            TFMobileViTModel,
            TFMobileViTPreTrainedModel,
        )

else:
    import sys

    # replace this module with a lazy proxy that imports submodules on attribute access
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
643
# NOTE(review): DeepFloyd-IF pipeline tests (diffusers-style). Assignment targets are
# machine-mangled: values bind to `_UpperCAmelCase` while later statements read the
# original names (`generator`, `inputs`, `pipe_1`, `image`, `mem_bytes`, ...), so the
# code as written raises NameError at run time -- restore the real names before use.
# `torch.floataa` also looks like a mangled `torch.float16` -- confirm.
import gc
import random
import unittest

import torch

from diffusers import (
    IFImgaImgPipeline,
    IFImgaImgSuperResolutionPipeline,
    IFInpaintingPipeline,
    IFInpaintingSuperResolutionPipeline,
    IFPipeline,
    IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device

from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin


@skip_mps
class _SCREAMING_SNAKE_CASE ( A , A , unittest.TestCase ):
    """Fast common-suite tests for the base IF text-to-image pipeline."""
    # NOTE(review): the base-class names `A , A` are mangled (presumably
    # PipelineTesterMixin and IFPipelineTesterMixin) -- `A` is undefined here.

    __SCREAMING_SNAKE_CASE = IFPipeline
    __SCREAMING_SNAKE_CASE = TEXT_TO_IMAGE_PARAMS - {'''width''', '''height''', '''latents'''}
    __SCREAMING_SNAKE_CASE = TEXT_TO_IMAGE_BATCH_PARAMS
    __SCREAMING_SNAKE_CASE = PipelineTesterMixin.required_optional_params - {'''latents'''}

    def __snake_case( self ):
        """Return the dummy components supplied by the IF tester mixin."""
        return self._get_dummy_components()

    def __snake_case( self , A_ , A_=0 ):
        """Build deterministic dummy inputs for the given device and seed."""
        if str(A_ ).startswith("""mps""" ):
            # mps does not support device-bound generators
            _UpperCAmelCase : Tuple = torch.manual_seed(A_ )
        else:
            _UpperCAmelCase : Optional[Any] = torch.Generator(device=A_ ).manual_seed(A_ )
        _UpperCAmelCase : Any = {
            """prompt""": """A painting of a squirrel eating a burger""",
            """generator""": generator,
            """num_inference_steps""": 2,
            """output_type""": """numpy""",
        }
        return inputs

    def __snake_case( self ):
        """Common save/load check for optional components."""
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" )
    def __snake_case( self ):
        """float16 save/load round-trip (loose tolerance)."""
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_floataa(expected_max_diff=1e-1 )

    def __snake_case( self ):
        """Attention-slicing must not change outputs beyond tolerance."""
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )

    def __snake_case( self ):
        """Local save/load round-trip."""
        self._test_save_load_local()

    def __snake_case( self ):
        """Single-sample batching must match unbatched inference."""
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2 , )

    @unittest.skipIf(
        torch_device != """cuda""" or not is_xformers_available() ,
        reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
    def __snake_case( self ):
        """xFormers attention must not change outputs beyond tolerance."""
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )


@slow
@require_torch_gpu
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    """Slow GPU integration tests covering IF text2img, img2img and inpainting stages."""

    def __snake_case( self ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def __snake_case( self ):
        """Load stage-I/stage-II pipelines once and run all three task variants."""
        # if
        _UpperCAmelCase : List[str] = IFPipeline.from_pretrained("""DeepFloyd/IF-I-XL-v1.0""" , variant="""fp16""" , torch_dtype=torch.floataa )
        _UpperCAmelCase : Dict = IFSuperResolutionPipeline.from_pretrained(
            """DeepFloyd/IF-II-L-v1.0""" , variant="""fp16""" , torch_dtype=torch.floataa , text_encoder=A_ , tokenizer=A_ )
        # pre compute text embeddings and remove T5 to save memory
        pipe_a.text_encoder.to("""cuda""" )
        _UpperCAmelCase,_UpperCAmelCase : Dict = pipe_a.encode_prompt("""anime turtle""" , device="""cuda""" )
        del pipe_a.tokenizer
        del pipe_a.text_encoder
        gc.collect()
        _UpperCAmelCase : List[Any] = None
        _UpperCAmelCase : Any = None
        pipe_a.enable_model_cpu_offload()
        pipe_a.enable_model_cpu_offload()
        pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
        pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
        self._test_if(A_ , A_ , A_ , A_ )
        pipe_a.remove_all_hooks()
        pipe_a.remove_all_hooks()
        # img2img
        _UpperCAmelCase : Any = IFImgaImgPipeline(**pipe_a.components )
        _UpperCAmelCase : Union[str, Any] = IFImgaImgSuperResolutionPipeline(**pipe_a.components )
        pipe_a.enable_model_cpu_offload()
        pipe_a.enable_model_cpu_offload()
        pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
        pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
        self._test_if_imgaimg(A_ , A_ , A_ , A_ )
        pipe_a.remove_all_hooks()
        pipe_a.remove_all_hooks()
        # inpainting
        _UpperCAmelCase : Optional[int] = IFInpaintingPipeline(**pipe_a.components )
        _UpperCAmelCase : Union[str, Any] = IFInpaintingSuperResolutionPipeline(**pipe_a.components )
        pipe_a.enable_model_cpu_offload()
        pipe_a.enable_model_cpu_offload()
        pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
        pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
        self._test_if_inpainting(A_ , A_ , A_ , A_ )

    def __snake_case( self , A_ , A_ , A_ , A_ ):
        """text2img: run stage I (64x64) then stage II super-resolution (256x256)."""
        # pipeline 1
        _start_torch_memory_measurement()
        _UpperCAmelCase : Optional[Any] = torch.Generator(device="""cpu""" ).manual_seed(0 )
        _UpperCAmelCase : List[str] = pipe_a(
            prompt_embeds=A_ , negative_prompt_embeds=A_ , num_inference_steps=2 , generator=A_ , output_type="""np""" , )
        _UpperCAmelCase : int = output.images[0]
        assert image.shape == (64, 64, 3)
        _UpperCAmelCase : Dict = torch.cuda.max_memory_allocated()
        assert mem_bytes < 13 * 10**9
        _UpperCAmelCase : str = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy""" )
        assert_mean_pixel_difference(A_ , A_ )
        # pipeline 2
        _start_torch_memory_measurement()
        _UpperCAmelCase : List[Any] = torch.Generator(device="""cpu""" ).manual_seed(0 )
        _UpperCAmelCase : str = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(A_ )
        _UpperCAmelCase : str = pipe_a(
            prompt_embeds=A_ , negative_prompt_embeds=A_ , image=A_ , generator=A_ , num_inference_steps=2 , output_type="""np""" , )
        _UpperCAmelCase : int = output.images[0]
        assert image.shape == (2_56, 2_56, 3)
        _UpperCAmelCase : Any = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9
        _UpperCAmelCase : List[Any] = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy""" )
        assert_mean_pixel_difference(A_ , A_ )

    def __snake_case( self , A_ , A_ , A_ , A_ ):
        """img2img: run stage I then stage II, checking shapes, VRAM ceilings and references."""
        # pipeline 1
        _start_torch_memory_measurement()
        _UpperCAmelCase : Optional[int] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(A_ )
        _UpperCAmelCase : Dict = torch.Generator(device="""cpu""" ).manual_seed(0 )
        _UpperCAmelCase : List[str] = pipe_a(
            prompt_embeds=A_ , negative_prompt_embeds=A_ , image=A_ , num_inference_steps=2 , generator=A_ , output_type="""np""" , )
        _UpperCAmelCase : int = output.images[0]
        assert image.shape == (64, 64, 3)
        _UpperCAmelCase : str = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9
        _UpperCAmelCase : str = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy""" )
        assert_mean_pixel_difference(A_ , A_ )
        # pipeline 2
        _start_torch_memory_measurement()
        _UpperCAmelCase : List[str] = torch.Generator(device="""cpu""" ).manual_seed(0 )
        _UpperCAmelCase : Union[str, Any] = floats_tensor((1, 3, 2_56, 2_56) , rng=random.Random(0 ) ).to(A_ )
        _UpperCAmelCase : Optional[int] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(A_ )
        _UpperCAmelCase : Optional[int] = pipe_a(
            prompt_embeds=A_ , negative_prompt_embeds=A_ , image=A_ , original_image=A_ , generator=A_ , num_inference_steps=2 , output_type="""np""" , )
        _UpperCAmelCase : Dict = output.images[0]
        assert image.shape == (2_56, 2_56, 3)
        _UpperCAmelCase : Any = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9
        _UpperCAmelCase : int = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy""" )
        assert_mean_pixel_difference(A_ , A_ )

    def __snake_case( self , A_ , A_ , A_ , A_ ):
        """inpainting: run stage I then stage II with image + mask (+ original for stage II)."""
        # pipeline 1
        _start_torch_memory_measurement()
        _UpperCAmelCase : List[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(A_ )
        _UpperCAmelCase : Dict = floats_tensor((1, 3, 64, 64) , rng=random.Random(1 ) ).to(A_ )
        _UpperCAmelCase : Optional[Any] = torch.Generator(device="""cpu""" ).manual_seed(0 )
        _UpperCAmelCase : str = pipe_a(
            prompt_embeds=A_ , negative_prompt_embeds=A_ , image=A_ , mask_image=A_ , num_inference_steps=2 , generator=A_ , output_type="""np""" , )
        _UpperCAmelCase : Optional[Any] = output.images[0]
        assert image.shape == (64, 64, 3)
        _UpperCAmelCase : List[str] = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9
        _UpperCAmelCase : Dict = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy""" )
        assert_mean_pixel_difference(A_ , A_ )
        # pipeline 2
        _start_torch_memory_measurement()
        _UpperCAmelCase : str = torch.Generator(device="""cpu""" ).manual_seed(0 )
        _UpperCAmelCase : Optional[int] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(A_ )
        _UpperCAmelCase : Tuple = floats_tensor((1, 3, 2_56, 2_56) , rng=random.Random(0 ) ).to(A_ )
        _UpperCAmelCase : str = floats_tensor((1, 3, 2_56, 2_56) , rng=random.Random(1 ) ).to(A_ )
        _UpperCAmelCase : Union[str, Any] = pipe_a(
            prompt_embeds=A_ , negative_prompt_embeds=A_ , image=A_ , mask_image=A_ , original_image=A_ , generator=A_ , num_inference_steps=2 , output_type="""np""" , )
        _UpperCAmelCase : Any = output.images[0]
        assert image.shape == (2_56, 2_56, 3)
        _UpperCAmelCase : Optional[int] = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9
        _UpperCAmelCase : Tuple = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy""" )
        assert_mean_pixel_difference(A_ , A_ )


def a__ ( ):
    """Reset CUDA memory counters so each sub-test measures its own peak allocation."""
    torch.cuda.empty_cache()
    torch.cuda.reset_max_memory_allocated()
    torch.cuda.reset_peak_memory_stats()
643
1
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import ChineseCLIPImageProcessor


class ChineseCLIPImageProcessingTester(unittest.TestCase):
    """Builds image-processor kwargs and random test images for ChineseCLIPImageProcessor tests."""

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.48145466, 0.4578275, 0.40821073],
        image_std=[0.26862954, 0.26130258, 0.27577711],
        do_convert_rgb=True,
    ):
        # Fall back to the processor's default sizes when none are supplied.
        size = size if size is not None else {"height": 224, "width": 224}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_convert_rgb = do_convert_rgb

    def prepare_image_processor_dict(self):
        """Return the kwargs used to instantiate the image processor under test."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_convert_rgb": self.do_convert_rgb,
        }

    def prepare_inputs(self, equal_resolution=False, numpify=False, torchify=False):
        """Create a batch of random images as PIL images (default), numpy arrays, or torch tensors.

        Arrays are channels-first; PIL images are converted to channels-last.
        """
        assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"

        if equal_resolution:
            image_inputs = []
            for i in range(self.batch_size):
                image_inputs.append(
                    np.random.randint(
                        255, size=(self.num_channels, self.max_resolution, self.max_resolution), dtype=np.uint8
                    )
                )
        else:
            image_inputs = []
            for i in range(self.batch_size):
                # Random per-image resolution within [min_resolution, max_resolution).
                width, height = np.random.choice(np.arange(self.min_resolution, self.max_resolution), 2)
                image_inputs.append(np.random.randint(255, size=(self.num_channels, width, height), dtype=np.uint8))

        if not numpify and not torchify:
            # PIL expects the channel dimension as last dimension
            image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]

        if torchify:
            image_inputs = [torch.from_numpy(x) for x in image_inputs]

        return image_inputs


@require_torch
@require_vision
class ChineseCLIPImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Tests ChineseCLIPImageProcessor on 3-channel inputs (PIL, numpy, torch)."""

    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, do_center_crop=True)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 224, "width": 224})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        # Overriding via kwargs: an int `size` becomes a shortest-edge spec.
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )


@require_torch
@require_vision
class ChineseCLIPImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Tests that 4-channel (e.g. RGBA) inputs are converted down to 3 encoded channels."""

    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, num_channels=4, do_center_crop=True)
        # RGB conversion means the encoded output always has 3 channels.
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))

    def test_batch_feature(self):
        pass

    def test_call_pil_four_channels(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
55
def count_inversions_bf(arr):
    """Count inversions in `arr` by brute force.

    An inversion is a pair (i, j) with i < j and arr[i] > arr[j].
    Runs in O(n^2); used as a reference implementation for the
    divide-and-conquer version below.
    """
    num_inversions = 0
    n = len(arr)
    for i in range(n - 1):
        for j in range(i + 1, n):
            if arr[i] > arr[j]:
                num_inversions += 1
    return num_inversions


def count_inversions_recursive(arr):
    """Count inversions via merge-sort-style divide and conquer.

    Returns (sorted_arr, num_inversions); O(n log n).
    """
    if len(arr) <= 1:
        return arr, 0
    mid = len(arr) // 2
    p = arr[0:mid]
    q = arr[mid:]
    a, inversions_p = count_inversions_recursive(p)
    b, inversions_q = count_inversions_recursive(q)
    c, cross_inversions = _count_cross_inversions(a, b)
    return c, inversions_p + inversions_q + cross_inversions


def _count_cross_inversions(p, q):
    """Merge two sorted lists, counting inversions that cross the split.

    Returns (merged_sorted_list, num_cross_inversions).
    """
    r = []
    i = 0
    j = 0
    num_inversion = 0
    while i < len(p) and j < len(q):
        if p[i] > q[j]:
            # if P[i] > Q[j], then P[k] > Q[j] for all i < k <= len(P)
            # These are all inversions. The claim emerges from the
            # property that P is sorted.
            num_inversion += len(p) - i
            r.append(q[j])
            j += 1
        else:
            r.append(p[i])
            i += 1

    if i < len(p):
        r.extend(p[i:])
    else:
        r.extend(q[j:])
    return r, num_inversion


def main():
    """Sanity-check both implementations against each other."""
    arr_1 = [10, 2, 1, 5, 5, 2, 11]
    # this arr has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 8
    print("number of inversions = ", num_inversions_bf)

    # testing an array with zero inversion (a sorted arr_1)
    arr_1.sort()
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)

    # an empty list should also have zero inversions
    arr_1 = []
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)


if __name__ == "__main__":
    main()
55
1
# Lazy package init for the Spectrogram Diffusion pipeline: expose the real
# implementations only when their optional heavy dependencies are installed,
# otherwise substitute dummy objects that raise a helpful error on use.
from ...utils import is_note_seq_available, is_transformers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable

try:
    # The encoders and the pipeline require both `transformers` and `torch`.
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .notes_encoder import SpectrogramNotesEncoder
    from .continous_encoder import SpectrogramContEncoder
    # NOTE(review): SpectrogramContEncoder appears again in the import list below;
    # presumably the pipeline module re-exports the same class, so the rebind is
    # harmless — verify against pipeline_spectrogram_diffusion before removing.
    from .pipeline_spectrogram_diffusion import (
        SpectrogramContEncoder,
        SpectrogramDiffusionPipeline,
        TaFilmDecoder,
    )

try:
    # MIDI utilities additionally require the `note_seq` package.
    if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_transformers_and_torch_and_note_seq_objects import *  # noqa F403
else:
    from .midi_utils import MidiProcessor
43
import warnings

from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor


logger = logging.get_logger(__name__)


class ImageGPTFeatureExtractor(ImageGPTImageProcessor):
    """Deprecated alias of `ImageGPTImageProcessor`, kept for backward compatibility.

    Instantiating this class emits a `FutureWarning`; it will be removed in v5.
    """

    def __init__(self, *args, **kwargs) -> None:
        # Forward everything to the replacement class after warning the caller.
        warnings.warn(
            "The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ImageGPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
328
0
"""simple docstring""" from __future__ import annotations import unittest from transformers import RoFormerConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFRoFormerForCausalLM, TFRoFormerForMaskedLM, TFRoFormerForMultipleChoice, TFRoFormerForQuestionAnswering, TFRoFormerForSequenceClassification, TFRoFormerForTokenClassification, TFRoFormerModel, ) from transformers.models.roformer.modeling_tf_roformer import ( TFRoFormerSelfAttention, TFRoFormerSinusoidalPositionalEmbedding, ) class lowerCamelCase__ : def __init__( self : Union[str, Any] , UpperCamelCase : Tuple , UpperCamelCase : Tuple=13 , UpperCamelCase : Any=7 , UpperCamelCase : Any=True , UpperCamelCase : Any=True , UpperCamelCase : Union[str, Any]=True , UpperCamelCase : Tuple=True , UpperCamelCase : List[str]=99 , UpperCamelCase : List[str]=32 , UpperCamelCase : str=2 , UpperCamelCase : Union[str, Any]=4 , UpperCamelCase : Any=37 , UpperCamelCase : Any="gelu" , UpperCamelCase : Tuple=0.1 , UpperCamelCase : Tuple=0.1 , UpperCamelCase : int=512 , UpperCamelCase : Union[str, Any]=16 , UpperCamelCase : Optional[Any]=2 , UpperCamelCase : List[str]=0.02 , UpperCamelCase : List[str]=3 , UpperCamelCase : Union[str, Any]=4 , UpperCamelCase : Any=None , ): '''simple docstring''' __UpperCAmelCase : Dict = parent __UpperCAmelCase : int = 13 __UpperCAmelCase : int = 7 __UpperCAmelCase : Any = True __UpperCAmelCase : List[Any] = True __UpperCAmelCase : Optional[int] = True __UpperCAmelCase : Any = True __UpperCAmelCase : Dict = 99 __UpperCAmelCase : Optional[Any] = 32 __UpperCAmelCase : Tuple = 2 __UpperCAmelCase : Optional[Any] = 4 __UpperCAmelCase : Dict = 37 __UpperCAmelCase : Union[str, Any] = """gelu""" 
__UpperCAmelCase : Any = 0.1 __UpperCAmelCase : str = 0.1 __UpperCAmelCase : int = 512 __UpperCAmelCase : List[Any] = 16 __UpperCAmelCase : Optional[int] = 2 __UpperCAmelCase : str = 0.02 __UpperCAmelCase : Union[str, Any] = 3 __UpperCAmelCase : List[str] = 4 __UpperCAmelCase : Tuple = None def lowerCamelCase__ ( self : Dict ): '''simple docstring''' __UpperCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __UpperCAmelCase : Optional[int] = None if self.use_input_mask: __UpperCAmelCase : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] ) __UpperCAmelCase : Optional[Any] = None if self.use_token_type_ids: __UpperCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __UpperCAmelCase : Any = None __UpperCAmelCase : str = None __UpperCAmelCase : List[Any] = None if self.use_labels: __UpperCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __UpperCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __UpperCAmelCase : List[str] = ids_tensor([self.batch_size] , self.num_choices ) __UpperCAmelCase : Tuple = RoFormerConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=UpperCamelCase , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase : Any , UpperCamelCase : str , UpperCamelCase : Any , UpperCamelCase : str , UpperCamelCase : Union[str, Any] , UpperCamelCase 
: Any , UpperCamelCase : Tuple ): '''simple docstring''' __UpperCAmelCase : List[Any] = TFRoFormerModel(config=UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} __UpperCAmelCase : Tuple = [input_ids, input_mask] __UpperCAmelCase : Optional[int] = model(UpperCamelCase ) __UpperCAmelCase : Tuple = model(UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Tuple , UpperCamelCase : Dict , UpperCamelCase : Optional[Any] , UpperCamelCase : Optional[int] , UpperCamelCase : List[Any] , UpperCamelCase : List[str] ): '''simple docstring''' __UpperCAmelCase : Optional[Any] = True __UpperCAmelCase : Tuple = TFRoFormerForCausalLM(config=UpperCamelCase ) __UpperCAmelCase : Any = { """input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids, } __UpperCAmelCase : Tuple = model(UpperCamelCase )["""logits"""] self.parent.assertListEqual( list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] ) def lowerCamelCase__ ( self : str , UpperCamelCase : Optional[Any] , UpperCamelCase : Tuple , UpperCamelCase : Optional[int] , UpperCamelCase : Optional[Any] , UpperCamelCase : Dict , UpperCamelCase : Optional[Any] , UpperCamelCase : int ): '''simple docstring''' __UpperCAmelCase : int = TFRoFormerForMaskedLM(config=UpperCamelCase ) __UpperCAmelCase : int = { """input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids, } __UpperCAmelCase : List[str] = model(UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCamelCase__ ( self : int , UpperCamelCase : Dict , UpperCamelCase : Optional[Any] , UpperCamelCase : List[Any] , UpperCamelCase 
: str , UpperCamelCase : Dict , UpperCamelCase : Optional[Any] , UpperCamelCase : Dict ): '''simple docstring''' __UpperCAmelCase : List[str] = self.num_labels __UpperCAmelCase : Dict = TFRoFormerForSequenceClassification(config=UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = { """input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids, } __UpperCAmelCase : Optional[int] = model(UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase__ ( self : int , UpperCamelCase : str , UpperCamelCase : Union[str, Any] , UpperCamelCase : Dict , UpperCamelCase : Dict , UpperCamelCase : Any , UpperCamelCase : Tuple , UpperCamelCase : Dict ): '''simple docstring''' __UpperCAmelCase : int = self.num_choices __UpperCAmelCase : Union[str, Any] = TFRoFormerForMultipleChoice(config=UpperCamelCase ) __UpperCAmelCase : Optional[Any] = tf.tile(tf.expand_dims(UpperCamelCase , 1 ) , (1, self.num_choices, 1) ) __UpperCAmelCase : Any = tf.tile(tf.expand_dims(UpperCamelCase , 1 ) , (1, self.num_choices, 1) ) __UpperCAmelCase : List[Any] = tf.tile(tf.expand_dims(UpperCamelCase , 1 ) , (1, self.num_choices, 1) ) __UpperCAmelCase : Optional[int] = { """input_ids""": multiple_choice_inputs_ids, """attention_mask""": multiple_choice_input_mask, """token_type_ids""": multiple_choice_token_type_ids, } __UpperCAmelCase : Union[str, Any] = model(UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowerCamelCase__ ( self : Any , UpperCamelCase : Optional[Any] , UpperCamelCase : int , UpperCamelCase : Any , UpperCamelCase : Tuple , UpperCamelCase : Optional[int] , UpperCamelCase : Optional[Any] , UpperCamelCase : str ): '''simple docstring''' __UpperCAmelCase : List[Any] = self.num_labels __UpperCAmelCase : Optional[Any] = TFRoFormerForTokenClassification(config=UpperCamelCase ) __UpperCAmelCase : Dict = { """input_ids""": input_ids, 
"""attention_mask""": input_mask, """token_type_ids""": token_type_ids, } __UpperCAmelCase : Dict = model(UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase : Optional[int] , UpperCamelCase : Optional[int] , UpperCamelCase : Any , UpperCamelCase : Union[str, Any] , UpperCamelCase : str , UpperCamelCase : Tuple , UpperCamelCase : Any ): '''simple docstring''' __UpperCAmelCase : Any = TFRoFormerForQuestionAnswering(config=UpperCamelCase ) __UpperCAmelCase : Optional[Any] = { """input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids, } __UpperCAmelCase : Optional[Any] = model(UpperCamelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCamelCase__ ( self : Any ): '''simple docstring''' __UpperCAmelCase : Optional[int] = self.prepare_config_and_inputs() ( __UpperCAmelCase ) : Any = config_and_inputs __UpperCAmelCase : Optional[Any] = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask} return config, inputs_dict @require_tf class lowerCamelCase__ ( A , A , unittest.TestCase ): __a = ( ( TFRoFormerModel, TFRoFormerForCausalLM, TFRoFormerForMaskedLM, TFRoFormerForQuestionAnswering, TFRoFormerForSequenceClassification, TFRoFormerForTokenClassification, TFRoFormerForMultipleChoice, ) if is_tf_available() else () ) __a = ( { """feature-extraction""": TFRoFormerModel, """fill-mask""": TFRoFormerForMaskedLM, """question-answering""": TFRoFormerForQuestionAnswering, """text-classification""": TFRoFormerForSequenceClassification, """text-generation""": TFRoFormerForCausalLM, """token-classification""": TFRoFormerForTokenClassification, """zero-shot""": TFRoFormerForSequenceClassification, } if is_tf_available() else {} ) __a = 
False __a = False def lowerCamelCase__ ( self : Dict , UpperCamelCase : List[str] , UpperCamelCase : List[Any] , UpperCamelCase : Tuple , UpperCamelCase : Dict , UpperCamelCase : str ): '''simple docstring''' if pipeline_test_casse_name == "TextGenerationPipelineTests": return True return False def lowerCamelCase__ ( self : Optional[int] ): '''simple docstring''' __UpperCAmelCase : List[Any] = TFRoFormerModelTester(self ) __UpperCAmelCase : List[str] = ConfigTester(self , config_class=UpperCamelCase , hidden_size=37 ) def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCamelCase__ ( self : Optional[int] ): '''simple docstring''' __UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase ) def lowerCamelCase__ ( self : int ): '''simple docstring''' __UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*UpperCamelCase ) def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head(*UpperCamelCase ) def lowerCamelCase__ ( self : Any ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*UpperCamelCase ) def lowerCamelCase__ ( self : str ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*UpperCamelCase ) def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' __UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*UpperCamelCase ) def lowerCamelCase__ ( self : Optional[Any] ): '''simple docstring''' __UpperCAmelCase : Tuple = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*UpperCamelCase ) @slow def lowerCamelCase__ ( self : int ): '''simple docstring''' __UpperCAmelCase : Optional[int] = TFRoFormerModel.from_pretrained("""junnyu/roformer_chinese_base""" ) self.assertIsNotNone(UpperCamelCase ) @require_tf class lowerCamelCase__ ( unittest.TestCase ): @slow def lowerCamelCase__ ( self : int ): '''simple docstring''' __UpperCAmelCase : str = TFRoFormerForMaskedLM.from_pretrained("""junnyu/roformer_chinese_base""" ) __UpperCAmelCase : Optional[Any] = tf.constant([[0, 1, 2, 3, 4, 5]] ) __UpperCAmelCase : Dict = model(UpperCamelCase )[0] # TODO Replace vocab size __UpperCAmelCase : Optional[int] = 50_000 __UpperCAmelCase : Any = [1, 6, vocab_size] self.assertEqual(output.shape , UpperCamelCase ) print(output[:, :3, :3] ) # TODO Replace values below with what was printed above. __UpperCAmelCase : int = tf.constant( [ [ [-0.12053341, -1.0264901, 0.29221946], [-1.5133783, 0.197433, 0.15190607], [-5.0135403, -3.900256, -0.84038764], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , UpperCamelCase , atol=1e-4 ) @require_tf class lowerCamelCase__ ( unittest.TestCase ): __a = 1E-4 def lowerCamelCase__ ( self : Any ): '''simple docstring''' __UpperCAmelCase : Dict = tf.constant([[4, 10]] ) __UpperCAmelCase : List[str] = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 ) __UpperCAmelCase : str = emba(input_ids.shape ) __UpperCAmelCase : Union[str, Any] = tf.constant( [[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]] ) tf.debugging.assert_near(UpperCamelCase , UpperCamelCase , atol=self.tolerance ) def lowerCamelCase__ ( self : Optional[int] ): '''simple docstring''' __UpperCAmelCase : Any = tf.constant( [ [0.0000, 0.0000, 0.0000, 0.0000, 0.0000], [0.8415, 0.8219, 0.8020, 0.7819, 0.7617], [0.9093, 0.9364, 0.9581, 0.9749, 0.9870], ] ) __UpperCAmelCase : Any = 
TFRoFormerSinusoidalPositionalEmbedding(num_positions=512 , embedding_dim=512 ) emba([2, 16, 512] ) __UpperCAmelCase : Tuple = emba.weight[:3, :5] tf.debugging.assert_near(UpperCamelCase , UpperCamelCase , atol=self.tolerance ) @require_tf class lowerCamelCase__ ( unittest.TestCase ): __a = 1E-4 def lowerCamelCase__ ( self : str ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 100 __UpperCAmelCase : Tuple = -tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 100 __UpperCAmelCase : Tuple = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32 , embedding_dim=64 ) __UpperCAmelCase : int = embed_positions([2, 16, 768] )[None, None, :, :] __UpperCAmelCase : Optional[int] = TFRoFormerSelfAttention.apply_rotary_position_embeddings( UpperCamelCase , UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : Optional[Any] = tf.constant( [ [0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700], [-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343], [-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985], [-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871], [0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980], [3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253], ] ) __UpperCAmelCase : List[Any] = tf.constant( [ [0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700], [0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343], [1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985], [2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871], [-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980], [-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253], ] ) tf.debugging.assert_near(query_layer[0, 0, :6, :8] , UpperCamelCase , atol=self.tolerance ) tf.debugging.assert_near(key_layer[0, 
0, :6, :8] , UpperCamelCase , atol=self.tolerance )
708
"""simple docstring""" from __future__ import annotations def lowerCamelCase ( _UpperCamelCase : int ) -> list[int]: '''simple docstring''' __UpperCAmelCase : Tuple = 2 __UpperCAmelCase : Optional[Any] = [] while i * i <= n: if n % i: i += 1 else: n //= i factors.append(_UpperCamelCase ) if n > 1: factors.append(_UpperCamelCase ) return factors if __name__ == "__main__": import doctest doctest.testmod()
299
0
import random


class Onepad:
    """Toy pad cipher: each character gets its own random key k, c = (p + k) * k.

    SECURITY NOTE(review): `random` is not cryptographically secure and the
    scheme leaks structure; this is a teaching example, not real crypto —
    use the `secrets` module and an established cipher for anything sensitive.
    """

    @staticmethod
    def encrypt(text: str) -> tuple[list[int], list[int]]:
        """Encrypt `text`; return (cipher, key) lists of equal length."""
        plain = [ord(i) for i in text]
        cipher = []
        key = []
        for p in plain:
            k = random.randint(1, 300)
            cipher.append((p + k) * k)
            key.append(k)
        return cipher, key

    @staticmethod
    def decrypt(cipher: list[int], key: list[int]) -> str:
        """Invert encrypt: p = (c - k^2) / k, exactly divisible by construction."""
        plain = []
        for i in range(len(cipher)):
            # Integer floor division is exact here and avoids float rounding.
            p = (cipher[i] - key[i] ** 2) // key[i]
            plain.append(chr(p))
        return "".join(plain)


if __name__ == "__main__":
    c, k = Onepad().encrypt("Hello")
    print(c, k)
    print(Onepad().decrypt(c, k))
461
"""CLIP-based NSFW safety checker (diffusers-style)."""
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel

from ...utils import logging


UpperCamelCase_: int = logging.get_logger(__name__)  # module logger


def cosine_distance(image_embeds, text_embeds):
    """Cosine-similarity matrix between two batches of embeddings."""
    normalized_image_embeds = nn.functional.normalize(image_embeds)
    normalized_text_embeds = nn.functional.normalize(text_embeds)
    return torch.mm(normalized_image_embeds, normalized_text_embeds.t())


# Backward-compatible alias: the obfuscated original defined the helper under
# this name while every call site in this module used ``cosine_distance``.
UpperCamelCase = cosine_distance


class __lowercase(PreTrainedModel):
    """Scores image embeddings against fixed concept embeddings and flags NSFW images.

    NOTE(review): the obfuscated source subclassed an undefined name, assigned
    both class attributes to ``_A``, defined both forward methods as ``_a``
    with duplicate parameter names (a SyntaxError), and discarded every
    per-concept score into a dead local.  Restored to the canonical layout.
    """

    config_class = CLIPConfig
    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config: CLIPConfig):
        super().__init__(config)
        self.vision_model = CLIPVisionModel(config.vision_config)
        # bias=False per the canonical checker (the obfuscated text passed the
        # config object, i.e. a truthy value, as ``bias``).
        self.visual_projection = nn.Linear(config.vision_config.hidden_size, config.projection_dim, bias=False)
        # Fixed (non-trainable) concept embeddings and their score thresholds.
        self.concept_embeds = nn.Parameter(torch.ones(17, config.projection_dim), requires_grad=False)
        self.special_care_embeds = nn.Parameter(torch.ones(3, config.projection_dim), requires_grad=False)
        self.concept_embeds_weights = nn.Parameter(torch.ones(17), requires_grad=False)
        self.special_care_embeds_weights = nn.Parameter(torch.ones(3), requires_grad=False)

    @torch.no_grad()
    def forward(self, clip_input, images):
        """Return (images, has_nsfw_concepts) for a batch of CLIP inputs."""
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        # we always cast to float32 as this does not cause significant overhead
        # and is compatible with bfloat16
        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds).cpu().float().numpy()
        cos_dist = cosine_distance(image_embeds, self.concept_embeds).cpu().float().numpy()

        result = []
        batch_size = image_embeds.shape[0]
        for i in range(batch_size):
            result_img = {"special_scores": {}, "special_care": [], "concept_scores": {}, "bad_concepts": []}

            # increase this value to create a stronger `nsfw` filter
            # at the cost of increasing the possibility of filtering benign images
            adjustment = 0.0

            for concept_idx in range(len(special_cos_dist[0])):
                concept_cos = special_cos_dist[i][concept_idx]
                concept_threshold = self.special_care_embeds_weights[concept_idx].item()
                result_img["special_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["special_scores"][concept_idx] > 0:
                    result_img["special_care"].append({concept_idx, result_img["special_scores"][concept_idx]})
                    adjustment = 0.01

            for concept_idx in range(len(cos_dist[0])):
                concept_cos = cos_dist[i][concept_idx]
                concept_threshold = self.concept_embeds_weights[concept_idx].item()
                result_img["concept_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["concept_scores"][concept_idx] > 0:
                    result_img["bad_concepts"].append(concept_idx)

            result.append(result_img)

        has_nsfw_concepts = [len(res["bad_concepts"]) > 0 for res in result]
        return images, has_nsfw_concepts

    @torch.no_grad()
    def forward_onnx(self, clip_input: torch.FloatTensor, images: torch.FloatTensor):
        """Vectorized (ONNX-exportable) variant of :meth:`forward`."""
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = cosine_distance(image_embeds, self.concept_embeds)

        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign images
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights + adjustment
        # special_scores = special_scores.round(decimals=3)
        special_care = torch.any(special_scores > 0, dim=1)
        special_adjustment = special_care * 0.01
        special_adjustment = special_adjustment.unsqueeze(1).expand(-1, cos_dist.shape[1])

        concept_scores = (cos_dist - self.concept_embeds_weights) + special_adjustment
        # concept_scores = concept_scores.round(decimals=3)
        has_nsfw_concepts = torch.any(concept_scores > 0, dim=1)

        return images, has_nsfw_concepts
461
1
"""simple docstring""" from __future__ import annotations __SCREAMING_SNAKE_CASE =list[list[int]] # assigning initial values to the grid __SCREAMING_SNAKE_CASE =[ [3, 0, 6, 5, 0, 8, 4, 0, 0], [5, 2, 0, 0, 0, 0, 0, 0, 0], [0, 8, 7, 0, 0, 0, 0, 3, 1], [0, 0, 3, 0, 1, 0, 0, 8, 0], [9, 0, 0, 8, 6, 3, 0, 0, 5], [0, 5, 0, 0, 9, 0, 6, 0, 0], [1, 3, 0, 0, 0, 0, 2, 5, 0], [0, 0, 0, 0, 0, 0, 0, 7, 4], [0, 0, 5, 2, 0, 6, 3, 0, 0], ] # a grid with no solution __SCREAMING_SNAKE_CASE =[ [5, 0, 6, 5, 0, 8, 4, 0, 3], [5, 2, 0, 0, 0, 0, 0, 0, 2], [1, 8, 7, 0, 0, 0, 0, 3, 1], [0, 0, 3, 0, 1, 0, 0, 8, 0], [9, 0, 0, 8, 6, 3, 0, 0, 5], [0, 5, 0, 0, 9, 0, 6, 0, 0], [1, 3, 0, 0, 0, 0, 2, 5, 0], [0, 0, 0, 0, 0, 0, 0, 7, 4], [0, 0, 5, 2, 0, 6, 3, 0, 0], ] def lowercase__( __SCREAMING_SNAKE_CASE : Matrix , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int ): for i in range(9 ): if grid[row][i] == n or grid[i][column] == n: return False for i in range(3 ): for j in range(3 ): if grid[(row - row % 3) + i][(column - column % 3) + j] == n: return False return True def lowercase__( __SCREAMING_SNAKE_CASE : Matrix ): for i in range(9 ): for j in range(9 ): if grid[i][j] == 0: return i, j return None def lowercase__( __SCREAMING_SNAKE_CASE : Matrix ): if location := find_empty_location(__SCREAMING_SNAKE_CASE ): lowercase_ , lowercase_ : List[Any] = location else: # If the location is ``None``, then the grid is solved. 
return grid for digit in range(1 , 10 ): if is_safe(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): lowercase_ : Tuple = digit if sudoku(__SCREAMING_SNAKE_CASE ) is not None: return grid lowercase_ : Tuple = 0 return None def lowercase__( __SCREAMING_SNAKE_CASE : Matrix ): for row in grid: for cell in row: print(__SCREAMING_SNAKE_CASE , end=' ' ) print() if __name__ == "__main__": # make a copy of grid so that you can compare with the unmodified grid for example_grid in (initial_grid, no_solution): print("\nExample grid:\n" + "=" * 20) print_solution(example_grid) print("\nExample grid solution:") __SCREAMING_SNAKE_CASE =sudoku(example_grid) if solution is not None: print_solution(solution) else: print("Cannot find a solution.")
477
"""simple docstring""" import copy from typing import Any, Dict, List, Optional, Union import numpy as np import torch from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import TensorType, logging __SCREAMING_SNAKE_CASE =logging.get_logger(__name__) class UpperCamelCase ( lowercase_ ): lowercase = ['input_features', 'is_longer'] def __init__( self ,__UpperCamelCase=64 ,__UpperCamelCase=4_8000 ,__UpperCamelCase=480 ,__UpperCamelCase=10 ,__UpperCamelCase=1024 ,__UpperCamelCase=0.0 ,__UpperCamelCase=False ,__UpperCamelCase = 0 ,__UpperCamelCase = 1_4000 ,__UpperCamelCase = None ,__UpperCamelCase = "fusion" ,__UpperCamelCase = "repeatpad" ,**__UpperCamelCase ,) -> Optional[int]: '''simple docstring''' super().__init__( feature_size=__UpperCamelCase ,sampling_rate=__UpperCamelCase ,padding_value=__UpperCamelCase ,return_attention_mask=__UpperCamelCase ,**__UpperCamelCase ,) lowercase_ : Union[str, Any] = top_db lowercase_ : Any = truncation lowercase_ : str = padding lowercase_ : Optional[Any] = fft_window_size lowercase_ : List[Any] = (fft_window_size >> 1) + 1 lowercase_ : Any = hop_length lowercase_ : List[Any] = max_length_s lowercase_ : Any = max_length_s * sampling_rate lowercase_ : Optional[int] = sampling_rate lowercase_ : List[Any] = frequency_min lowercase_ : str = frequency_max lowercase_ : Any = mel_filter_bank( num_frequency_bins=self.nb_frequency_bins ,num_mel_filters=__UpperCamelCase ,min_frequency=__UpperCamelCase ,max_frequency=__UpperCamelCase ,sampling_rate=__UpperCamelCase ,norm=__UpperCamelCase ,mel_scale='htk' ,) lowercase_ : Optional[Any] = mel_filter_bank( num_frequency_bins=self.nb_frequency_bins ,num_mel_filters=__UpperCamelCase ,min_frequency=__UpperCamelCase ,max_frequency=__UpperCamelCase ,sampling_rate=__UpperCamelCase ,norm='slaney' ,mel_scale='slaney' ,) def _UpperCAmelCase ( 
self ) -> Dict[str, Any]: '''simple docstring''' lowercase_ : int = copy.deepcopy(self.__dict__ ) lowercase_ : Any = self.__class__.__name__ if "mel_filters" in output: del output["mel_filters"] if "mel_filters_slaney" in output: del output["mel_filters_slaney"] return output def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ) -> np.ndarray: '''simple docstring''' lowercase_ : Union[str, Any] = spectrogram( __UpperCamelCase ,window_function(self.fft_window_size ,'hann' ) ,frame_length=self.fft_window_size ,hop_length=self.hop_length ,power=2.0 ,mel_filters=__UpperCamelCase ,log_mel='dB' ,) return log_mel_spectrogram.T def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Any: '''simple docstring''' lowercase_ : Dict = np.array_split(list(range(0 ,total_frames - chunk_frames + 1 ) ) ,3 ) if len(ranges[1] ) == 0: # if the audio is too short, we just use the first chunk lowercase_ : Optional[int] = [0] if len(ranges[2] ) == 0: # if the audio is too short, we just use the first chunk lowercase_ : List[str] = [0] # randomly choose index for each part lowercase_ : List[str] = np.random.choice(ranges[0] ) lowercase_ : Optional[Any] = np.random.choice(ranges[1] ) lowercase_ : Union[str, Any] = np.random.choice(ranges[2] ) lowercase_ : Tuple = mel[idx_front : idx_front + chunk_frames, :] lowercase_ : str = mel[idx_middle : idx_middle + chunk_frames, :] lowercase_ : Optional[Any] = mel[idx_back : idx_back + chunk_frames, :] lowercase_ : Tuple = torch.tensor(mel[None, None, :] ) lowercase_ : Optional[Any] = torch.nn.functional.interpolate( __UpperCamelCase ,size=[chunk_frames, 64] ,mode='bilinear' ,align_corners=__UpperCamelCase ) lowercase_ : str = mel_shrink[0][0].numpy() lowercase_ : Optional[int] = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] ,axis=0 ) return mel_fusion def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> np.array: '''simple 
docstring''' if waveform.shape[0] > max_length: if truncation == "rand_trunc": lowercase_ : Any = True # random crop to max_length (for compatibility) -> this should be handled by self.pad lowercase_ : Any = len(__UpperCamelCase ) - max_length lowercase_ : List[str] = np.random.randint(0 ,overflow + 1 ) lowercase_ : Any = waveform[idx : idx + max_length] lowercase_ : Optional[int] = self._np_extract_fbank_features(__UpperCamelCase ,self.mel_filters_slaney )[None, :] elif truncation == "fusion": lowercase_ : str = self._np_extract_fbank_features(__UpperCamelCase ,self.mel_filters ) lowercase_ : List[str] = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed lowercase_ : Dict = mel.shape[0] if chunk_frames == total_frames: # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length. # In this case, we just use the whole audio. lowercase_ : int = np.stack([mel, mel, mel, mel] ,axis=0 ) lowercase_ : Optional[Any] = False else: lowercase_ : int = self._random_mel_fusion(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) lowercase_ : Tuple = True else: raise NotImplementedError(f'''data_truncating {truncation} not implemented''' ) else: lowercase_ : Dict = False # only use repeat as a new possible value for padding. 
you repeat the audio before applying the usual max_length padding if waveform.shape[0] < max_length: if padding == "repeat": lowercase_ : Optional[int] = int(max_length / len(__UpperCamelCase ) ) lowercase_ : Any = np.stack(np.tile(__UpperCamelCase ,n_repeat + 1 ) )[:max_length] if padding == "repeatpad": lowercase_ : Union[str, Any] = int(max_length / len(__UpperCamelCase ) ) lowercase_ : int = np.stack(np.tile(__UpperCamelCase ,__UpperCamelCase ) ) lowercase_ : str = np.pad(__UpperCamelCase ,(0, max_length - waveform.shape[0]) ,mode='constant' ,constant_values=0 ) if truncation == "fusion": lowercase_ : List[str] = self._np_extract_fbank_features(__UpperCamelCase ,self.mel_filters ) lowercase_ : List[str] = np.stack([input_mel, input_mel, input_mel, input_mel] ,axis=0 ) else: lowercase_ : Any = self._np_extract_fbank_features(__UpperCamelCase ,self.mel_filters_slaney )[None, :] return input_mel, longer def __call__( self ,__UpperCamelCase ,__UpperCamelCase = None ,__UpperCamelCase = None ,__UpperCamelCase = None ,__UpperCamelCase = None ,__UpperCamelCase = None ,**__UpperCamelCase ,) -> BatchFeature: '''simple docstring''' lowercase_ : Union[str, Any] = truncation if truncation is not None else self.truncation lowercase_ : List[Any] = padding if padding else self.padding if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a''' f''' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input''' f''' was sampled with {self.sampling_rate} and not {sampling_rate}.''' ) else: logger.warning( 'It is strongly recommended to pass the `sampling_rate` argument to this function. ' 'Failing to do so can result in silent errors that might be hard to debug.' 
) lowercase_ : Tuple = isinstance(__UpperCamelCase ,np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' ) lowercase_ : List[str] = is_batched_numpy or ( isinstance(__UpperCamelCase ,(list, tuple) ) and (isinstance(raw_speech[0] ,(np.ndarray, tuple, list) )) ) if is_batched: lowercase_ : str = [np.asarray(__UpperCamelCase ,dtype=np.floataa ) for speech in raw_speech] elif not is_batched and not isinstance(__UpperCamelCase ,np.ndarray ): lowercase_ : Any = np.asarray(__UpperCamelCase ,dtype=np.floataa ) elif isinstance(__UpperCamelCase ,np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): lowercase_ : Dict = raw_speech.astype(np.floataa ) # always return batch if not is_batched: lowercase_ : int = [np.asarray(__UpperCamelCase )] # convert to mel spectrogram, truncate and pad if needed. lowercase_ : List[str] = [ self._get_input_mel(__UpperCamelCase ,max_length if max_length else self.nb_max_samples ,__UpperCamelCase ,__UpperCamelCase ) for waveform in raw_speech ] lowercase_ : int = [] lowercase_ : int = [] for mel, longer in padded_inputs: input_mel.append(__UpperCamelCase ) is_longer.append(__UpperCamelCase ) if truncation == "fusion" and sum(__UpperCamelCase ) == 0: # if no audio is longer than 10s, then randomly select one audio to be longer lowercase_ : int = np.random.randint(0 ,len(__UpperCamelCase ) ) lowercase_ : Tuple = True if isinstance(input_mel[0] ,__UpperCamelCase ): lowercase_ : Union[str, Any] = [np.asarray(__UpperCamelCase ,dtype=np.floataa ) for feature in input_mel] # is_longer is a list of bool lowercase_ : Any = [[longer] for longer in is_longer] lowercase_ : List[Any] = {'input_features': input_mel, 'is_longer': is_longer} lowercase_ : Union[str, Any] = BatchFeature(__UpperCamelCase ) if return_tensors is not None: lowercase_ : Any = input_features.convert_to_tensors(__UpperCamelCase ) return input_features
477
1
"""Pipeline tests for audio classification."""
import unittest

import numpy as np

from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
from transformers.pipelines import AudioClassificationPipeline, pipeline
from transformers.testing_utils import (
    is_pipeline_test,
    nested_simplify,
    require_tf,
    require_torch,
    require_torchaudio,
    slow,
)

from .test_pipelines_common import ANY


@is_pipeline_test
class UpperCAmelCase(unittest.TestCase):
    """Audio-classification pipeline test suite.

    NOTE(review): in the obfuscated source every method was named
    ``_lowerCAmelCase`` (later defs shadowed earlier ones, unittest discovered
    no tests, and ``self.run_torchaudio`` could not resolve) and the ``ANY``
    matchers received the parameter name instead of ``float``/``str``.  Method
    names and matcher arguments restored from the canonical transformers test.
    """

    model_mapping = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        audio_classifier = AudioClassificationPipeline(model=model, feature_extractor=processor)
        # test with a raw waveform
        audio = np.zeros((34000,))
        audio2 = np.zeros((14000,))
        return audio_classifier, [audio2, audio]

    def run_pipeline_test(self, audio_classifier, examples):
        audio2, audio = examples
        output = audio_classifier(audio)
        # by default a model is initialized with num_labels=2
        self.assertEqual(
            output,
            [
                {"score": ANY(float), "label": ANY(str)},
                {"score": ANY(float), "label": ANY(str)},
            ],
        )
        output = audio_classifier(audio, top_k=1)
        self.assertEqual(
            output,
            [
                {"score": ANY(float), "label": ANY(str)},
            ],
        )
        self.run_torchaudio(audio_classifier)

    @require_torchaudio
    def run_torchaudio(self, audio_classifier):
        import datasets

        # test with a local file
        dataset = datasets.load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        audio = dataset[0]["audio"]["array"]
        output = audio_classifier(audio)
        self.assertEqual(
            output,
            [
                {"score": ANY(float), "label": ANY(str)},
                {"score": ANY(float), "label": ANY(str)},
            ],
        )

    @require_torch
    def test_small_model_pt(self):
        model = "anton-l/wav2vec2-random-tiny-classifier"
        audio_classifier = pipeline("audio-classification", model=model)
        audio = np.ones((8000,))
        output = audio_classifier(audio, top_k=4)

        EXPECTED_OUTPUT = [
            {"score": 0.0842, "label": "no"},
            {"score": 0.0838, "label": "up"},
            {"score": 0.0837, "label": "go"},
            {"score": 0.0834, "label": "right"},
        ]
        EXPECTED_OUTPUT_PT_2 = [
            {"score": 0.0845, "label": "stop"},
            {"score": 0.0844, "label": "on"},
            {"score": 0.0841, "label": "right"},
            {"score": 0.0834, "label": "left"},
        ]
        self.assertIn(nested_simplify(output, decimals=4), [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2])

        audio_dict = {"array": np.ones((8000,)), "sampling_rate": audio_classifier.feature_extractor.sampling_rate}
        output = audio_classifier(audio_dict, top_k=4)
        self.assertIn(nested_simplify(output, decimals=4), [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2])

    @require_torch
    @slow
    def test_large_model_pt(self):
        import datasets

        model = "superb/wav2vec2-base-superb-ks"
        audio_classifier = pipeline("audio-classification", model=model)
        dataset = datasets.load_dataset("anton-l/superb_dummy", "ks", split="test")

        audio = np.array(dataset[3]["speech"], dtype=np.float32)
        output = audio_classifier(audio, top_k=4)
        self.assertEqual(
            nested_simplify(output, decimals=3),
            [
                {"score": 0.981, "label": "go"},
                {"score": 0.007, "label": "up"},
                {"score": 0.006, "label": "_unknown_"},
                {"score": 0.001, "label": "down"},
            ],
        )

    @require_tf
    @unittest.skip("Audio classification is not implemented for TF")
    def test_large_model_tf(self):
        pass
152
"""Convert original ConditionalDETR checkpoints (torch hub) to the Hugging Face format."""
import argparse
import json
from collections import OrderedDict
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import (
    ConditionalDetrConfig,
    ConditionalDetrForObjectDetection,
    ConditionalDetrForSegmentation,
    ConditionalDetrImageProcessor,
)
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# here we list all keys to be renamed (original name on the left, our name on the right)
# NOTE(review): the obfuscated source bound this list to a throwaway name while
# every append below referenced ``rename_keys``; binding restored.
rename_keys = []
for i in range(6):
    # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
    rename_keys.append(
        (f"transformer.encoder.layers.{i}.self_attn.out_proj.weight", f"encoder.layers.{i}.self_attn.out_proj.weight")
    )
    rename_keys.append(
        (f"transformer.encoder.layers.{i}.self_attn.out_proj.bias", f"encoder.layers.{i}.self_attn.out_proj.bias")
    )
    rename_keys.append((f"transformer.encoder.layers.{i}.linear1.weight", f"encoder.layers.{i}.fc1.weight"))
    rename_keys.append((f"transformer.encoder.layers.{i}.linear1.bias", f"encoder.layers.{i}.fc1.bias"))
    rename_keys.append((f"transformer.encoder.layers.{i}.linear2.weight", f"encoder.layers.{i}.fc2.weight"))
    rename_keys.append((f"transformer.encoder.layers.{i}.linear2.bias", f"encoder.layers.{i}.fc2.bias"))
    rename_keys.append(
        (f"transformer.encoder.layers.{i}.norm1.weight", f"encoder.layers.{i}.self_attn_layer_norm.weight")
    )
    rename_keys.append((f"transformer.encoder.layers.{i}.norm1.bias", f"encoder.layers.{i}.self_attn_layer_norm.bias"))
    rename_keys.append((f"transformer.encoder.layers.{i}.norm2.weight", f"encoder.layers.{i}.final_layer_norm.weight"))
    rename_keys.append((f"transformer.encoder.layers.{i}.norm2.bias", f"encoder.layers.{i}.final_layer_norm.bias"))
    # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.self_attn.out_proj.weight", f"decoder.layers.{i}.self_attn.out_proj.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.self_attn.out_proj.bias", f"decoder.layers.{i}.self_attn.out_proj.bias")
    )
    rename_keys.append(
        (
            f"transformer.decoder.layers.{i}.cross_attn.out_proj.weight",
            f"decoder.layers.{i}.encoder_attn.out_proj.weight",
        )
    )
    rename_keys.append(
        (
            f"transformer.decoder.layers.{i}.cross_attn.out_proj.bias",
            f"decoder.layers.{i}.encoder_attn.out_proj.bias",
        )
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.linear1.weight", f"decoder.layers.{i}.fc1.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.linear1.bias", f"decoder.layers.{i}.fc1.bias"))
    rename_keys.append((f"transformer.decoder.layers.{i}.linear2.weight", f"decoder.layers.{i}.fc2.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.linear2.bias", f"decoder.layers.{i}.fc2.bias"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.norm1.weight", f"decoder.layers.{i}.self_attn_layer_norm.weight")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.norm1.bias", f"decoder.layers.{i}.self_attn_layer_norm.bias"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.norm2.weight", f"decoder.layers.{i}.encoder_attn_layer_norm.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.norm2.bias", f"decoder.layers.{i}.encoder_attn_layer_norm.bias")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.norm3.weight", f"decoder.layers.{i}.final_layer_norm.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.norm3.bias", f"decoder.layers.{i}.final_layer_norm.bias"))
    # q, k, v projections in self/cross-attention in decoder for conditional DETR
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.sa_qcontent_proj.weight", f"decoder.layers.{i}.sa_qcontent_proj.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.sa_kcontent_proj.weight", f"decoder.layers.{i}.sa_kcontent_proj.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.sa_qpos_proj.weight", f"decoder.layers.{i}.sa_qpos_proj.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.sa_kpos_proj.weight", f"decoder.layers.{i}.sa_kpos_proj.weight")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.sa_v_proj.weight", f"decoder.layers.{i}.sa_v_proj.weight"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.ca_qcontent_proj.weight", f"decoder.layers.{i}.ca_qcontent_proj.weight")
    )
    # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.ca_kcontent_proj.weight", f"decoder.layers.{i}.ca_kcontent_proj.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.ca_kpos_proj.weight", f"decoder.layers.{i}.ca_kpos_proj.weight")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.ca_v_proj.weight", f"decoder.layers.{i}.ca_v_proj.weight"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight", f"decoder.layers.{i}.ca_qpos_sine_proj.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.sa_qcontent_proj.bias", f"decoder.layers.{i}.sa_qcontent_proj.bias")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.sa_kcontent_proj.bias", f"decoder.layers.{i}.sa_kcontent_proj.bias")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.sa_qpos_proj.bias", f"decoder.layers.{i}.sa_qpos_proj.bias"))
    rename_keys.append((f"transformer.decoder.layers.{i}.sa_kpos_proj.bias", f"decoder.layers.{i}.sa_kpos_proj.bias"))
    rename_keys.append((f"transformer.decoder.layers.{i}.sa_v_proj.bias", f"decoder.layers.{i}.sa_v_proj.bias"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.ca_qcontent_proj.bias", f"decoder.layers.{i}.ca_qcontent_proj.bias")
    )
    # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.ca_kcontent_proj.bias", f"decoder.layers.{i}.ca_kcontent_proj.bias")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.ca_kpos_proj.bias", f"decoder.layers.{i}.ca_kpos_proj.bias"))
    rename_keys.append((f"transformer.decoder.layers.{i}.ca_v_proj.bias", f"decoder.layers.{i}.ca_v_proj.bias"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias", f"decoder.layers.{i}.ca_qpos_sine_proj.bias")
    )

# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
    [
        ("input_proj.weight", "input_projection.weight"),
        ("input_proj.bias", "input_projection.bias"),
        ("query_embed.weight", "query_position_embeddings.weight"),
        ("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
        ("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
        ("class_embed.weight", "class_labels_classifier.weight"),
        ("class_embed.bias", "class_labels_classifier.bias"),
        ("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
        ("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
        ("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
        ("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
        ("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
        ("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
        ("transformer.decoder.ref_point_head.layers.0.weight", "decoder.ref_point_head.layers.0.weight"),
        ("transformer.decoder.ref_point_head.layers.0.bias", "decoder.ref_point_head.layers.0.bias"),
        ("transformer.decoder.ref_point_head.layers.1.weight", "decoder.ref_point_head.layers.1.weight"),
        ("transformer.decoder.ref_point_head.layers.1.bias", "decoder.ref_point_head.layers.1.bias"),
        ("transformer.decoder.query_scale.layers.0.weight", "decoder.query_scale.layers.0.weight"),
        ("transformer.decoder.query_scale.layers.0.bias", "decoder.query_scale.layers.0.bias"),
        ("transformer.decoder.query_scale.layers.1.weight", "decoder.query_scale.layers.1.weight"),
        ("transformer.decoder.query_scale.layers.1.bias", "decoder.query_scale.layers.1.bias"),
        ("transformer.decoder.layers.0.ca_qpos_proj.weight", "decoder.layers.0.ca_qpos_proj.weight"),
        ("transformer.decoder.layers.0.ca_qpos_proj.bias", "decoder.layers.0.ca_qpos_proj.bias"),
    ]
)


def rename_key(state_dict, old, new):
    """Move ``state_dict[old]`` to ``state_dict[new]``."""
    val = state_dict.pop(old)
    state_dict[new] = val


def rename_backbone_keys(state_dict):
    """Rewrite backbone keys to the HF conv-encoder layout."""
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict


def read_in_q_k_v(state_dict, is_panoptic=False):
    """Split each encoder layer's fused in_proj matrix into separate q/k/v projections."""
    prefix = ""
    if is_panoptic:
        prefix = "conditional_detr."
    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]


def prepare_img():
    """Download the standard COCO cats test image."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_conditional_detr_checkpoint(model_name, pytorch_dump_folder_path):
    """Copy/paste/tweak an original ConditionalDETR checkpoint into the HF structure.

    NOTE(review): the obfuscated source defined all five helpers under one name
    (shadowing each other) while call sites used the canonical names restored
    here, and every config/state-dict assignment target was collapsed into a
    dead local.  Targets below are reconstructed from the upstream conversion
    script; the panoptic key-remap targets are marked for verification.
    """
    # load default config and adapt it to the requested variant
    config = ConditionalDetrConfig()
    # set backbone and dilation attributes
    if "resnet101" in model_name:
        config.backbone = "resnet101"
    if "dc5" in model_name:
        config.dilation = True
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = "huggingface/label-files"
        filename = "coco-detection-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    # load image processor
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    image_processor = ConditionalDetrImageProcessor(format=format)

    # prepare image
    img = prepare_img()
    encoding = image_processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    logger.info(f"Converting model {model_name}...")

    # load original model from torch hub
    conditional_detr = torch.hub.load("DeppMeng/ConditionalDETR", model_name, pretrained=True).eval()
    state_dict = conditional_detr.state_dict()
    # rename keys
    for src, dest in rename_keys:
        if is_panoptic:
            src = "conditional_detr." + src
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "conditional_detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("conditional_detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                # NOTE(review): target key reconstructed from the upstream
                # conversion script — verify against a real checkpoint.
                state_dict["conditional_detr.model" + key[len("conditional_detr") :]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["conditional_detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val

    # finally, create HuggingFace model and load state dict
    model = ConditionalDetrForSegmentation(config) if is_panoptic else ConditionalDetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    model.push_to_hub(repo_id=model_name, organization="DepuMeng", commit_message="Add model")

    # verify our conversion
    original_outputs = conditional_detr(pixel_values)
    outputs = model(pixel_values)
    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-4)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-4)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)

    # Save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_name",
        default="conditional_detr_resnet50",
        type=str,
        help="Name of the CONDITIONAL_DETR model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    args = parser.parse_args()
    convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
336
0
import os import posixpath import uuid from dataclasses import dataclass from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union import numpy as np import pyarrow as pa import datasets from datasets.arrow_writer import ArrowWriter, ParquetWriter from datasets.config import MAX_SHARD_SIZE from datasets.filesystems import ( is_remote_filesystem, rename, ) from datasets.iterable_dataset import _BaseExamplesIterable from datasets.utils.py_utils import convert_file_size_to_int __SCREAMING_SNAKE_CASE : Optional[Any] = datasets.utils.logging.get_logger(__name__) if TYPE_CHECKING: import pyspark @dataclass class __lowerCamelCase ( datasets.BuilderConfig ): """simple docstring""" a_: Optional[datasets.Features] = None def snake_case_ ( lowercase__ : "pyspark.sql.DataFrame" , lowercase__ : List[int] , ): '''simple docstring''' import pyspark def generate_fn(): _lowerCAmelCase =df.select("""*""" , pyspark.sql.functions.spark_partition_id().alias("""part_id""" ) ) for partition_id in partition_order: _lowerCAmelCase =df_with_partition_id.select("""*""" ).where(f"part_id = {partition_id}" ).drop("""part_id""" ) _lowerCAmelCase =partition_df.collect() _lowerCAmelCase =0 for row in rows: yield f"{partition_id}_{row_id}", row.asDict() row_id += 1 return generate_fn class __lowerCamelCase ( _BaseExamplesIterable ): """simple docstring""" def __init__( self : Optional[Any] , lowerCamelCase_ : "pyspark.sql.DataFrame" , lowerCamelCase_ : str=None , ): _lowerCAmelCase =df _lowerCAmelCase =partition_order or range(self.df.rdd.getNumPartitions() ) _lowerCAmelCase =_generate_iterable_examples(self.df , self.partition_order ) def __iter__( self : Union[str, Any] ): yield from self.generate_examples_fn() def lowerCAmelCase__ ( self : List[Any] , lowerCamelCase_ : np.random.Generator ): _lowerCAmelCase =list(range(self.df.rdd.getNumPartitions() ) ) generator.shuffle(lowerCamelCase_ ) return SparkExamplesIterable(self.df , partition_order=lowerCamelCase_ ) def 
lowerCAmelCase__ ( self : int , lowerCamelCase_ : int , lowerCamelCase_ : int ): _lowerCAmelCase =self.split_shard_indices_by_worker(lowerCamelCase_ , lowerCamelCase_ ) return SparkExamplesIterable(self.df , partition_order=lowerCamelCase_ ) @property def lowerCAmelCase__ ( self : Any ): return len(self.partition_order ) class __lowerCamelCase ( datasets.DatasetBuilder ): """simple docstring""" a_: Dict = SparkConfig def __init__( self : Optional[int] , lowerCamelCase_ : "pyspark.sql.DataFrame" , lowerCamelCase_ : str = None , lowerCamelCase_ : str = None , **lowerCamelCase_ : Optional[Any] , ): import pyspark _lowerCAmelCase =pyspark.sql.SparkSession.builder.getOrCreate() _lowerCAmelCase =df _lowerCAmelCase =working_dir super().__init__( cache_dir=lowerCamelCase_ , config_name=str(self.df.semanticHash() ) , **lowerCamelCase_ , ) def lowerCAmelCase__ ( self : Dict ): # Returns the path of the created file. def create_cache_and_write_probe(lowerCamelCase_ : Tuple ): # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories # already exist. os.makedirs(self._cache_dir , exist_ok=lowerCamelCase_ ) _lowerCAmelCase =os.path.join(self._cache_dir , """fs_test""" + uuid.uuida().hex ) # Opening the file in append mode will create a new file unless it already exists, in which case it will not # change the file contents. open(lowerCamelCase_ , """a""" ) return [probe_file] if self._spark.conf.get("""spark.master""" , """""" ).startswith("""local""" ): return # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS # accessible to the driver. # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error. 
if self._cache_dir: _lowerCAmelCase =( self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(lowerCamelCase_ ).collect() ) if os.path.isfile(probe[0] ): return raise ValueError( """When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir""" ) def lowerCAmelCase__ ( self : Tuple ): return datasets.DatasetInfo(features=self.config.features ) def lowerCAmelCase__ ( self : Tuple , lowerCamelCase_ : datasets.download.download_manager.DownloadManager ): return [datasets.SplitGenerator(name=datasets.Split.TRAIN )] def lowerCAmelCase__ ( self : Union[str, Any] , lowerCamelCase_ : Optional[int] ): import pyspark def get_arrow_batch_size(lowerCamelCase_ : Optional[Any] ): for batch in it: yield pa.RecordBatch.from_pydict({"""batch_bytes""": [batch.nbytes]} ) _lowerCAmelCase =self.df.count() _lowerCAmelCase =df_num_rows if df_num_rows <= 100 else 100 # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample. _lowerCAmelCase =( self.df.limit(lowerCamelCase_ ) .repartition(1 ) .mapInArrow(lowerCamelCase_ , """batch_bytes: long""" ) .agg(pyspark.sql.functions.sum("""batch_bytes""" ).alias("""sample_bytes""" ) ) .collect()[0] .sample_bytes / sample_num_rows ) _lowerCAmelCase =approx_bytes_per_row * df_num_rows if approx_total_size > max_shard_size: # Make sure there is at least one row per partition. 
_lowerCAmelCase =min(lowerCamelCase_ , int(approx_total_size / max_shard_size ) ) _lowerCAmelCase =self.df.repartition(lowerCamelCase_ ) def lowerCAmelCase__ ( self : List[str] , lowerCamelCase_ : str , lowerCamelCase_ : str , lowerCamelCase_ : int , ): import pyspark _lowerCAmelCase =ParquetWriter if file_format == """parquet""" else ArrowWriter _lowerCAmelCase =os.path.join(self._working_dir , os.path.basename(lowerCamelCase_ ) ) if self._working_dir else fpath _lowerCAmelCase =file_format == """parquet""" # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to # pickling the SparkContext. _lowerCAmelCase =self.config.features _lowerCAmelCase =self._writer_batch_size _lowerCAmelCase =self._fs.storage_options def write_arrow(lowerCamelCase_ : str ): # Within the same SparkContext, no two task attempts will share the same attempt ID. _lowerCAmelCase =pyspark.TaskContext().taskAttemptId() _lowerCAmelCase =next(lowerCamelCase_ , lowerCamelCase_ ) if first_batch is None: # Some partitions might not receive any data. 
return pa.RecordBatch.from_arrays( [[task_id], [0], [0]] , names=["""task_id""", """num_examples""", """num_bytes"""] , ) _lowerCAmelCase =0 _lowerCAmelCase =writer_class( features=lowerCamelCase_ , path=working_fpath.replace("""SSSSS""" , F"{shard_id:05d}" ).replace("""TTTTT""" , F"{task_id:05d}" ) , writer_batch_size=lowerCamelCase_ , storage_options=lowerCamelCase_ , embed_local_files=lowerCamelCase_ , ) _lowerCAmelCase =pa.Table.from_batches([first_batch] ) writer.write_table(lowerCamelCase_ ) for batch in it: if max_shard_size is not None and writer._num_bytes >= max_shard_size: _lowerCAmelCase , _lowerCAmelCase =writer.finalize() writer.close() yield pa.RecordBatch.from_arrays( [[task_id], [num_examples], [num_bytes]] , names=["""task_id""", """num_examples""", """num_bytes"""] , ) shard_id += 1 _lowerCAmelCase =writer_class( features=writer._features , path=working_fpath.replace("""SSSSS""" , F"{shard_id:05d}" ).replace("""TTTTT""" , F"{task_id:05d}" ) , writer_batch_size=lowerCamelCase_ , storage_options=lowerCamelCase_ , embed_local_files=lowerCamelCase_ , ) _lowerCAmelCase =pa.Table.from_batches([batch] ) writer.write_table(lowerCamelCase_ ) if writer._num_bytes > 0: _lowerCAmelCase , _lowerCAmelCase =writer.finalize() writer.close() yield pa.RecordBatch.from_arrays( [[task_id], [num_examples], [num_bytes]] , names=["""task_id""", """num_examples""", """num_bytes"""] , ) if working_fpath != fpath: for file in os.listdir(os.path.dirname(lowerCamelCase_ ) ): _lowerCAmelCase =os.path.join(os.path.dirname(lowerCamelCase_ ) , os.path.basename(lowerCamelCase_ ) ) shutil.move(lowerCamelCase_ , lowerCamelCase_ ) _lowerCAmelCase =( self.df.mapInArrow(lowerCamelCase_ , """task_id: long, num_examples: long, num_bytes: long""" ) .groupBy("""task_id""" ) .agg( pyspark.sql.functions.sum("""num_examples""" ).alias("""total_num_examples""" ) , pyspark.sql.functions.sum("""num_bytes""" ).alias("""total_num_bytes""" ) , pyspark.sql.functions.count("""num_bytes""" 
).alias("""num_shards""" ) , pyspark.sql.functions.collect_list("""num_examples""" ).alias("""shard_lengths""" ) , ) .collect() ) for row in stats: yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths) def lowerCAmelCase__ ( self : Any , lowerCamelCase_ : "datasets.SplitGenerator" , lowerCamelCase_ : str = "arrow" , lowerCamelCase_ : Optional[Union[str, int]] = None , lowerCamelCase_ : Optional[int] = None , **lowerCamelCase_ : List[str] , ): self._validate_cache_dir() _lowerCAmelCase =convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE ) self._repartition_df_if_needed(lowerCamelCase_ ) _lowerCAmelCase =not is_remote_filesystem(self._fs ) _lowerCAmelCase =os.path.join if is_local else posixpath.join _lowerCAmelCase ="""-TTTTT-SSSSS-of-NNNNN""" _lowerCAmelCase =F"{self.name}-{split_generator.name}{SUFFIX}.{file_format}" _lowerCAmelCase =path_join(self._output_dir , lowerCamelCase_ ) _lowerCAmelCase =0 _lowerCAmelCase =0 _lowerCAmelCase =0 _lowerCAmelCase =[] _lowerCAmelCase =[] for task_id, content in self._prepare_split_single(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ): ( ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ) =content if num_bytes > 0: total_num_examples += num_examples total_num_bytes += num_bytes total_shards += num_shards task_id_and_num_shards.append((task_id, num_shards) ) all_shard_lengths.extend(lowerCamelCase_ ) _lowerCAmelCase =total_num_examples _lowerCAmelCase =total_num_bytes # should rename everything at the end logger.debug(F"Renaming {total_shards} shards." ) if total_shards > 1: _lowerCAmelCase =all_shard_lengths # Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a # pickling error due to pickling the SparkContext. 
_lowerCAmelCase =self._fs # use the -SSSSS-of-NNNNN pattern def _rename_shard( lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : int , ): rename( lowerCamelCase_ , fpath.replace("""SSSSS""" , F"{shard_id:05d}" ).replace("""TTTTT""" , F"{task_id:05d}" ) , fpath.replace("""TTTTT-SSSSS""" , F"{global_shard_id:05d}" ).replace("""NNNNN""" , F"{total_shards:05d}" ) , ) _lowerCAmelCase =[] _lowerCAmelCase =0 for i in range(len(lowerCamelCase_ ) ): _lowerCAmelCase , _lowerCAmelCase =task_id_and_num_shards[i] for shard_id in range(lowerCamelCase_ ): args.append([task_id, shard_id, global_shard_id] ) global_shard_id += 1 self._spark.sparkContext.parallelize(lowerCamelCase_ , len(lowerCamelCase_ ) ).map(lambda lowerCamelCase_ : _rename_shard(*lowerCamelCase_ ) ).collect() else: # don't use any pattern _lowerCAmelCase =0 _lowerCAmelCase =task_id_and_num_shards[0][0] self._rename( fpath.replace("""SSSSS""" , F"{shard_id:05d}" ).replace("""TTTTT""" , F"{task_id:05d}" ) , fpath.replace(lowerCamelCase_ , """""" ) , ) def lowerCAmelCase__ ( self : int , lowerCamelCase_ : "datasets.SplitGenerator" , ): return SparkExamplesIterable(self.df )
149
import importlib

import torch


def load_config(config_path, display=False):
    """Load an OmegaConf YAML config from *config_path*.

    If *display* is true, pretty-print the resolved config as YAML.
    Returns the OmegaConf config object.
    """
    # Local imports: yaml/omegaconf are only needed by the config helpers,
    # so the generic helpers below stay importable without them.
    import yaml
    from omegaconf import OmegaConf

    config = OmegaConf.load(config_path)
    if display:
        print(yaml.dump(OmegaConf.to_container(config)))
    return config


def load_vqgan(device, conf_path=None, ckpt_path=None):
    """Build a taming-transformers VQModel and load its checkpoint onto *device*.

    Falls back to the bundled ``vqgan_only`` config/checkpoint paths when
    *conf_path* / *ckpt_path* are not given.  Returns the model in its
    current train/eval state (caller is expected to call ``.eval()``).
    """
    from taming.models.vqgan import VQModel

    if conf_path is None:
        conf_path = "./model_checkpoints/vqgan_only.yaml"
    config = load_config(conf_path, display=False)
    model = VQModel(**config.model.params)
    if ckpt_path is None:
        ckpt_path = "./model_checkpoints/vqgan_only.pt"
    sd = torch.load(ckpt_path, map_location=device)
    if ".ckpt" in ckpt_path:
        # Lightning checkpoints nest the weights under "state_dict".
        sd = sd["state_dict"]
    # NOTE(review): strict flag was lost in the mangled source; non-strict is
    # the usual choice for a vqgan-only checkpoint — confirm against callers.
    model.load_state_dict(sd, strict=False)
    model.to(device)
    del sd  # free the checkpoint dict before returning
    return model


def reconstruct_with_vqgan(x, model):
    """Encode *x* with the VQGAN *model* and decode the latent back to an image."""
    z, _, _ = model.encode(x)
    print(f"VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}")
    xrec = model.decode(z)
    return xrec


def get_obj_from_str(string, reload=False):
    """Resolve a dotted path like ``"pkg.mod.Cls"`` to the named attribute.

    If *reload* is true, re-import the containing module first.
    """
    module_name, cls_name = string.rsplit(".", 1)
    if reload:
        module_imp = importlib.import_module(module_name)
        importlib.reload(module_imp)
    return getattr(importlib.import_module(module_name, package=None), cls_name)


def instantiate_from_config(config):
    """Instantiate ``config["target"]`` with ``config["params"]`` as kwargs.

    Raises KeyError when the mandatory ``target`` key is missing.
    """
    if "target" not in config:
        raise KeyError("Expected key `target` to instantiate.")
    return get_obj_from_str(config["target"])(**config.get("params", {}))


def load_model_from_config(config, sd, gpu=True, eval_mode=True):
    """Instantiate a model from *config*, optionally loading state dict *sd*.

    Moves the model to CUDA when *gpu* is true and switches it to eval mode
    when *eval_mode* is true.  Returns ``{"model": model}``.
    """
    model = instantiate_from_config(config)
    if sd is not None:
        model.load_state_dict(sd)
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}


def load_model(config, ckpt, gpu, eval_mode):
    """Load a checkpointed model described by *config*.

    *ckpt* may be falsy, in which case the model is instantiated with random
    weights and ``global_step`` is None.  Returns ``(model, global_step)``.
    """
    if ckpt:
        pl_sd = torch.load(ckpt, map_location="cpu")
        global_step = pl_sd["global_step"]
        print(f"loaded model from global step {global_step}.")
    else:
        pl_sd = {"state_dict": None}
        global_step = None
    model = load_model_from_config(config.model, pl_sd["state_dict"], gpu=gpu, eval_mode=eval_mode)["model"]
    return model, global_step
149
1
def SCREAMING_SNAKE_CASE(lowercase_: int = 100) -> int:
    """Project Euler problem 6: sum-square difference.

    Return the difference between the square of the sum and the sum of the
    squares of the first *lowercase_* natural numbers, using the closed-form
    identities (no loop needed):
      sum(1..n)   = n(n+1)/2
      sum(k^2)    = n(n+1)(2n+1)/6
    """
    # Original code assigned both locals to the same mangled name and then
    # returned undefined identifiers; the names below match what is computed.
    square_of_sum = (lowercase_ * (lowercase_ + 1) // 2) ** 2
    sum_of_squares = lowercase_ * (lowercase_ + 1) * (2 * lowercase_ + 1) // 6
    return square_of_sum - sum_of_squares


if __name__ == "__main__":
    # Fixed: the guard previously called the nonexistent name `solution`.
    print(f"{SCREAMING_SNAKE_CASE() = }")
87
"""simple docstring""" import inspect import math import tempfile import unittest import numpy as np from transformers import ViTMAEConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ViTMAEForPreTraining, ViTMAEModel from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class A_ : def __init__( self: Dict ,__lowerCAmelCase: Optional[int] ,__lowerCAmelCase: int=13 ,__lowerCAmelCase: List[str]=30 ,__lowerCAmelCase: List[str]=2 ,__lowerCAmelCase: Dict=3 ,__lowerCAmelCase: Tuple=True ,__lowerCAmelCase: List[str]=True ,__lowerCAmelCase: Optional[Any]=32 ,__lowerCAmelCase: List[Any]=5 ,__lowerCAmelCase: int=4 ,__lowerCAmelCase: Optional[int]=37 ,__lowerCAmelCase: Dict="gelu" ,__lowerCAmelCase: str=0.1 ,__lowerCAmelCase: List[str]=0.1 ,__lowerCAmelCase: Optional[Any]=10 ,__lowerCAmelCase: List[str]=0.02 ,__lowerCAmelCase: Union[str, Any]=3 ,__lowerCAmelCase: Tuple=0.6 ,__lowerCAmelCase: Dict=None ,): '''simple docstring''' _lowerCamelCase : Optional[int] = parent _lowerCamelCase : Any = batch_size _lowerCamelCase : Any = image_size _lowerCamelCase : List[str] = patch_size _lowerCamelCase : Union[str, Any] = num_channels _lowerCamelCase : List[str] = is_training _lowerCamelCase : str = use_labels _lowerCamelCase : List[Any] = hidden_size _lowerCamelCase : Union[str, Any] = num_hidden_layers _lowerCamelCase : Optional[int] = num_attention_heads _lowerCamelCase : Optional[Any] = intermediate_size _lowerCamelCase : Optional[int] = hidden_act _lowerCamelCase : 
Union[str, Any] = hidden_dropout_prob _lowerCamelCase : Any = attention_probs_dropout_prob _lowerCamelCase : str = type_sequence_label_size _lowerCamelCase : int = initializer_range _lowerCamelCase : Dict = mask_ratio _lowerCamelCase : List[Any] = scope # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above # (we add 1 for the [CLS] token) _lowerCamelCase : str = (image_size // patch_size) ** 2 _lowerCamelCase : Dict = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) ) def _lowercase ( self: List[str] ): '''simple docstring''' _lowerCamelCase : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _lowerCamelCase : int = None if self.use_labels: _lowerCamelCase : Any = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) _lowerCamelCase : str = self.get_config() return config, pixel_values, labels def _lowercase ( self: Union[str, Any] ): '''simple docstring''' return ViTMAEConfig( image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=__lowerCAmelCase ,initializer_range=self.initializer_range ,mask_ratio=self.mask_ratio ,) def _lowercase ( self: Any ,__lowerCAmelCase: List[Any] ,__lowerCAmelCase: Optional[int] ,__lowerCAmelCase: Dict ): '''simple docstring''' _lowerCamelCase : Any = ViTMAEModel(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _lowerCamelCase : Union[str, Any] = model(__lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def _lowercase ( self: List[str] ,__lowerCAmelCase: Union[str, Any] ,__lowerCAmelCase: int 
,__lowerCAmelCase: Dict ): '''simple docstring''' _lowerCamelCase : List[Any] = ViTMAEForPreTraining(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _lowerCamelCase : Dict = model(__lowerCAmelCase ) _lowerCamelCase : Optional[Any] = (self.image_size // self.patch_size) ** 2 _lowerCamelCase : Optional[int] = self.patch_size**2 * self.num_channels self.parent.assertEqual(result.logits.shape ,(self.batch_size, num_patches, expected_num_channels) ) # test greyscale images _lowerCamelCase : str = 1 _lowerCamelCase : Tuple = ViTMAEForPreTraining(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _lowerCamelCase : Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) _lowerCamelCase : Union[str, Any] = model(__lowerCAmelCase ) _lowerCamelCase : Any = self.patch_size**2 self.parent.assertEqual(result.logits.shape ,(self.batch_size, num_patches, expected_num_channels) ) def _lowercase ( self: Union[str, Any] ): '''simple docstring''' _lowerCamelCase : int = self.prepare_config_and_inputs() _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : int = config_and_inputs _lowerCamelCase : Dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class A_ ( _a , _a , unittest.TestCase ): lowerCAmelCase__ = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else () lowerCAmelCase__ = {'feature-extraction': ViTMAEModel} if is_torch_available() else {} lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False def _lowercase ( self: Optional[int] ): '''simple docstring''' _lowerCamelCase : int = ViTMAEModelTester(self ) _lowerCamelCase : List[str] = ConfigTester(self ,config_class=__lowerCAmelCase ,has_text_modality=__lowerCAmelCase ,hidden_size=37 ) def _lowercase ( self: List[str] ): '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason="ViTMAE does not use inputs_embeds" ) def _lowercase ( self: Union[str, Any] ): '''simple 
docstring''' pass def _lowercase ( self: List[Any] ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCamelCase : Optional[Any] = model_class(__lowerCAmelCase ) self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) ) _lowerCamelCase : Optional[int] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__lowerCAmelCase ,nn.Linear ) ) def _lowercase ( self: Union[str, Any] ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCamelCase : Dict = model_class(__lowerCAmelCase ) _lowerCamelCase : Union[str, Any] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _lowerCamelCase : Optional[Any] = [*signature.parameters.keys()] _lowerCamelCase : Dict = ["pixel_values"] self.assertListEqual(arg_names[:1] ,__lowerCAmelCase ) def _lowercase ( self: Dict ): '''simple docstring''' _lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCAmelCase ) def _lowercase ( self: str ): '''simple docstring''' _lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*__lowerCAmelCase ) def _lowercase ( self: Any ,__lowerCAmelCase: int ,__lowerCAmelCase: Dict ,__lowerCAmelCase: Optional[Any] ): '''simple docstring''' np.random.seed(2 ) _lowerCamelCase : Optional[Any] = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 ) _lowerCamelCase : Optional[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) _lowerCamelCase : Union[str, Any] = torch.from_numpy(__lowerCAmelCase ) # Add `noise` argument. 
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument _lowerCamelCase : Dict = pt_noise super().check_pt_tf_models(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ) def _lowercase ( self: Dict ): '''simple docstring''' _lowerCamelCase, _lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCamelCase : List[str] = model_class(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() # make random mask reproducible torch.manual_seed(2 ) with torch.no_grad(): _lowerCamelCase : int = model(**self._prepare_for_class(__lowerCAmelCase ,__lowerCAmelCase ) ) _lowerCamelCase : Any = outputs[0].cpu().numpy() _lowerCamelCase : Union[str, Any] = 0 with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(__lowerCAmelCase ) _lowerCamelCase : str = model_class.from_pretrained(__lowerCAmelCase ) model.to(__lowerCAmelCase ) # make random mask reproducible torch.manual_seed(2 ) with torch.no_grad(): _lowerCamelCase : Dict = model(**self._prepare_for_class(__lowerCAmelCase ,__lowerCAmelCase ) ) # Make sure we don't have nans _lowerCamelCase : Union[str, Any] = after_outputs[0].cpu().numpy() _lowerCamelCase : Union[str, Any] = 0 _lowerCamelCase : List[Any] = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(__lowerCAmelCase ,1e-5 ) @unittest.skip( reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." ) def _lowercase ( self: Optional[Any] ): '''simple docstring''' pass @unittest.skip( reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." ) def _lowercase ( self: str ): '''simple docstring''' pass @unittest.skip( reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." 
) def _lowercase ( self: Tuple ): '''simple docstring''' pass @unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load" ) def _lowercase ( self: int ): '''simple docstring''' pass @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." ) def _lowercase ( self: Dict ): '''simple docstring''' pass @slow def _lowercase ( self: Dict ): '''simple docstring''' for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowerCamelCase : Optional[Any] = ViTMAEModel.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) def lowerCamelCase_( ) -> str: '''simple docstring''' _lowerCamelCase : Dict = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class A_ ( unittest.TestCase ): @cached_property def _lowercase ( self: str ): '''simple docstring''' return ViTImageProcessor.from_pretrained("facebook/vit-mae-base" ) if is_vision_available() else None @slow def _lowercase ( self: int ): '''simple docstring''' np.random.seed(2 ) _lowerCamelCase : List[str] = ViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base" ).to(__lowerCAmelCase ) _lowerCamelCase : int = self.default_image_processor _lowerCamelCase : int = prepare_img() _lowerCamelCase : Tuple = image_processor(images=__lowerCAmelCase ,return_tensors="pt" ).to(__lowerCAmelCase ) # prepare a noise vector that will be also used for testing the TF model # (this way we can ensure that the PT and TF models operate on the same inputs) _lowerCamelCase : Tuple = ViTMAEConfig() _lowerCamelCase : Union[str, Any] = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 ) _lowerCamelCase : Optional[Any] = np.random.uniform(size=(1, num_patches) ) # forward pass with torch.no_grad(): _lowerCamelCase : Dict = model(**__lowerCAmelCase ,noise=torch.from_numpy(__lowerCAmelCase ).to(device=__lowerCAmelCase ) ) # verify the logits _lowerCamelCase : Any = 
torch.Size((1, 196, 768) ) self.assertEqual(outputs.logits.shape ,__lowerCAmelCase ) _lowerCamelCase : Tuple = torch.tensor( [[-0.05_48, -1.70_23, -0.93_25], [0.37_21, -0.56_70, -0.22_33], [0.82_35, -1.38_78, -0.35_24]] ) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] ,expected_slice.to(__lowerCAmelCase ) ,atol=1e-4 ) )
46
0
"""Tests for the diffusers UnCLIPScheduler."""
import torch

from diffusers import UnCLIPScheduler

from .test_schedulers import SchedulerCommonTest


class UpperCAmelCase__(SchedulerCommonTest):
    # NOTE(review): the mangled source inherited from the undefined name
    # `_lowerCAmelCase`; the relative import above makes SchedulerCommonTest
    # the only base in scope.  The class attribute was mangled to `a` while
    # the method bodies read `self.scheduler_classes`, so it is restored here.
    scheduler_classes = (UnCLIPScheduler,)

    def get_scheduler_config(self, **kwargs):
        """Default UnCLIPScheduler kwargs; overridable per test via **kwargs."""
        config = {
            "num_train_timesteps": 1000,
            "variance_type": "fixed_small_log",
            "clip_sample": True,
            "clip_sample_range": 1.0,
            "prediction_type": "epsilon",
        }
        config.update(**kwargs)
        return config

    # NOTE(review): every method below was mangled to the single name
    # `UpperCAmelCase_`, so only the last definition survived and pytest would
    # have collected almost nothing.  Distinct test_* names are restored;
    # names are inferred from each body — confirm against the upstream file.

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_variance_type(self):
        for variance in ["fixed_small_log", "learned_range"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_clip_sample_range(self):
        for clip_sample_range in [1, 5, 10, 20]:
            self.check_over_configs(clip_sample_range=clip_sample_range)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for time_step in [0, 500, 999]:
            for prev_timestep in [None, 5, 100, 250, 500, 750]:
                # prev_timestep must precede time_step to be meaningful.
                if prev_timestep is not None and prev_timestep >= time_step:
                    continue
                self.check_over_forward(time_step=time_step, prev_timestep=prev_timestep)

    def test_variance_fixed_small_log(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="fixed_small_log")
        scheduler = scheduler_class(**scheduler_config)
        assert torch.sum(torch.abs(scheduler._get_variance(0) - 1.0000e-10)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.0549625)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.9994987)) < 1e-5

    def test_variance_learned_range(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="learned_range")
        scheduler = scheduler_class(**scheduler_config)
        predicted_variance = 0.5
        # Behavior preserved from the original: these compare a signed
        # difference (no abs), so they are weaker than they look.
        assert scheduler._get_variance(1, predicted_variance=predicted_variance) - -10.1712790 < 1e-5
        assert scheduler._get_variance(487, predicted_variance=predicted_variance) - -5.7998052 < 1e-5
        assert scheduler._get_variance(999, predicted_variance=predicted_variance) - -0.0010011 < 1e-5

    def test_full_loop(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = scheduler.timesteps
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)
        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 252.2682495) < 1e-2
        assert abs(result_mean.item() - 0.3284743) < 1e-3

    def test_full_loop_skip_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(25)
        timesteps = scheduler.timesteps
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)
        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)
            if i + 1 == timesteps.shape[0]:
                prev_timestep = None
            else:
                prev_timestep = timesteps[i + 1]
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(
                residual, t, sample, prev_timestep=prev_timestep, generator=generator
            ).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 258.2044983) < 1e-2
        assert abs(result_mean.item() - 0.3362038) < 1e-3

    # The two overrides below intentionally skip base-class tests that do not
    # apply to UnCLIPScheduler.  NOTE(review): their names were lost in the
    # mangled source — confirm against the upstream diffusers test file.
    def test_trained_betas(self):
        pass

    def test_add_noise_device(self):
        pass
708
"""Constraint classes used by constrained beam-search generation.

A ``Constraint`` is fulfilled one token at a time: ``advance()`` proposes the
token(s) that would make progress, ``update(token_id)`` consumes one generated
token, and the constraint is complete when ``remaining() == 0``.
"""
from abc import ABC, abstractmethod
from typing import List, Optional


class Constraint(ABC):
    """Abstract base class for all constraints that can be applied during generation."""

    def __init__(self):
        # Immediately sanity-check the subclass implementation.
        self.test()

    def test(self):
        """Check that repeatedly feeding ``advance()`` into ``update()`` fulfills the constraint."""
        counter = 0
        completed = False
        while not completed:
            if counter == 1:
                self.reset()
            advance = self.advance()
            if not self.does_advance(advance):
                raise Exception(
                    "Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true."
                )
            stepped, completed, reset = self.update(advance)
            counter += 1
            if counter > 10000:
                raise Exception("update() does not fulfill the constraint.")
        if self.remaining() != 0:
            raise Exception("Custom Constraint is not defined correctly.")

    @abstractmethod
    def advance(self):
        """Return the token (or list of tokens) that would advance this constraint by one step."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def does_advance(self, token_id: int):
        """Return ``True`` if ``token_id`` makes incremental progress on this constraint."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def update(self, token_id: int):
        """Consume ``token_id``; return ``(stepped, completed, reset)``."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def reset(self):
        """Reset all progress made on this constraint."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def remaining(self):
        """Return how many more steps are needed to complete this constraint."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def copy(self, stateful=False):
        """Return a copy of this constraint; carry over progress only if ``stateful``."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )


class PhrasalConstraint(Constraint):
    """Force a specific, ordered sequence of token ids to appear in the output.

    Args:
        token_ids: The ids of the tokens that must be generated, in order.
    """

    def __init__(self, token_ids: List[int]):
        # Skip Constraint.__init__ on purpose: self.test() would mutate fresh state.
        super(Constraint, self).__init__()

        if not isinstance(token_ids, list) or len(token_ids) == 0:
            raise ValueError(f"`token_ids` has to be a non-empty list, but is {token_ids}.")
        if any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids):
            raise ValueError(f"Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.")

        self.token_ids = token_ids
        self.seqlen = len(self.token_ids)
        self.fulfilled_idx = -1  # the index of the currently fulfilled step
        self.completed = False

    def advance(self):
        if self.completed:
            return None
        return self.token_ids[self.fulfilled_idx + 1]

    def does_advance(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}")
        if self.completed:
            return False
        return token_id == self.token_ids[self.fulfilled_idx + 1]

    def update(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}")

        stepped = False
        completed = False
        reset = False

        if self.does_advance(token_id):
            self.fulfilled_idx += 1
            stepped = True
            if self.fulfilled_idx == (self.seqlen - 1):
                completed = True
            self.completed = completed
        else:
            # Failed to make progress: the whole phrase must restart from scratch.
            reset = True
            self.reset()
        return stepped, completed, reset

    def reset(self):
        self.completed = False
        self.fulfilled_idx = 0

    def remaining(self):
        return self.seqlen - (self.fulfilled_idx + 1)

    def copy(self, stateful=False):
        new_constraint = PhrasalConstraint(self.token_ids)
        if stateful:
            new_constraint.seqlen = self.seqlen
            new_constraint.fulfilled_idx = self.fulfilled_idx
            new_constraint.completed = self.completed
        return new_constraint


class DisjunctiveTrie:
    """A trie over several candidate token-id sequences, used to track which
    continuations are still possible for a :class:`DisjunctiveConstraint`."""

    def __init__(self, nested_token_ids: List[List[int]], no_subsets=True):
        self.max_height = max([len(one) for one in nested_token_ids])

        root = {}
        for token_ids in nested_token_ids:
            level = root
            for tidx, token_id in enumerate(token_ids):
                if token_id not in level:
                    level[token_id] = {}
                level = level[token_id]

        if no_subsets and self.has_subsets(root, nested_token_ids):
            raise ValueError(
                "Each list in `nested_token_ids` can't be a complete subset of another list, but is"
                f" {nested_token_ids}."
            )

        self.trie = root

    def next_tokens(self, current_seq):
        """Return the set of tokens that may follow ``current_seq`` in the trie."""
        start = self.trie
        for current_token in current_seq:
            start = start[current_token]
        next_tokens = list(start.keys())
        return next_tokens

    def reached_leaf(self, current_seq):
        next_tokens = self.next_tokens(current_seq)
        return len(next_tokens) == 0

    def count_leaves(self, root):
        next_nodes = list(root.values())
        if len(next_nodes) == 0:
            return 1
        else:
            return sum([self.count_leaves(nn) for nn in next_nodes])

    def has_subsets(self, trie, nested_token_ids):
        # If one sequence is a strict prefix of another, the trie has fewer
        # leaves than input sequences.
        leaf_count = self.count_leaves(trie)
        return len(nested_token_ids) != leaf_count


class DisjunctiveConstraint(Constraint):
    """Fulfilled when *any one* of several candidate token-id sequences is generated.

    Args:
        nested_token_ids: A list of candidate phrases, each a list of token ids.
    """

    def __init__(self, nested_token_ids: List[List[int]]):
        super(Constraint, self).__init__()

        if not isinstance(nested_token_ids, list) or len(nested_token_ids) == 0:
            raise ValueError(f"`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.")
        if any(not isinstance(token_ids, list) for token_ids in nested_token_ids):
            raise ValueError(f"`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.")
        if any(
            any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids)
            for token_ids in nested_token_ids
        ):
            raise ValueError(
                f"Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}."
            )

        self.trie = DisjunctiveTrie(nested_token_ids)
        self.token_ids = nested_token_ids
        self.seqlen = self.trie.max_height
        self.current_seq = []
        self.completed = False

    def advance(self):
        token_list = self.trie.next_tokens(self.current_seq)
        if len(token_list) == 0:
            return None
        else:
            return token_list

    def does_advance(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}")
        next_tokens = self.trie.next_tokens(self.current_seq)
        return token_id in next_tokens

    def update(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}")

        stepped = False
        completed = False
        reset = False

        if self.does_advance(token_id):
            self.current_seq.append(token_id)
            stepped = True
        else:
            reset = True
            self.reset()

        completed = self.trie.reached_leaf(self.current_seq)
        self.completed = completed
        return stepped, completed, reset

    def reset(self):
        self.completed = False
        self.current_seq = []

    def remaining(self):
        if self.completed:
            # This can be completed without reaching max height.
            return 0
        else:
            return self.seqlen - len(self.current_seq)

    def copy(self, stateful=False):
        new_constraint = DisjunctiveConstraint(self.token_ids)
        if stateful:
            new_constraint.seqlen = self.seqlen
            new_constraint.current_seq = self.current_seq
            new_constraint.completed = self.completed
        return new_constraint


class ConstraintListState:
    """Tracks the fulfillment progress of a whole list of constraints for one beam.

    Args:
        constraints: The :class:`Constraint` objects that must all be fulfilled.
    """

    def __init__(self, constraints: List[Constraint]):
        self.constraints = constraints
        # Max # of steps required to fulfill a given constraint.
        self.max_seqlen = max([c.seqlen for c in constraints])
        self.n_constraints = len(constraints)
        self.completed = False
        self.init_state()

    def init_state(self):
        self.complete_constraints = []
        self.inprogress_constraint = None
        self.pending_constraints = [constraint.copy(stateful=False) for constraint in self.constraints]

    def get_bank(self):
        """Return a score used to bucket beams by how much constraint progress they made."""
        add = 0
        if self.inprogress_constraint:
            # Extra points for having a constraint mid-fulfilled.
            add += self.max_seqlen - self.inprogress_constraint.remaining()
        return (len(self.complete_constraints) * self.max_seqlen) + add

    def advance(self):
        """Return all token ids that would make progress on any constraint, or ``None``."""
        token_list = []
        if self.inprogress_constraint is None:
            for constraint in self.pending_constraints:  # "pending" == "unfulfilled yet"
                advance = constraint.advance()
                if isinstance(advance, int):
                    token_list.append(advance)
                elif isinstance(advance, list):
                    token_list.extend(advance)
        else:
            advance = self.inprogress_constraint.advance()
            if isinstance(advance, int):
                token_list.append(advance)
            elif isinstance(advance, list):
                token_list.extend(advance)

        if len(token_list) == 0:
            return None
        else:
            return token_list

    def reset(self, token_ids: Optional[List[int]]):
        """Re-derive the whole state from scratch given the tokens generated so far."""
        self.init_state()
        if token_ids is not None:
            for token in token_ids:
                # Completes or steps **one** constraint.
                complete, stepped = self.add(token)
                # The entire list of constraints is fulfilled.
                if self.completed:
                    break

    def add(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` should be an `int`, but is `{token_id}`.")

        complete, stepped = False, False

        if self.completed:
            complete = True
            stepped = False
            return complete, stepped

        if self.inprogress_constraint is not None:
            # In the middle of fulfilling a constraint. If the `token_id` *does* make incremental progress to
            # the current job, simply update the state.
            stepped, complete, reset = self.inprogress_constraint.update(token_id)
            if reset:
                # 1. If the next token breaks the progress, then we must restart.
                #    e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
                #    But that doesn't mean we self.init_state(), since we only reset the state for this particular
                #    constraint, not the full list of constraints.
                self.pending_constraints.append(self.inprogress_constraint.copy(stateful=False))
                self.inprogress_constraint = None
            if complete:
                # 2. If the next token completes the constraint, move it to the completed list, set
                #    inprogress to None. If there are no pending constraints either, then this full list of
                #    constraints is complete.
                self.complete_constraints.append(self.inprogress_constraint)
                self.inprogress_constraint = None
                if len(self.pending_constraints) == 0:
                    # We're done!
                    self.completed = True
        else:
            # Not in the middle of fulfilling a constraint. So does this `token_id` help us step towards any of
            # our list of constraints?
            for cidx, pending_constraint in enumerate(self.pending_constraints):
                if pending_constraint.does_advance(token_id):
                    stepped, complete, reset = pending_constraint.update(token_id)
                    if not stepped:
                        raise Exception(
                            "`constraint.update(token_id)` is not yielding incremental progress, "
                            "even though `constraint.does_advance(token_id)` is true."
                        )
                    if complete:
                        self.complete_constraints.append(pending_constraint)
                        self.inprogress_constraint = None
                    if not complete and stepped:
                        self.inprogress_constraint = pending_constraint
                    if complete or stepped:
                        # If we made any progress at all, then it's at least not a "pending constraint".
                        self.pending_constraints = (
                            self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
                        )
                        if len(self.pending_constraints) == 0 and self.inprogress_constraint is None:
                            # If there's no longer any pending after this and no inprogress either, then we must
                            # be complete.
                            self.completed = True
                        break  # prevent accidentally stepping through multiple constraints with just one token.

        return complete, stepped

    def copy(self, stateful=True):
        new_state = ConstraintListState(self.constraints)  # we never mutate self.constraints themselves
        if stateful:
            new_state.complete_constraints = [
                constraint.copy(stateful=True) for constraint in self.complete_constraints
            ]
            if self.inprogress_constraint is not None:
                new_state.inprogress_constraint = self.inprogress_constraint.copy(stateful=True)
            new_state.pending_constraints = [constraint.copy() for constraint in self.pending_constraints]
        return new_state
39
0
"""A small number-theory library: primality, prime factorization, gcd/lcm,
Goldbach decomposition, divisors, fractions, factorial and Fibonacci.

Preconditions are expressed with ``assert`` throughout, matching the module's
documented style of asserting both inputs and results.
"""
from math import sqrt


def is_prime(number: int) -> bool:
    """Return True if `number` is prime, else False (trial division up to sqrt)."""
    assert isinstance(number, int) and (number >= 0), "'number' must been an int and positive"

    status = True

    # 0 and 1 are none primes.
    if number <= 1:
        status = False

    for divisor in range(2, int(round(sqrt(number))) + 1):
        # If 'number' divisible by 'divisor' then set 'status' to False and stop.
        if number % divisor == 0:
            status = False
            break

    assert isinstance(status, bool), "'status' must been from type bool"
    return status


def sieve_er(n: int) -> list:
    """Return all primes in [2, n] via the sieve of Eratosthenes."""
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"

    # begin_list: contains all natural numbers from 2 up to N.
    begin_list = list(range(2, n + 1))
    ans = []  # this list will be returned

    # Actual sieve: zero out every multiple of a surviving value.
    for i in range(len(begin_list)):
        for j in range(i + 1, len(begin_list)):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0

    # Filter actual prime numbers.
    ans = [x for x in begin_list if x != 0]

    assert isinstance(ans, list), "'ans' must been from type list"
    return ans


def get_prime_numbers(n: int) -> list:
    """Return all primes in [2, n] by testing each candidate with is_prime."""
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"

    ans = []
    for number in range(2, n + 1):
        if is_prime(number):
            ans.append(number)

    assert isinstance(ans, list), "'ans' must been from type list"
    return ans


def prime_factorization(number: int) -> list:
    """Return the prime factorization of `number` as a list (e.g. 60 -> [2, 2, 3, 5])."""
    assert isinstance(number, int) and number >= 0, "'number' must been an int and >= 0"

    ans = []  # this list will be returned by the function

    factor = 2  # potential prime number factor
    quotient = number

    if number in (0, 1):
        ans.append(number)
    # If 'number' is not prime, build the prime factorization of 'number'.
    elif not is_prime(number):
        while quotient != 1:
            if is_prime(factor) and (quotient % factor == 0):
                ans.append(factor)
                # Integer division keeps 'quotient' exact (no float drift).
                quotient //= factor
            else:
                factor += 1
    else:
        ans.append(number)

    assert isinstance(ans, list), "'ans' must been from type list"
    return ans


def greatest_prime_factor(number: int) -> int:
    """Return the largest prime factor of `number`."""
    assert isinstance(number, int) and (number >= 0), "'number' bust been an int and >= 0"

    ans = max(prime_factorization(number))

    assert isinstance(ans, int), "'ans' must been from type int"
    return ans


def smallest_prime_factor(number: int) -> int:
    """Return the smallest prime factor of `number`."""
    assert isinstance(number, int) and (number >= 0), "'number' bust been an int and >= 0"

    ans = min(prime_factorization(number))

    assert isinstance(ans, int), "'ans' must been from type int"
    return ans


def is_even(number: int) -> bool:
    """Return True if `number` is even."""
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 == 0, bool), "compare bust been from type bool"
    return number % 2 == 0


def is_odd(number: int) -> bool:
    """Return True if `number` is odd."""
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 != 0, bool), "compare bust been from type bool"
    return number % 2 != 0


def goldbach(number: int) -> list:
    """Return two distinct primes p < q with p + q == number (Goldbach's conjecture)."""
    assert isinstance(number, int) and (number > 2) and is_even(number), "'number' must been an int, even and > 2"

    ans = []  # this list will be returned

    # Creates a list of prime numbers between 2 up to 'number'.
    prime_numbers = get_prime_numbers(number)
    len_pn = len(prime_numbers)

    i = 0
    loop = True  # exit flag for breaking out of both loops

    while i < len_pn and loop:
        j = i + 1
        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                loop = False
                ans.append(prime_numbers[i])
                ans.append(prime_numbers[j])
            j += 1
        i += 1

    assert (
        isinstance(ans, list)
        and (len(ans) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0])
        and is_prime(ans[1])
    ), "'ans' must contains two primes. And sum of elements must been eq 'number'"
    return ans


def gcd(number_1: int, number_2: int) -> int:
    """Return the greatest common divisor of two non-negative integers (Euclid)."""
    assert (
        isinstance(number_1, int) and isinstance(number_2, int) and (number_1 >= 0) and (number_2 >= 0)
    ), "'number1' and 'number2' must been positive integer."

    while number_2 != 0:
        rest = number_1 % number_2
        number_1 = number_2
        number_2 = rest

    assert isinstance(number_1, int) and (number_1 >= 0), "'number' must been from type int and positive"
    return number_1


def kg_v(number_1: int, number_2: int) -> int:
    """Return the least common multiple ("kgV") of two positive integers."""
    assert (
        isinstance(number_1, int) and isinstance(number_2, int) and (number_1 >= 1) and (number_2 >= 1)
    ), "'number1' and 'number2' must been positive integer."

    ans = 1  # actual answer that will be returned

    # For kgV(x, 1).
    if number_1 > 1 and number_2 > 1:
        # Build the prime factorization of 'number1' and 'number2'.
        prime_fac_1 = prime_factorization(number_1)
        prime_fac_2 = prime_factorization(number_2)
    else:
        prime_fac_1 = []
        prime_fac_2 = []
        ans = max(number_1, number_2)

    done = []  # primes already accounted for in 'ans'

    # Iterate through prime_fac_1; take each prime at its maximal multiplicity.
    for n in prime_fac_1:
        if n not in done:
            if n in prime_fac_2:
                count_1 = prime_fac_1.count(n)
                count_2 = prime_fac_2.count(n)
                for _ in range(max(count_1, count_2)):
                    ans *= n
            else:
                count_1 = prime_fac_1.count(n)
                for _ in range(count_1):
                    ans *= n
            done.append(n)

    # Iterate through prime_fac_2; pick up primes not present in prime_fac_1.
    for n in prime_fac_2:
        if n not in done:
            count_2 = prime_fac_2.count(n)
            for _ in range(count_2):
                ans *= n
            done.append(n)

    assert isinstance(ans, int) and (ans >= 0), "'ans' must been from type int and positive"
    return ans


def get_prime(n: int) -> int:
    """Return the n-th prime (0-indexed: get_prime(0) == 2)."""
    assert isinstance(n, int) and (n >= 0), "'number' must been a positive int"

    index = 0
    ans = 2  # this variable holds the answer

    while index < n:
        index += 1
        ans += 1  # counts to the next number
        # If ans is not prime then run to the next prime number.
        while not is_prime(ans):
            ans += 1

    assert isinstance(ans, int) and is_prime(ans), "'ans' must been a prime number and from type int"
    return ans


def get_primes_between(p_number_1: int, p_number_2: int) -> list:
    """Return all primes strictly between the primes `p_number_1` and `p_number_2`."""
    assert (
        is_prime(p_number_1) and is_prime(p_number_2) and (p_number_1 < p_number_2)
    ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"

    number = p_number_1 + 1  # jump to the next number
    ans = []  # this list will be returned

    # If number is not prime then fetch the next prime number.
    while not is_prime(number):
        number += 1

    while number < p_number_2:
        ans.append(number)
        number += 1
        # Fetch the next prime number.
        while not is_prime(number):
            number += 1

    # 'ans' contains neither 'pNumber1' nor 'pNumber2'. Guard the endpoint check
    # so an empty result (twin primes) does not raise IndexError.
    assert isinstance(ans, list) and (
        len(ans) == 0 or (ans[0] != p_number_1 and ans[len(ans) - 1] != p_number_2)
    ), "'ans' must been a list without the arguments"
    return ans


def get_divisors(n: int) -> list:
    """Return all divisors of n (including 1 and n), ascending."""
    assert isinstance(n, int) and (n >= 1), "'n' must been int and >= 1"

    ans = []  # will be returned

    for divisor in range(1, n + 1):
        if n % divisor == 0:
            ans.append(divisor)

    assert ans[0] == 1 and ans[len(ans) - 1] == n, "Error in function getDivisiors(...)"
    return ans


def is_perfect_number(number: int) -> bool:
    """Return True if `number` equals the sum of its proper divisors."""
    assert isinstance(number, int) and (number > 1), "'number' must been an int and >= 1"

    divisors = get_divisors(number)

    assert (
        isinstance(divisors, list)
        and (divisors[0] == 1)
        and (divisors[len(divisors) - 1] == number)
    ), "Error in help-function getDivisiors(...)"

    # Summed all divisors up to 'number' (exclusive), hence [:-1].
    return sum(divisors[:-1]) == number


def simplify_fraction(numerator: int, denominator: int) -> tuple:
    """Return `(numerator, denominator)` reduced to lowest terms."""
    assert (
        isinstance(numerator, int) and isinstance(denominator, int) and (denominator != 0)
    ), "The arguments must been from type int and 'denominator' != 0"

    # Build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator), abs(denominator))

    assert (
        isinstance(gcd_of_fraction, int)
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"

    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)


def factorial(n: int) -> int:
    """Return n! for n >= 0."""
    assert isinstance(n, int) and (n >= 0), "'n' must been a int and >= 0"

    ans = 1  # this will be returned

    for factor in range(1, n + 1):
        ans *= factor

    return ans


def fib(n: int) -> int:
    """Return the n-th Fibonacci number of this module's sequence (fib(1) == 1, fib(2) == 1, ...)."""
    assert isinstance(n, int) and (n >= 0), "'n' must been an int and >= 0"

    fib_1 = 0
    ans = 1  # this will be returned

    for _ in range(n - 1):
        tmp = ans
        ans += fib_1
        fib_1 = tmp

    return ans
559
"""Tests that the in-graph TFBertTokenizer matches the Python BertTokenizer."""
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory

from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.testing_utils import require_tensorflow_text, require_tf, slow


if is_tf_available():
    import tensorflow as tf

if is_tensorflow_text_available():
    from transformers.models.bert import TFBertTokenizer


TOKENIZER_CHECKPOINTS = ["bert-base-uncased", "bert-base-cased"]
TINY_MODEL_CHECKPOINT = "hf-internal-testing/tiny-bert-tf-only"

if is_tf_available():

    class ModelToSave(tf.keras.Model):
        """Minimal Keras model bundling an in-graph tokenizer with a tiny BERT,
        used to verify that the tokenizer survives SavedModel round-trips."""

        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.bert = TFAutoModel.from_config(config)

        def call(self, inputs):
            tokenized = self.tokenizer(inputs)
            out = self.bert(**tokenized)
            return out["pooler_output"]


@require_tf
@require_tensorflow_text
class BertTokenizationTest(unittest.TestCase):
    def setUp(self):
        super().setUp()
        self.tokenizers = [
            BertTokenizer.from_pretrained(checkpoint) for checkpoint in (TOKENIZER_CHECKPOINTS * 2)
        ]  # repeat for when fast_bert_tokenizer=false
        self.tf_tokenizers = [TFBertTokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS] + [
            TFBertTokenizer.from_pretrained(checkpoint, use_fast_bert_tokenizer=False)
            for checkpoint in TOKENIZER_CHECKPOINTS
        ]
        assert len(self.tokenizers) == len(self.tf_tokenizers)

        self.test_sentences = [
            "This is a straightforward English test sentence.",
            "This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
            "Now we're going to add some Chinese: 一 二 三 一二三",
            "And some much more rare Chinese: 齉 堃 齉堃",
            "Je vais aussi écrire en français pour tester les accents",
            "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))

    def test_output_equivalence(self):
        # The in-graph tokenizer must produce exactly the same ids/masks as the Python one.
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in (self.test_sentences, self.paired_sentences):
                python_outputs = tokenizer(test_inputs, return_tensors="tf", padding="longest")
                tf_outputs = tf_tokenizer(test_inputs)

                for key in python_outputs.keys():
                    self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key], tf.int64) == tf_outputs[key]))

    @slow
    def test_different_pairing_styles(self):
        # Passing pairs as tuples or as parallel text/text_pair lists must agree.
        for tf_tokenizer in self.tf_tokenizers:
            merged_outputs = tf_tokenizer(self.paired_sentences)
            separated_outputs = tf_tokenizer(
                text=[sentence[0] for sentence in self.paired_sentences],
                text_pair=[sentence[1] for sentence in self.paired_sentences],
            )
            for key in merged_outputs.keys():
                self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key], tf.int64) == separated_outputs[key]))

    @slow
    def test_graph_mode(self):
        # tf.function-compiled tokenization must match eager tokenization.
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in (self.test_sentences, self.paired_sentences):
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)

                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))

    @slow
    def test_saved_model(self):
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor(self.test_sentences)
            out = model(test_inputs)  # Build model with some sample inputs

            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / "saved.model"
                model.save(save_path)
                loaded_model = tf.keras.models.load_model(save_path)

            loaded_output = loaded_model(test_inputs)
            # We may see small differences because the loaded model is compiled,
            # so we need an epsilon for the test.
            self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output)), 1e-5)
559
1
class UpperCamelCase_ : def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> int: _snake_case = name _snake_case = value _snake_case = weight def __repr__( self ) -> List[Any]: return F'''{self.__class__.__name__}({self.name}, {self.value}, {self.weight})''' def lowerCAmelCase ( self ) -> Tuple: return self.value def lowerCAmelCase ( self ) -> Optional[Any]: return self.name def lowerCAmelCase ( self ) -> Union[str, Any]: return self.weight def lowerCAmelCase ( self ) -> List[Any]: return self.value / self.weight def lowerCamelCase__ ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : str ) -> Dict: '''simple docstring''' _snake_case = [] for i in range(len(UpperCamelCase__ ) ): menu.append(Things(name[i] , value[i] , weight[i] ) ) return menu def lowerCamelCase__ ( UpperCamelCase__ : str , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : str ) -> str: '''simple docstring''' _snake_case = sorted(UpperCamelCase__ , key=UpperCamelCase__ , reverse=UpperCamelCase__ ) _snake_case = [] _snake_case , _snake_case = 0.0, 0.0 for i in range(len(UpperCamelCase__ ) ): if (total_cost + items_copy[i].get_weight()) <= max_cost: result.append(items_copy[i] ) total_cost += items_copy[i].get_weight() total_value += items_copy[i].get_value() return (result, total_value) def lowerCamelCase__ ( ) -> int: '''simple docstring''' if __name__ == "__main__": import doctest doctest.testmod()
541
"""Checks (and optionally fixes) the ordering of the scheduler and pipeline
sections in the documentation table of contents (`_toctree.yml`)."""
import argparse
from collections import defaultdict

import yaml


PATH_TO_TOC = "docs/source/en/_toctree.yml"


def clean_doc_toc(doc_list):
    """Clean one TOC section: dedupe entries, sort by title, keep 'Overview' first.

    Raises:
        ValueError: if the same `local` appears with two different titles, or if
            there is more than one 'Overview' entry.
    """
    counts = defaultdict(int)
    overview_doc = []
    new_doc_list = []
    for doc in doc_list:
        if "local" in doc:
            counts[doc["local"]] += 1
        if doc["title"].lower() == "overview":
            overview_doc.append({"local": doc["local"], "title": doc["title"]})
        else:
            new_doc_list.append(doc)
    doc_list = new_doc_list

    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in doc_list if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once.
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add non-duplicate keys.
    new_doc.extend([doc for doc in doc_list if "local" not in counts or counts[doc["local"]] == 1])
    new_doc = sorted(new_doc, key=lambda s: s["title"].lower())

    # "overview" gets special treatment and is always first.
    if len(overview_doc) > 1:
        # NOTE: this message was a plain string in the original; it needs the f-prefix to interpolate.
        raise ValueError(f"{doc_list} has two 'overview' docs which is not allowed.")

    overview_doc.extend(new_doc)

    # Sort
    return overview_doc


def check_scheduler_doc(overwrite=False):
    """Verify (or rewrite, if `overwrite`) the 'Schedulers' section of the API TOC."""
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc.
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the scheduler doc.
    scheduler_idx = 0
    while api_doc[scheduler_idx]["title"] != "Schedulers":
        scheduler_idx += 1

    scheduler_doc = api_doc[scheduler_idx]["sections"]
    new_scheduler_doc = clean_doc_toc(scheduler_doc)

    diff = False
    if new_scheduler_doc != scheduler_doc:
        diff = True
        if overwrite:
            api_doc[scheduler_idx]["sections"] = new_scheduler_doc

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )


def check_pipeline_doc(overwrite=False):
    """Verify (or rewrite, if `overwrite`) the 'Pipelines' section of the API TOC."""
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc.
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the pipeline doc.
    pipeline_idx = 0
    while api_doc[pipeline_idx]["title"] != "Pipelines":
        pipeline_idx += 1

    diff = False
    pipeline_docs = api_doc[pipeline_idx]["sections"]
    new_pipeline_docs = []

    # Sort sub-pipeline docs.
    for pipeline_doc in pipeline_docs:
        if "section" in pipeline_doc:
            sub_pipeline_doc = pipeline_doc["section"]
            new_sub_pipeline_doc = clean_doc_toc(sub_pipeline_doc)
            if overwrite:
                pipeline_doc["section"] = new_sub_pipeline_doc
        new_pipeline_docs.append(pipeline_doc)

    # Sort the overall pipeline doc.
    new_pipeline_docs = clean_doc_toc(new_pipeline_docs)

    if new_pipeline_docs != pipeline_docs:
        diff = True
        if overwrite:
            api_doc[pipeline_idx]["sections"] = new_pipeline_docs

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_scheduler_doc(args.fix_and_overwrite)
    check_pipeline_doc(args.fix_and_overwrite)
541
1
import warnings from ...utils import logging from .image_processing_dpt import DPTImageProcessor UpperCamelCase = logging.get_logger(__name__) class __lowerCamelCase ( UpperCamelCase__ ): """simple docstring""" def __init__( self : str , *SCREAMING_SNAKE_CASE__ : int , **SCREAMING_SNAKE_CASE__ : Any ) -> None: warnings.warn( "The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please" " use DPTImageProcessor instead." , SCREAMING_SNAKE_CASE__ , ) super().__init__(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
61
"""PyTorch-Lightning callbacks for seq2seq training: metric checkpointing,
early stopping and result/generation logging."""

import logging
from pathlib import Path

import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json


def count_trainable_parameters(model):
    """Return the total number of trainable parameters of ``model``."""
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params


logger = logging.getLogger(__name__)


def get_checkpoint_callback(output_dir, metric):
    """Build a ModelCheckpoint that saves the best model according to ``metric``.

    Supported metrics: rouge2, bleu, em, loss. Raises NotImplementedError otherwise.
    """
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    elif metric == "loss":
        exp = "{val_avg_loss:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            " function."
        )

    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=f"val_{metric}",
        mode="max",
        save_top_k=1,
        every_n_epochs=1,
    )
    return checkpoint_callback


def get_early_stopping_callback(metric, patience):
    """Build an EarlyStopping callback on ``val_{metric}`` (minimized for loss)."""
    return EarlyStopping(
        monitor=f"val_{metric}",
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,
    )


class Seq2SeqLoggingCallback(pl.Callback):
    """Logs learning rates, parameter counts and per-split result files."""

    def on_batch_end(self, trainer, pl_module):
        # Log the current learning rate of every optimizer parameter group.
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(
        self, trainer: pl.Trainer, pl_module: pl.LightningModule, type_path: str, save_generations=True
    ) -> None:
        """Write callback metrics (and optionally generations) for one split to disk."""
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
        results_file.parent.mkdir(exist_ok=True)
        generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)

        if not save_generations:
            return

        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()

        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer: pl.Trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")


# Backward-compatible aliases for the auto-generated names previously exported
# by this module (the last `def A` was the early-stopping factory).
A = get_early_stopping_callback
__lowerCAmelCase = Seq2SeqLoggingCallback
9
0
"""Extract a subset of layers from a full RobertaForMaskedLM or GPT2LMHeadModel
checkpoint to initialize a student model for transfer-learned distillation."""

import argparse

import torch

from transformers import GPT2LMHeadModel, RobertaForMaskedLM


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description=(
            "Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned"
            " Distillation"
        )
    )
    parser.add_argument("--model_type", default="roberta", choices=["roberta", "gpt2"])
    parser.add_argument("--model_name", default="roberta-large", type=str)
    parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_roberta_048131723.pth", type=str)
    parser.add_argument("--vocab_transform", action="store_true")
    args = parser.parse_args()

    if args.model_type == "roberta":
        model = RobertaForMaskedLM.from_pretrained(args.model_name)
        prefix = "roberta"
    elif args.model_type == "gpt2":
        model = GPT2LMHeadModel.from_pretrained(args.model_name)
        prefix = "transformer"

    state_dict = model.state_dict()
    compressed_sd = {}

    # Embeddings #
    if args.model_type == "gpt2":
        for param_name in ["wte.weight", "wpe.weight"]:
            compressed_sd[f"{prefix}.{param_name}"] = state_dict[f"{prefix}.{param_name}"]
    else:
        for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
            param_name = f"{prefix}.embeddings.{w}.weight"
            compressed_sd[param_name] = state_dict[param_name]
        for w in ["weight", "bias"]:
            param_name = f"{prefix}.embeddings.LayerNorm.{w}"
            compressed_sd[param_name] = state_dict[param_name]

    # Transformer Blocks #
    # Copy every other teacher layer (0, 2, 4, ...) into consecutive student slots.
    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        if args.model_type == "gpt2":
            for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
                for w in ["weight", "bias"]:
                    compressed_sd[f"{prefix}.h.{std_idx}.{layer}.{w}"] = state_dict[
                        f"{prefix}.h.{teacher_idx}.{layer}.{w}"
                    ]
            compressed_sd[f"{prefix}.h.{std_idx}.attn.bias"] = state_dict[f"{prefix}.h.{teacher_idx}.attn.bias"]
        else:
            for layer in [
                "attention.self.query",
                "attention.self.key",
                "attention.self.value",
                "attention.output.dense",
                "attention.output.LayerNorm",
                "intermediate.dense",
                "output.dense",
                "output.LayerNorm",
            ]:
                for w in ["weight", "bias"]:
                    compressed_sd[f"{prefix}.encoder.layer.{std_idx}.{layer}.{w}"] = state_dict[
                        f"{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}"
                    ]
        std_idx += 1

    # Language Modeling Head ###s
    if args.model_type == "roberta":
        for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
            compressed_sd[f"{layer}"] = state_dict[f"{layer}"]
        if args.vocab_transform:
            for w in ["weight", "bias"]:
                compressed_sd[f"lm_head.dense.{w}"] = state_dict[f"lm_head.dense.{w}"]
                compressed_sd[f"lm_head.layer_norm.{w}"] = state_dict[f"lm_head.layer_norm.{w}"]
    elif args.model_type == "gpt2":
        for w in ["weight", "bias"]:
            compressed_sd[f"{prefix}.ln_f.{w}"] = state_dict[f"{prefix}.ln_f.{w}"]
        compressed_sd["lm_head.weight"] = state_dict["lm_head.weight"]

    print(f"N layers selected for distillation: {std_idx}")
    print(f"Number of params transferred for distillation: {len(compressed_sd.keys())}")

    print(f"Save transferred checkpoint to {args.dump_checkpoint}.")
    torch.save(compressed_sd, args.dump_checkpoint)
704
"""Create and measure a GHZ (maximally entangled) state on a simulator."""

import qiskit


def quantum_entanglement(qubits: int = 2):
    """Entangle ``qubits`` qubits into a GHZ state and return measurement counts.

    Builds H on qubit 0 followed by a CNOT chain, measures all qubits, and runs
    1000 shots on Aer's simulator. The counts should be split between the
    all-zeros and all-ones states.
    """
    classical_bits = qubits

    # Using Aer's simulator
    simulator = qiskit.Aer.get_backend("aer_simulator")

    # Creating a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)

    # Adding a H gate on qubit 0 (now q0 in superposition)
    circuit.h(0)

    for i in range(1, qubits):
        # Adding CX (CNOT) gate between neighbouring qubits propagates the entanglement
        circuit.cx(i - 1, i)

    # Mapping the quantum measurement to the classical bits
    circuit.measure(list(range(qubits)), list(range(classical_bits)))

    # Now measuring any one qubit would affect other qubits to collapse
    # their super position and have same state as the measured one.

    # Executing the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1_000)

    return job.result().get_counts(circuit)


# Backward-compatible alias for the previous (auto-generated) name.
_lowercase = quantum_entanglement


if __name__ == "__main__":
    print(f"Total count for various states are: {quantum_entanglement(3)}")
181
0
"""ViViT (video vision transformer) model configuration."""

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/vivit-b-16x2-kinetics400": (
        "https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json"
    ),
    # See all Vivit models at https://huggingface.co/models?filter=vivit
}


class lowerCAmelCase__(PretrainedConfig):
    """Configuration class storing the hyper-parameters of a ViViT model.

    Instantiating with the defaults yields a configuration similar to
    google/vivit-b-16x2-kinetics400.
    """

    # Identifier used by the PretrainedConfig registry (upstream name: model_type).
    model_type = "vivit"
    # Kept for backward compatibility with the previous attribute name.
    A = "vivit"

    def __init__(
        self,
        image_size=224,
        num_frames=32,
        tubelet_size=[2, 16, 16],
        num_channels=3,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_fast",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        qkv_bias=True,
        **kwargs,
    ):
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.image_size = image_size
        self.num_frames = num_frames
        self.tubelet_size = tubelet_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias

        super().__init__(**kwargs)
501
"""Helpers to fetch and read the artifacts of the last completed daily CI run."""

import os
import zipfile

import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links


def get_daily_ci_runs(token, num_runs=7):
    """Return the ``num_runs`` latest scheduled daily CI workflow runs on main."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    # The id of a workflow (not of a workflow run)
    workflow_id = "636036"

    url = f"https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += f"?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"

    result = requests.get(url, headers=headers).json()

    return result["workflow_runs"]


def get_last_daily_ci_runs(token):
    """Return the id of the most recent *completed* daily CI run (or None)."""
    workflow_runs = get_daily_ci_runs(token)
    workflow_run_id = None
    for workflow_run in workflow_runs:
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run["id"]
            break

    return workflow_run_id


def get_last_daily_ci_artifacts(artifact_names, output_dir, token):
    """Download the named artifacts of the last completed daily CI run into ``output_dir``."""
    workflow_run_id = get_last_daily_ci_runs(token)
    if workflow_run_id is not None:
        # NOTE: `worflow_run_id` (sic) is the keyword expected by get_artifacts_links.
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id, token=token)
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name, artifact_url=artifact_url, output_dir=output_dir, token=token
                )


def get_last_daily_ci_reports(artifact_names, output_dir, token):
    """Download the artifacts, then return {artifact_name: {filename: text}} for each zip found."""
    get_last_daily_ci_artifacts(artifact_names, output_dir, token)

    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir, f"{artifact_name}.zip")
        if os.path.isfile(artifact_zip_path):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        with z.open(filename) as f:
                            results[artifact_name][filename] = f.read().decode("UTF-8")

    return results


# Backward-compatible alias for the previous (auto-generated) name; the last
# `def __snake_case` in the original module was this reports helper.
__snake_case = get_last_daily_ci_reports
501
1
"""Tests for the BEiT image processor (resize/crop/normalize and segmentation maps)."""

import unittest

import numpy as np
from datasets import load_dataset

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import BeitImageProcessor


class BeitImageProcessingTester(unittest.TestCase):
    """Holds the image-processor kwargs and dummy-input dimensions for the tests."""

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_reduce_labels=False,
    ):
        size = size if size is not None else {"height": 20, "width": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_reduce_labels = do_reduce_labels

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_reduce_labels": self.do_reduce_labels,
        }


def prepare_semantic_single_inputs():
    """Return one (image, segmentation map) pair from the ADE20k fixtures."""
    dataset = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")

    image = Image.open(dataset[0]["file"])
    map = Image.open(dataset[1]["file"])

    return image, map


def prepare_semantic_batch_inputs():
    """Return two (image, segmentation map) pairs from the ADE20k fixtures."""
    ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")

    image1 = Image.open(ds[0]["file"])
    map1 = Image.open(ds[1]["file"])
    image2 = Image.open(ds[2]["file"])
    map2 = Image.open(ds[3]["file"])

    return [image1, image2], [map1, map2]


@require_torch
@require_vision
class BeitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = BeitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = BeitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 20, "width": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})
        self.assertEqual(image_processor.do_reduce_labels, False)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, crop_size=84, reduce_labels=True
        )
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
        self.assertEqual(image_processor.do_reduce_labels, True)

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_segmentation_maps(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        maps = []
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
            maps.append(torch.zeros(image.shape[-2:]).long())

        # Test not batched input
        encoding = image_processing(image_inputs[0], maps[0], return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                1,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

        # Test batched
        encoding = image_processing(image_inputs, maps, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

        # Test not batched input (PIL images)
        image, segmentation_map = prepare_semantic_single_inputs()
        encoding = image_processing(image, segmentation_map, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                1,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

        # Test batched input (PIL images)
        images, segmentation_maps = prepare_semantic_batch_inputs()
        encoding = image_processing(images, segmentation_maps, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                2,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                2,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

    def test_reduce_labels(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)

        # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
        image, map = prepare_semantic_single_inputs()
        encoding = image_processing(image, map, return_tensors="pt")
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 150)

        image_processing.do_reduce_labels = True
        encoding = image_processing(image, map, return_tensors="pt")
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)


# Backward-compatible aliases for the auto-generated names previously exported
# by this module (each repeated def/class left only its last binding alive).
SCREAMING_SNAKE_CASE = BeitImageProcessingTest
snake_case_ = prepare_semantic_batch_inputs
163
"""TensorFlow optimization utilities: warmup schedule, AdamW with weight decay,
and a cross-replica gradient accumulator."""

import re
from typing import Callable, List, Optional, Union

import tensorflow as tf


try:
    from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
    from tensorflow.keras.optimizers import Adam


class WarmUp(tf.keras.optimizers.schedules.LearningRateSchedule):
    """Polynomial warmup that hands over to ``decay_schedule_fn`` afterwards."""

    def __init__(
        self,
        initial_learning_rate: float,
        decay_schedule_fn: Callable,
        warmup_steps: int,
        power: float = 1.0,
        name: str = None,
    ):
        super().__init__()
        self.initial_learning_rate = initial_learning_rate
        self.warmup_steps = warmup_steps
        self.power = power
        self.decay_schedule_fn = decay_schedule_fn
        self.name = name

    def __call__(self, step):
        with tf.name_scope(self.name or "WarmUp") as name:
            # Implements polynomial warmup. i.e., if global_step < warmup_steps, the
            # learning rate will be `global_step/num_warmup_steps * init_lr`.
            global_step_float = tf.cast(step, tf.float32)
            warmup_steps_float = tf.cast(self.warmup_steps, tf.float32)
            warmup_percent_done = global_step_float / warmup_steps_float
            warmup_learning_rate = self.initial_learning_rate * tf.math.pow(warmup_percent_done, self.power)
            return tf.cond(
                global_step_float < warmup_steps_float,
                lambda: warmup_learning_rate,
                lambda: self.decay_schedule_fn(step - self.warmup_steps),
                name=name,
            )

    def get_config(self):
        return {
            "initial_learning_rate": self.initial_learning_rate,
            "decay_schedule_fn": self.decay_schedule_fn,
            "warmup_steps": self.warmup_steps,
            "power": self.power,
            "name": self.name,
        }


def create_optimizer(
    init_lr: float,
    num_train_steps: int,
    num_warmup_steps: int,
    min_lr_ratio: float = 0.0,
    adam_beta1: float = 0.9,
    adam_beta2: float = 0.999,
    adam_epsilon: float = 1e-8,
    adam_clipnorm: Optional[float] = None,
    adam_global_clipnorm: Optional[float] = None,
    weight_decay_rate: float = 0.0,
    power: float = 1.0,
    include_in_weight_decay: Optional[List[str]] = None,
):
    """Build (optimizer, lr_schedule): polynomial decay + optional warmup, and
    AdamWeightDecay when ``weight_decay_rate`` > 0, plain Adam otherwise."""
    lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
        initial_learning_rate=init_lr,
        decay_steps=num_train_steps - num_warmup_steps,
        end_learning_rate=init_lr * min_lr_ratio,
        power=power,
    )
    if num_warmup_steps:
        lr_schedule = WarmUp(
            initial_learning_rate=init_lr,
            decay_schedule_fn=lr_schedule,
            warmup_steps=num_warmup_steps,
        )
    if weight_decay_rate > 0.0:
        optimizer = AdamWeightDecay(
            learning_rate=lr_schedule,
            weight_decay_rate=weight_decay_rate,
            beta_1=adam_beta1,
            beta_2=adam_beta2,
            epsilon=adam_epsilon,
            clipnorm=adam_clipnorm,
            global_clipnorm=adam_global_clipnorm,
            exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"],
            include_in_weight_decay=include_in_weight_decay,
        )
    else:
        optimizer = tf.keras.optimizers.Adam(
            learning_rate=lr_schedule,
            beta_1=adam_beta1,
            beta_2=adam_beta2,
            epsilon=adam_epsilon,
            clipnorm=adam_clipnorm,
            global_clipnorm=adam_global_clipnorm,
        )
    # We return the optimizer and the LR scheduler in order to better track the
    # evolution of the LR independently of the optimizer.
    return optimizer, lr_schedule


class AdamWeightDecay(Adam):
    """Adam with decoupled L2 weight decay applied before each parameter update."""

    def __init__(
        self,
        learning_rate=0.001,
        beta_1=0.9,
        beta_2=0.999,
        epsilon=1e-7,
        amsgrad=False,
        weight_decay_rate=0.0,
        include_in_weight_decay=None,
        exclude_from_weight_decay=None,
        name="AdamWeightDecay",
        **kwargs,
    ):
        super().__init__(learning_rate, beta_1, beta_2, epsilon, amsgrad, name, **kwargs)
        self.weight_decay_rate = weight_decay_rate
        self._include_in_weight_decay = include_in_weight_decay
        self._exclude_from_weight_decay = exclude_from_weight_decay

    @classmethod
    def from_config(cls, config):
        """Creates an optimizer from its config with WarmUp custom object."""
        custom_objects = {"WarmUp": WarmUp}
        return super(AdamWeightDecay, cls).from_config(config, custom_objects=custom_objects)

    def _prepare_local(self, var_device, var_dtype, apply_state):
        super(AdamWeightDecay, self)._prepare_local(var_device, var_dtype, apply_state)
        apply_state[(var_device, var_dtype)]["weight_decay_rate"] = tf.constant(
            self.weight_decay_rate, name="adam_weight_decay_rate"
        )

    def _decay_weights_op(self, var, learning_rate, apply_state):
        do_decay = self._do_use_weight_decay(var.name)
        if do_decay:
            return var.assign_sub(
                learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]["weight_decay_rate"],
                use_locking=self._use_locking,
            )
        return tf.no_op()

    def apply_gradients(self, grads_and_vars, name=None, **kwargs):
        grads, tvars = list(zip(*grads_and_vars))
        return super(AdamWeightDecay, self).apply_gradients(zip(grads, tvars), name=name, **kwargs)

    def _get_lr(self, var_device, var_dtype, apply_state):
        """Retrieves the learning rate with the given state."""
        if apply_state is None:
            return self._decayed_lr_t[var_dtype], {}

        apply_state = apply_state or {}
        coefficients = apply_state.get((var_device, var_dtype))
        if coefficients is None:
            coefficients = self._fallback_apply_state(var_device, var_dtype)
            apply_state[(var_device, var_dtype)] = coefficients

        return coefficients["lr_t"], {"apply_state": apply_state}

    def _resource_apply_dense(self, grad, var, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_dense(grad, var, **kwargs)

    def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_sparse(grad, var, indices, **kwargs)

    def get_config(self):
        config = super().get_config()
        config.update({"weight_decay_rate": self.weight_decay_rate})
        return config

    def _do_use_weight_decay(self, param_name):
        """Whether to use L2 weight decay for ``param_name``."""
        if self.weight_decay_rate == 0:
            return False

        if self._include_in_weight_decay:
            for r in self._include_in_weight_decay:
                if re.search(r, param_name) is not None:
                    return True

        if self._exclude_from_weight_decay:
            for r in self._exclude_from_weight_decay:
                if re.search(r, param_name) is not None:
                    return False
        return True


class GradientAccumulator:
    """Accumulates gradients across steps; replica-safe via ON_READ variables."""

    def __init__(self):
        self._gradients = []
        self._accum_steps = None

    @property
    def step(self):
        # Lazily create the step counter on first access.
        if self._accum_steps is None:
            self._accum_steps = tf.Variable(
                tf.constant(0, dtype=tf.int64),
                trainable=False,
                synchronization=tf.VariableSynchronization.ON_READ,
                aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
            )
        return self._accum_steps.value()

    @property
    def gradients(self):
        if not self._gradients:
            raise ValueError("The accumulator should be called first to initialize the gradients")
        return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]

    def __call__(self, gradients):
        if not self._gradients:
            _ = self.step  # Create the step variable.
            self._gradients.extend(
                [
                    tf.Variable(
                        tf.zeros_like(gradient),
                        trainable=False,
                        synchronization=tf.VariableSynchronization.ON_READ,
                        aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
                    )
                    if gradient is not None
                    else gradient
                    for gradient in gradients
                ]
            )
        if len(gradients) != len(self._gradients):
            raise ValueError(f"Expected {len(self._gradients)} gradients, but got {len(gradients)}")

        for accum_gradient, gradient in zip(self._gradients, gradients):
            if accum_gradient is not None and gradient is not None:
                accum_gradient.assign_add(gradient)

        self._accum_steps.assign_add(1)

    def reset(self):
        if not self._gradients:
            return
        self._accum_steps.assign(0)
        for gradient in self._gradients:
            if gradient is not None:
                gradient.assign(tf.zeros_like(gradient))


# Backward-compatible aliases for the auto-generated names previously exported
# by this module (each repeated class/def left only its last binding alive).
SCREAMING_SNAKE_CASE = GradientAccumulator
snake_case_ = create_optimizer
163
1
from PIL import Image def _lowerCamelCase( __snake_case ) -> Image: __snake_case , __snake_case = image.size __snake_case = 0 __snake_case = image.load() for i in range(lowercase_ ): for j in range(lowercase_ ): __snake_case = pixels[j, i] mean += pixel mean //= width * height for j in range(lowercase_ ): for i in range(lowercase_ ): __snake_case = 255 if pixels[i, j] > mean else 0 return image if __name__ == "__main__": lowerCamelCase__ = mean_threshold(Image.open('path_to_image').convert('L')) image.save('output_image_path')
524
"""simple docstring""" def lowerCAmelCase_( lowercase_ : int = 10 ) -> str: if not isinstance(lowercase_ , lowercase_ ) or n < 0: raise ValueError('''Invalid input''' ) _lowerCamelCase = 10**n _lowerCamelCase = 2_84_33 * (pow(2 , 7_83_04_57 , lowercase_ )) + 1 return str(number % modulus ) if __name__ == "__main__": from doctest import testmod testmod() print(F"""{solution(1_0) = }""")
661
0
"""simple docstring""" import argparse import gdown import numpy as np import torch from huggingface_hub import hf_hub_download from transformers import ( CLIPTokenizer, CLIPTokenizerFast, VideoMAEImageProcessor, XCLIPConfig, XCLIPModel, XCLIPProcessor, XCLIPTextConfig, XCLIPVisionConfig, ) def lowercase ( __UpperCamelCase , __UpperCamelCase ) -> List[str]: __magic_name__ = XCLIPTextConfig() # derive patch size from model name __magic_name__ = model_name.find('''patch''' ) __magic_name__ = int(model_name[start_idx + len('''patch''' ) : start_idx + len('''patch''' ) + 2] ) __magic_name__ = XCLIPVisionConfig(patch_size=__UpperCamelCase , num_frames=__UpperCamelCase ) if "large" in model_name: __magic_name__ = 768 __magic_name__ = 3072 __magic_name__ = 12 __magic_name__ = 1024 __magic_name__ = 4096 __magic_name__ = 16 __magic_name__ = 24 __magic_name__ = 768 __magic_name__ = 3072 if model_name == "xclip-large-patch14-16-frames": __magic_name__ = 336 __magic_name__ = XCLIPConfig.from_text_vision_configs(__UpperCamelCase , __UpperCamelCase ) if "large" in model_name: __magic_name__ = 768 return config def lowercase ( __UpperCamelCase ) -> Dict: # text encoder if name == "token_embedding.weight": __magic_name__ = name.replace('''token_embedding.weight''' , '''text_model.embeddings.token_embedding.weight''' ) if name == "positional_embedding": __magic_name__ = name.replace('''positional_embedding''' , '''text_model.embeddings.position_embedding.weight''' ) if "ln_1" in name: __magic_name__ = name.replace('''ln_1''' , '''layer_norm1''' ) if "ln_2" in name: __magic_name__ = name.replace('''ln_2''' , '''layer_norm2''' ) if "c_fc" in name: __magic_name__ = name.replace('''c_fc''' , '''fc1''' ) if "c_proj" in name: __magic_name__ = name.replace('''c_proj''' , '''fc2''' ) if name.startswith('''transformer.resblocks''' ): __magic_name__ = name.replace('''transformer.resblocks''' , '''text_model.encoder.layers''' ) if "attn.out_proj" in name and "message" not in name: 
__magic_name__ = name.replace('''attn.out_proj''' , '''self_attn.out_proj''' ) if "ln_final" in name: __magic_name__ = name.replace('''ln_final''' , '''text_model.final_layer_norm''' ) # visual encoder if name == "visual.class_embedding": __magic_name__ = name.replace('''visual.class_embedding''' , '''vision_model.embeddings.class_embedding''' ) if name == "visual.positional_embedding": __magic_name__ = name.replace('''visual.positional_embedding''' , '''vision_model.embeddings.position_embedding.weight''' ) if name.startswith('''visual.transformer.resblocks''' ): __magic_name__ = name.replace('''visual.transformer.resblocks''' , '''vision_model.encoder.layers''' ) if "visual.conv1" in name: __magic_name__ = name.replace('''visual.conv1''' , '''vision_model.embeddings.patch_embedding''' ) if "visual.ln_pre" in name: __magic_name__ = name.replace('''visual.ln_pre''' , '''vision_model.pre_layernorm''' ) if "visual.ln_post" in name: __magic_name__ = name.replace('''visual.ln_post''' , '''vision_model.post_layernorm''' ) if "visual.proj" in name: __magic_name__ = name.replace('''visual.proj''' , '''visual_projection.weight''' ) if "text_projection" in name: __magic_name__ = name.replace('''text_projection''' , '''text_projection.weight''' ) # things on top if "prompts_visual_proj" in name: __magic_name__ = name.replace('''prompts_visual_proj''' , '''prompts_visual_projection''' ) if "prompts_visual_ln" in name: __magic_name__ = name.replace('''prompts_visual_ln''' , '''prompts_visual_layernorm''' ) # mit if name == "mit.positional_embedding": __magic_name__ = name.replace('''positional''' , '''position''' ) if name.startswith('''mit.resblocks''' ): __magic_name__ = name.replace('''mit.resblocks''' , '''mit.encoder.layers''' ) # prompts generator if name.startswith('''prompts_generator.norm''' ): __magic_name__ = name.replace('''prompts_generator.norm''' , '''prompts_generator.layernorm''' ) return name def lowercase ( __UpperCamelCase , __UpperCamelCase ) -> str: for 
key in orig_state_dict.copy().keys(): __magic_name__ = orig_state_dict.pop(__UpperCamelCase ) if "attn.in_proj" in key: __magic_name__ = key.split('''.''' ) if key.startswith('''visual''' ): __magic_name__ = key_split[3] __magic_name__ = config.vision_config.hidden_size if "message_attn" in key: if "weight" in key: __magic_name__ = val[ :dim, : ] __magic_name__ = val[ dim : dim * 2, : ] __magic_name__ = val[ -dim:, : ] else: __magic_name__ = val[ :dim ] __magic_name__ = val[ dim : dim * 2 ] __magic_name__ = val[ -dim: ] else: if "weight" in key: __magic_name__ = val[ :dim, : ] __magic_name__ = val[ dim : dim * 2, : ] __magic_name__ = val[ -dim:, : ] else: __magic_name__ = val[:dim] __magic_name__ = val[ dim : dim * 2 ] __magic_name__ = val[-dim:] elif key.startswith('''mit''' ): __magic_name__ = key_split[2] __magic_name__ = config.vision_config.mit_hidden_size if "weight" in key: __magic_name__ = val[:dim, :] __magic_name__ = val[dim : dim * 2, :] __magic_name__ = val[-dim:, :] else: __magic_name__ = val[:dim] __magic_name__ = val[dim : dim * 2] __magic_name__ = val[-dim:] else: __magic_name__ = key_split[2] __magic_name__ = config.text_config.hidden_size if "weight" in key: __magic_name__ = val[:dim, :] __magic_name__ = val[ dim : dim * 2, : ] __magic_name__ = val[-dim:, :] else: __magic_name__ = val[:dim] __magic_name__ = val[ dim : dim * 2 ] __magic_name__ = val[-dim:] else: __magic_name__ = rename_key(__UpperCamelCase ) if new_key_name in ["visual_projection.weight", "text_projection.weight"]: __magic_name__ = val.T __magic_name__ = val return orig_state_dict def lowercase ( __UpperCamelCase ) -> Any: if num_frames == 8: __magic_name__ = '''eating_spaghetti_8_frames.npy''' elif num_frames == 16: __magic_name__ = '''eating_spaghetti.npy''' elif num_frames == 32: __magic_name__ = '''eating_spaghetti_32_frames.npy''' __magic_name__ = hf_hub_download( repo_id='''hf-internal-testing/spaghetti-video''' , filename=__UpperCamelCase , repo_type='''dataset''' , ) 
__magic_name__ = np.load(__UpperCamelCase ) return list(__UpperCamelCase ) def lowercase ( __UpperCamelCase , __UpperCamelCase=None , __UpperCamelCase=False ) -> Tuple: __magic_name__ = { # fully supervised kinetics-400 checkpoints '''xclip-base-patch32''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth''', '''xclip-base-patch32-16-frames''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth''' ), '''xclip-base-patch16''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth''', '''xclip-base-patch16-16-frames''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth''' ), '''xclip-large-patch14''': '''https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&amp;export=download&amp;confirm=t&amp;uuid=b26caedc-88e2-473e-830a-9d158b653cdb''', '''xclip-large-patch14-16-frames''': '''https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&amp;export=download&amp;confirm=t&amp;uuid=538fa810-e671-4050-b385-9a623f89804f''', # fully supervised kinetics-600 checkpoints '''xclip-base-patch16-kinetics-600''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth''' ), '''xclip-base-patch16-kinetics-600-16-frames''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth''' ), '''xclip-large-patch14-kinetics-600''': '''https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&amp;export=download&amp;confirm=t&amp;uuid=141d4977-4a65-44ae-864f-4b0c19f838be''', # few shot '''xclip-base-patch16-hmdb-2-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth''' ), '''xclip-base-patch16-hmdb-4-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth''' ), '''xclip-base-patch16-hmdb-8-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth''' ), 
'''xclip-base-patch16-hmdb-16-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth''' ), '''xclip-base-patch16-ucf-2-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth''' ), '''xclip-base-patch16-ucf-4-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth''' ), '''xclip-base-patch16-ucf-8-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth''' ), '''xclip-base-patch16-ucf-16-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth''' ), # zero shot '''xclip-base-patch16-zero-shot''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth''', } __magic_name__ = model_to_url[model_name] __magic_name__ = 8 if "16-frames" in model_name: __magic_name__ = 16 elif "shot" in model_name: __magic_name__ = 32 __magic_name__ = get_xclip_config(__UpperCamelCase , __UpperCamelCase ) __magic_name__ = XCLIPModel(__UpperCamelCase ) model.eval() if "drive" in checkpoint_url: __magic_name__ = '''pytorch_model.bin''' gdown.cached_download(__UpperCamelCase , __UpperCamelCase , quiet=__UpperCamelCase ) __magic_name__ = torch.load(__UpperCamelCase , map_location='''cpu''' )['''model'''] else: __magic_name__ = torch.hub.load_state_dict_from_url(__UpperCamelCase )['''model'''] __magic_name__ = convert_state_dict(__UpperCamelCase , __UpperCamelCase ) __magic_name__ = XCLIPModel(__UpperCamelCase ) __magic_name__ , __magic_name__ = model.load_state_dict(__UpperCamelCase , strict=__UpperCamelCase ) assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"] model.eval() __magic_name__ = 336 if model_name == '''xclip-large-patch14-16-frames''' else 224 __magic_name__ = VideoMAEImageProcessor(size=__UpperCamelCase ) __magic_name__ = CLIPTokenizer.from_pretrained('''openai/clip-vit-base-patch32''' ) __magic_name__ = 
CLIPTokenizerFast.from_pretrained('''openai/clip-vit-base-patch32''' ) __magic_name__ = XCLIPProcessor(image_processor=__UpperCamelCase , tokenizer=__UpperCamelCase ) __magic_name__ = prepare_video(__UpperCamelCase ) __magic_name__ = processor( text=['''playing sports''', '''eating spaghetti''', '''go shopping'''] , videos=__UpperCamelCase , return_tensors='''pt''' , padding=__UpperCamelCase ) print('''Shape of pixel values:''' , inputs.pixel_values.shape ) with torch.no_grad(): __magic_name__ = model(**__UpperCamelCase ) # Verify outputs __magic_name__ = outputs.logits_per_video __magic_name__ = logits_per_video.softmax(dim=1 ) print('''Probs:''' , __UpperCamelCase ) # kinetics-400 if model_name == "xclip-base-patch32": __magic_name__ = torch.tensor([[0.0_019, 0.9_951, 0.0_030]] ) elif model_name == "xclip-base-patch32-16-frames": __magic_name__ = torch.tensor([[7.0999e-04, 9.9883e-01, 4.5580e-04]] ) elif model_name == "xclip-base-patch16": __magic_name__ = torch.tensor([[0.0_083, 0.9_681, 0.0_236]] ) elif model_name == "xclip-base-patch16-16-frames": __magic_name__ = torch.tensor([[7.6937e-04, 9.9728e-01, 1.9473e-03]] ) elif model_name == "xclip-large-patch14": __magic_name__ = torch.tensor([[0.0_062, 0.9_864, 0.0_075]] ) elif model_name == "xclip-large-patch14-16-frames": __magic_name__ = torch.tensor([[3.3877e-04, 9.9937e-01, 2.8888e-04]] ) # kinetics-600 elif model_name == "xclip-base-patch16-kinetics-600": __magic_name__ = torch.tensor([[0.0_555, 0.8_914, 0.0_531]] ) elif model_name == "xclip-base-patch16-kinetics-600-16-frames": __magic_name__ = torch.tensor([[3.8554e-04, 9.9929e-01, 3.2754e-04]] ) elif model_name == "xclip-large-patch14-kinetics-600": __magic_name__ = torch.tensor([[0.0_036, 0.9_920, 0.0_045]] ) # few shot elif model_name == "xclip-base-patch16-hmdb-2-shot": __magic_name__ = torch.tensor([[7.1890e-06, 9.9994e-01, 5.6559e-05]] ) elif model_name == "xclip-base-patch16-hmdb-4-shot": __magic_name__ = torch.tensor([[1.0320e-05, 9.9993e-01, 
6.2435e-05]] ) elif model_name == "xclip-base-patch16-hmdb-8-shot": __magic_name__ = torch.tensor([[4.1377e-06, 9.9990e-01, 9.8386e-05]] ) elif model_name == "xclip-base-patch16-hmdb-16-shot": __magic_name__ = torch.tensor([[4.1347e-05, 9.9962e-01, 3.3411e-04]] ) elif model_name == "xclip-base-patch16-ucf-2-shot": __magic_name__ = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]] ) elif model_name == "xclip-base-patch16-ucf-4-shot": __magic_name__ = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]] ) elif model_name == "xclip-base-patch16-ucf-8-shot": __magic_name__ = torch.tensor([[0.0_027, 0.9_904, 0.0_070]] ) elif model_name == "xclip-base-patch16-ucf-16-shot": __magic_name__ = torch.tensor([[9.8219e-04, 9.9593e-01, 3.0863e-03]] ) # zero shot elif model_name == "xclip-base-patch16-zero-shot": __magic_name__ = torch.tensor([[3.5082e-04, 9.9785e-01, 1.7966e-03]] ) else: raise ValueError(f'''Model name {model_name} not supported''' ) assert torch.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-3 ) print('''Looks ok!''' ) if pytorch_dump_folder_path is not None: print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(__UpperCamelCase ) if push_to_hub: print('''Pushing model, processor and slow tokenizer files to the hub...''' ) model.push_to_hub(__UpperCamelCase , organization='''nielsr''' ) processor.push_to_hub(__UpperCamelCase , organization='''nielsr''' ) slow_tokenizer.push_to_hub(__UpperCamelCase , organization='''nielsr''' ) if __name__ == "__main__": __lowerCamelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( "--model_name", default="xclip-base-patch32", type=str, help="Name of the model.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub." 
) __lowerCamelCase = parser.parse_args() convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
190
"""simple docstring""" import html from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin from ...utils import is_bsa_available, logging, requires_backends if is_bsa_available(): import bsa from bsa import BeautifulSoup __lowerCamelCase = logging.get_logger(__name__) class _lowercase ( __UpperCAmelCase ): def __init__( self , **UpperCamelCase_ ): requires_backends(self , ['''bs4'''] ) super().__init__(**UpperCamelCase_ ) def lowerCAmelCase__ ( self , UpperCamelCase_ ): __magic_name__ = [] __magic_name__ = [] __magic_name__ = element if element.name else element.parent for parent in child.parents: # type: bs4.element.Tag __magic_name__ = parent.find_all(child.name , recursive=UpperCamelCase_ ) xpath_tags.append(child.name ) xpath_subscripts.append( 0 if 1 == len(UpperCamelCase_ ) else next(i for i, s in enumerate(UpperCamelCase_ , 1 ) if s is child ) ) __magic_name__ = parent xpath_tags.reverse() xpath_subscripts.reverse() return xpath_tags, xpath_subscripts def lowerCAmelCase__ ( self , UpperCamelCase_ ): __magic_name__ = BeautifulSoup(UpperCamelCase_ , '''html.parser''' ) __magic_name__ = [] __magic_name__ = [] __magic_name__ = [] for element in html_code.descendants: if type(UpperCamelCase_ ) == bsa.element.NavigableString: if type(element.parent ) != bsa.element.Tag: continue __magic_name__ = html.unescape(UpperCamelCase_ ).strip() if not text_in_this_tag: continue all_doc_strings.append(UpperCamelCase_ ) __magic_name__ , __magic_name__ = self.xpath_soup(UpperCamelCase_ ) stringaxtag_seq.append(UpperCamelCase_ ) stringaxsubs_seq.append(UpperCamelCase_ ) if len(UpperCamelCase_ ) != len(UpperCamelCase_ ): raise ValueError('''Number of doc strings and xtags does not correspond''' ) if len(UpperCamelCase_ ) != len(UpperCamelCase_ ): raise ValueError('''Number of doc strings and xsubs does not correspond''' ) return all_doc_strings, stringaxtag_seq, stringaxsubs_seq def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ ): 
__magic_name__ = '''''' for tagname, subs in zip(UpperCamelCase_ , UpperCamelCase_ ): xpath += f'''/{tagname}''' if subs != 0: xpath += f'''[{subs}]''' return xpath def __call__( self , UpperCamelCase_ ): __magic_name__ = False # Check that strings has a valid type if isinstance(UpperCamelCase_ , UpperCamelCase_ ): __magic_name__ = True elif isinstance(UpperCamelCase_ , (list, tuple) ): if len(UpperCamelCase_ ) == 0 or isinstance(html_strings[0] , UpperCamelCase_ ): __magic_name__ = True if not valid_strings: raise ValueError( '''HTML strings must of type `str`, `List[str]` (batch of examples), ''' f'''but is of type {type(UpperCamelCase_ )}.''' ) __magic_name__ = bool(isinstance(UpperCamelCase_ , (list, tuple) ) and (isinstance(html_strings[0] , UpperCamelCase_ )) ) if not is_batched: __magic_name__ = [html_strings] # Get nodes + xpaths __magic_name__ = [] __magic_name__ = [] for html_string in html_strings: __magic_name__ , __magic_name__ , __magic_name__ = self.get_three_from_single(UpperCamelCase_ ) nodes.append(UpperCamelCase_ ) __magic_name__ = [] for node, tag_list, sub_list in zip(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ): __magic_name__ = self.construct_xpath(UpperCamelCase_ , UpperCamelCase_ ) xpath_strings.append(UpperCamelCase_ ) xpaths.append(UpperCamelCase_ ) # return as Dict __magic_name__ = {'''nodes''': nodes, '''xpaths''': xpaths} __magic_name__ = BatchFeature(data=UpperCamelCase_ , tensor_type=UpperCamelCase_ ) return encoded_inputs
190
1
'''simple docstring''' def snake_case_ (UpperCamelCase : int ): '''simple docstring''' if number < 0: raise ValueError('''number must not be negative''' ) return number & (number - 1) == 0 if __name__ == "__main__": import doctest doctest.testmod()
22
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCAmelCase : str = { """configuration_pegasus_x""": ["""PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP""", """PegasusXConfig"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase : Optional[Any] = [ """PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST""", """PegasusXForConditionalGeneration""", """PegasusXModel""", """PegasusXPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_pegasus_x import ( PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST, PegasusXForConditionalGeneration, PegasusXModel, PegasusXPreTrainedModel, ) else: import sys lowerCAmelCase : str = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
202
0
import json import os from dataclasses import dataclass from functools import partial from typing import Callable import flax.linen as nn import jax import jax.numpy as jnp import joblib import optax import wandb from flax import jax_utils, struct, traverse_util from flax.serialization import from_bytes, to_bytes from flax.training import train_state from flax.training.common_utils import shard from tqdm.auto import tqdm from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule class __a( lowercase__ ): """simple docstring""" lowerCAmelCase = 42 lowerCAmelCase = jnp.floataa lowerCAmelCase = True def a__ ( self ) -> int: super().setup() UpperCAmelCase_ : Tuple = nn.Dense(5 ,dtype=self.dtype ) def __call__( self ,*_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ) -> Dict: UpperCAmelCase_ : Optional[int] = super().__call__(*UpperCAmelCase__ ,**UpperCAmelCase__ ) UpperCAmelCase_ : Any = self.cls(outputs[2] ) return outputs[:2] + (cls_out,) class __a( lowercase__ ): """simple docstring""" lowerCAmelCase = FlaxBigBirdForNaturalQuestionsModule def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ): '''simple docstring''' def cross_entropy(_lowercase , _lowercase , _lowercase=None ): UpperCAmelCase_ : Tuple = logits.shape[-1] UpperCAmelCase_ : Optional[Any] = (labels[..., None] == jnp.arange(_lowercase )[None]).astype('''f4''' ) UpperCAmelCase_ : Any = jax.nn.log_softmax(_lowercase , axis=-1 ) UpperCAmelCase_ : Union[str, Any] = -jnp.sum(labels * logits , axis=-1 ) if reduction is not None: UpperCAmelCase_ : List[Any] = reduction(_lowercase ) return loss UpperCAmelCase_ : Optional[int] = partial(_lowercase , reduction=jnp.mean ) UpperCAmelCase_ : Union[str, Any] = cross_entropy(_lowercase , _lowercase ) UpperCAmelCase_ : List[Any] = cross_entropy(_lowercase , _lowercase ) UpperCAmelCase_ : Tuple = 
cross_entropy(_lowercase , _lowercase ) return (start_loss + end_loss + pooled_loss) / 3 @dataclass class __a: """simple docstring""" lowerCAmelCase = '''google/bigbird-roberta-base''' lowerCAmelCase = 3000 lowerCAmelCase = 1_0500 lowerCAmelCase = 128 lowerCAmelCase = 3 lowerCAmelCase = 1 lowerCAmelCase = 5 # tx_args lowerCAmelCase = 3E-5 lowerCAmelCase = 0.0 lowerCAmelCase = 2_0000 lowerCAmelCase = 0.0095 lowerCAmelCase = '''bigbird-roberta-natural-questions''' lowerCAmelCase = '''training-expt''' lowerCAmelCase = '''data/nq-training.jsonl''' lowerCAmelCase = '''data/nq-validation.jsonl''' def a__ ( self ) -> Optional[Any]: os.makedirs(self.base_dir ,exist_ok=UpperCAmelCase__ ) UpperCAmelCase_ : Tuple = os.path.join(self.base_dir ,self.save_dir ) UpperCAmelCase_ : List[str] = self.batch_size_per_device * jax.device_count() @dataclass class __a: """simple docstring""" lowerCAmelCase = 42 lowerCAmelCase = 4096 # no dynamic padding on TPUs def __call__( self ,_SCREAMING_SNAKE_CASE ) -> Optional[Any]: UpperCAmelCase_ : List[str] = self.collate_fn(UpperCAmelCase__ ) UpperCAmelCase_ : Tuple = jax.tree_util.tree_map(UpperCAmelCase__ ,UpperCAmelCase__ ) return batch def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> Union[str, Any]: UpperCAmelCase_ : str = self.fetch_inputs(features['''input_ids'''] ) UpperCAmelCase_ : Optional[int] = { '''input_ids''': jnp.array(UpperCAmelCase__ ,dtype=jnp.intaa ), '''attention_mask''': jnp.array(UpperCAmelCase__ ,dtype=jnp.intaa ), '''start_labels''': jnp.array(features['''start_token'''] ,dtype=jnp.intaa ), '''end_labels''': jnp.array(features['''end_token'''] ,dtype=jnp.intaa ), '''pooled_labels''': jnp.array(features['''category'''] ,dtype=jnp.intaa ), } return batch def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> str: UpperCAmelCase_ : int = [self._fetch_inputs(UpperCAmelCase__ ) for ids in input_ids] return zip(*UpperCAmelCase__ ) def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> Dict: UpperCAmelCase_ : Tuple = [1 for _ in range(len(UpperCAmelCase__ ) 
)] while len(UpperCAmelCase__ ) < self.max_length: input_ids.append(self.pad_id ) attention_mask.append(0 ) return input_ids, attention_mask def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase=None ): '''simple docstring''' if seed is not None: UpperCAmelCase_ : Optional[int] = dataset.shuffle(seed=_lowercase ) for i in range(len(_lowercase ) // batch_size ): UpperCAmelCase_ : str = dataset[i * batch_size : (i + 1) * batch_size] yield dict(_lowercase ) @partial(jax.pmap , axis_name='''batch''' ) def lowerCamelCase__ ( _lowercase , _lowercase , **_lowercase ): '''simple docstring''' def loss_fn(_lowercase ): UpperCAmelCase_ : str = model_inputs.pop('''start_labels''' ) UpperCAmelCase_ : Any = model_inputs.pop('''end_labels''' ) UpperCAmelCase_ : Dict = model_inputs.pop('''pooled_labels''' ) UpperCAmelCase_ : Dict = state.apply_fn(**_lowercase , params=_lowercase , dropout_rng=_lowercase , train=_lowercase ) UpperCAmelCase_ : List[Any] = outputs return state.loss_fn( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , ) UpperCAmelCase_ : List[Any] = jax.random.split(_lowercase ) UpperCAmelCase_ : Union[str, Any] = jax.value_and_grad(_lowercase ) UpperCAmelCase_ : Tuple = grad_fn(state.params ) UpperCAmelCase_ : str = jax.lax.pmean({'''loss''': loss} , axis_name='''batch''' ) UpperCAmelCase_ : List[Any] = jax.lax.pmean(_lowercase , '''batch''' ) UpperCAmelCase_ : str = state.apply_gradients(grads=_lowercase ) return state, metrics, new_drp_rng @partial(jax.pmap , axis_name='''batch''' ) def lowerCamelCase__ ( _lowercase , **_lowercase ): '''simple docstring''' UpperCAmelCase_ : int = model_inputs.pop('''start_labels''' ) UpperCAmelCase_ : Union[str, Any] = model_inputs.pop('''end_labels''' ) UpperCAmelCase_ : int = model_inputs.pop('''pooled_labels''' ) UpperCAmelCase_ : Dict = state.apply_fn(**_lowercase , params=state.params , train=_lowercase ) UpperCAmelCase_ : Any = outputs UpperCAmelCase_ : Optional[Any] = 
state.loss_fn(_lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) UpperCAmelCase_ : Optional[Any] = jax.lax.pmean({'''loss''': loss} , axis_name='''batch''' ) return metrics class __a( train_state.TrainState ): """simple docstring""" lowerCAmelCase = struct.field(pytree_node=lowercase__ ) @dataclass class __a: """simple docstring""" lowerCAmelCase = 42 lowerCAmelCase = 42 lowerCAmelCase = 42 lowerCAmelCase = 42 lowerCAmelCase = 42 lowerCAmelCase = 42 lowerCAmelCase = None def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=None ) -> List[str]: UpperCAmelCase_ : Union[str, Any] = model.params UpperCAmelCase_ : Optional[int] = TrainState.create( apply_fn=model.__call__ ,params=UpperCAmelCase__ ,tx=UpperCAmelCase__ ,loss_fn=UpperCAmelCase__ ,) if ckpt_dir is not None: UpperCAmelCase_ : List[str] = restore_checkpoint(UpperCAmelCase__ ,UpperCAmelCase__ ) UpperCAmelCase_ : Optional[Any] = { '''lr''': args.lr, '''init_lr''': args.init_lr, '''warmup_steps''': args.warmup_steps, '''num_train_steps''': num_train_steps, '''weight_decay''': args.weight_decay, } UpperCAmelCase_ : Optional[int] = build_tx(**UpperCAmelCase__ ) UpperCAmelCase_ : str = train_state.TrainState( step=UpperCAmelCase__ ,apply_fn=model.__call__ ,params=UpperCAmelCase__ ,tx=UpperCAmelCase__ ,opt_state=UpperCAmelCase__ ,) UpperCAmelCase_ : int = args UpperCAmelCase_ : Dict = data_collator UpperCAmelCase_ : Optional[int] = lr UpperCAmelCase_ : int = params UpperCAmelCase_ : Optional[Any] = jax_utils.replicate(UpperCAmelCase__ ) return state def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> str: UpperCAmelCase_ : List[Any] = self.args UpperCAmelCase_ : Dict = len(UpperCAmelCase__ ) // args.batch_size UpperCAmelCase_ : str = jax.random.PRNGKey(0 ) UpperCAmelCase_ : int = jax.random.split(UpperCAmelCase__ ,jax.device_count() ) for epoch in range(args.max_epochs ): UpperCAmelCase_ : Any = 
jnp.array(0 ,dtype=jnp.floataa ) UpperCAmelCase_ : Tuple = get_batched_dataset(UpperCAmelCase__ ,args.batch_size ,seed=UpperCAmelCase__ ) UpperCAmelCase_ : Optional[Any] = 0 for batch in tqdm(UpperCAmelCase__ ,total=UpperCAmelCase__ ,desc=f'''Running EPOCH-{epoch}''' ): UpperCAmelCase_ : Union[str, Any] = self.data_collator(UpperCAmelCase__ ) UpperCAmelCase_ : List[str] = self.train_step_fn(UpperCAmelCase__ ,UpperCAmelCase__ ,**UpperCAmelCase__ ) running_loss += jax_utils.unreplicate(metrics['''loss'''] ) i += 1 if i % args.logging_steps == 0: UpperCAmelCase_ : Optional[int] = jax_utils.unreplicate(state.step ) UpperCAmelCase_ : str = running_loss.item() / i UpperCAmelCase_ : Optional[Any] = self.scheduler_fn(state_step - 1 ) UpperCAmelCase_ : List[str] = self.evaluate(UpperCAmelCase__ ,UpperCAmelCase__ ) UpperCAmelCase_ : Any = { '''step''': state_step.item(), '''eval_loss''': eval_loss.item(), '''tr_loss''': tr_loss, '''lr''': lr.item(), } tqdm.write(str(UpperCAmelCase__ ) ) self.logger.log(UpperCAmelCase__ ,commit=UpperCAmelCase__ ) if i % args.save_steps == 0: self.save_checkpoint(args.save_dir + f'''-e{epoch}-s{i}''' ,state=UpperCAmelCase__ ) def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> str: UpperCAmelCase_ : Dict = get_batched_dataset(UpperCAmelCase__ ,self.args.batch_size ) UpperCAmelCase_ : int = len(UpperCAmelCase__ ) // self.args.batch_size UpperCAmelCase_ : str = jnp.array(0 ,dtype=jnp.floataa ) UpperCAmelCase_ : Dict = 0 for batch in tqdm(UpperCAmelCase__ ,total=UpperCAmelCase__ ,desc='''Evaluating ... 
''' ): UpperCAmelCase_ : Dict = self.data_collator(UpperCAmelCase__ ) UpperCAmelCase_ : List[Any] = self.val_step_fn(UpperCAmelCase__ ,**UpperCAmelCase__ ) running_loss += jax_utils.unreplicate(metrics['''loss'''] ) i += 1 return running_loss / i def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Tuple: UpperCAmelCase_ : Tuple = jax_utils.unreplicate(UpperCAmelCase__ ) print(f'''SAVING CHECKPOINT IN {save_dir}''' ,end=''' ... ''' ) self.model_save_fn(UpperCAmelCase__ ,params=state.params ) with open(os.path.join(UpperCAmelCase__ ,'''opt_state.msgpack''' ) ,'''wb''' ) as f: f.write(to_bytes(state.opt_state ) ) joblib.dump(self.args ,os.path.join(UpperCAmelCase__ ,'''args.joblib''' ) ) joblib.dump(self.data_collator ,os.path.join(UpperCAmelCase__ ,'''data_collator.joblib''' ) ) with open(os.path.join(UpperCAmelCase__ ,'''training_state.json''' ) ,'''w''' ) as f: json.dump({'''step''': state.step.item()} ,UpperCAmelCase__ ) print('''DONE''' ) def lowerCamelCase__ ( _lowercase , _lowercase ): '''simple docstring''' print(f'''RESTORING CHECKPOINT FROM {save_dir}''' , end=''' ... 
''' ) with open(os.path.join(_lowercase , '''flax_model.msgpack''' ) , '''rb''' ) as f: UpperCAmelCase_ : int = from_bytes(state.params , f.read() ) with open(os.path.join(_lowercase , '''opt_state.msgpack''' ) , '''rb''' ) as f: UpperCAmelCase_ : Union[str, Any] = from_bytes(state.opt_state , f.read() ) UpperCAmelCase_ : List[str] = joblib.load(os.path.join(_lowercase , '''args.joblib''' ) ) UpperCAmelCase_ : Any = joblib.load(os.path.join(_lowercase , '''data_collator.joblib''' ) ) with open(os.path.join(_lowercase , '''training_state.json''' ) , '''r''' ) as f: UpperCAmelCase_ : Any = json.load(_lowercase ) UpperCAmelCase_ : Dict = training_state['''step'''] print('''DONE''' ) return params, opt_state, step, args, data_collator def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase , _lowercase ): '''simple docstring''' UpperCAmelCase_ : str = num_train_steps - warmup_steps UpperCAmelCase_ : Dict = optax.linear_schedule(init_value=_lowercase , end_value=_lowercase , transition_steps=_lowercase ) UpperCAmelCase_ : Tuple = optax.linear_schedule(init_value=_lowercase , end_value=1E-7 , transition_steps=_lowercase ) UpperCAmelCase_ : List[str] = optax.join_schedules(schedules=[warmup_fn, decay_fn] , boundaries=[warmup_steps] ) return lr def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ): '''simple docstring''' def weight_decay_mask(_lowercase ): UpperCAmelCase_ : Tuple = traverse_util.flatten_dict(_lowercase ) UpperCAmelCase_ : Dict = {k: (v[-1] != '''bias''' and v[-2:] != ('''LayerNorm''', '''scale''')) for k, v in params.items()} return traverse_util.unflatten_dict(_lowercase ) UpperCAmelCase_ : str = scheduler_fn(_lowercase , _lowercase , _lowercase , _lowercase ) UpperCAmelCase_ : str = optax.adamw(learning_rate=_lowercase , weight_decay=_lowercase , mask=_lowercase ) return tx, lr
700
from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import numpy as np import tensorflow as tf from transformers import ( TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST, FlaubertConfig, TFFlaubertForMultipleChoice, TFFlaubertForQuestionAnsweringSimple, TFFlaubertForSequenceClassification, TFFlaubertForTokenClassification, TFFlaubertModel, TFFlaubertWithLMHeadModel, ) class __a: """simple docstring""" def __init__( self ,_SCREAMING_SNAKE_CASE ,) -> Tuple: UpperCAmelCase_ : Dict = parent UpperCAmelCase_ : Optional[Any] = 13 UpperCAmelCase_ : Optional[Any] = 7 UpperCAmelCase_ : Union[str, Any] = True UpperCAmelCase_ : Optional[Any] = True UpperCAmelCase_ : str = True UpperCAmelCase_ : Tuple = True UpperCAmelCase_ : str = True UpperCAmelCase_ : List[Any] = False UpperCAmelCase_ : Dict = False UpperCAmelCase_ : Tuple = False UpperCAmelCase_ : Dict = 2 UpperCAmelCase_ : Tuple = 99 UpperCAmelCase_ : Dict = 0 UpperCAmelCase_ : Optional[int] = 32 UpperCAmelCase_ : Optional[int] = 2 UpperCAmelCase_ : Tuple = 4 UpperCAmelCase_ : List[Any] = 0.1 UpperCAmelCase_ : int = 0.1 UpperCAmelCase_ : List[str] = 512 UpperCAmelCase_ : Any = 16 UpperCAmelCase_ : Union[str, Any] = 2 UpperCAmelCase_ : Any = 0.02 UpperCAmelCase_ : Tuple = 3 UpperCAmelCase_ : List[Any] = 4 UpperCAmelCase_ : Dict = '''last''' UpperCAmelCase_ : Dict = True UpperCAmelCase_ : Tuple = None UpperCAmelCase_ : Union[str, Any] = 0 def a__ ( self ) -> List[str]: UpperCAmelCase_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) UpperCAmelCase_ : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] ,dtype=tf.floataa ) 
UpperCAmelCase_ : Optional[Any] = None if self.use_input_lengths: UpperCAmelCase_ : Optional[int] = ( ids_tensor([self.batch_size] ,vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length UpperCAmelCase_ : List[str] = None if self.use_token_type_ids: UpperCAmelCase_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.n_langs ) UpperCAmelCase_ : str = None UpperCAmelCase_ : Tuple = None UpperCAmelCase_ : Any = None if self.use_labels: UpperCAmelCase_ : int = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) UpperCAmelCase_ : List[str] = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels ) UpperCAmelCase_ : Any = ids_tensor([self.batch_size] ,2 ,dtype=tf.floataa ) UpperCAmelCase_ : List[Any] = ids_tensor([self.batch_size] ,self.num_choices ) UpperCAmelCase_ : int = FlaubertConfig( vocab_size=self.vocab_size ,n_special=self.n_special ,emb_dim=self.hidden_size ,n_layers=self.num_hidden_layers ,n_heads=self.num_attention_heads ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,gelu_activation=self.gelu_activation ,sinusoidal_embeddings=self.sinusoidal_embeddings ,asm=self.asm ,causal=self.causal ,n_langs=self.n_langs ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,summary_type=self.summary_type ,use_proj=self.use_proj ,bos_token_id=self.bos_token_id ,) return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,) -> Any: UpperCAmelCase_ : Tuple = TFFlaubertModel(config=_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Optional[Any] = {'''input_ids''': input_ids, '''lengths''': input_lengths, '''langs''': token_type_ids} UpperCAmelCase_ : 
List[Any] = model(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Union[str, Any] = [input_ids, input_mask] UpperCAmelCase_ : Union[str, Any] = model(_SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,) -> str: UpperCAmelCase_ : int = TFFlaubertWithLMHeadModel(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Union[str, Any] = {'''input_ids''': input_ids, '''lengths''': input_lengths, '''langs''': token_type_ids} UpperCAmelCase_ : Optional[Any] = model(_SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,) -> Tuple: UpperCAmelCase_ : List[Any] = TFFlaubertForQuestionAnsweringSimple(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Tuple = {'''input_ids''': input_ids, '''lengths''': input_lengths} UpperCAmelCase_ : Optional[int] = model(_SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) ) def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,) -> int: UpperCAmelCase_ : List[Any] = TFFlaubertForSequenceClassification(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Any = {'''input_ids''': input_ids, '''lengths''': input_lengths} UpperCAmelCase_ : str = model(_SCREAMING_SNAKE_CASE ) 
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) ) def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,) -> Optional[Any]: UpperCAmelCase_ : str = self.num_labels UpperCAmelCase_ : List[str] = TFFlaubertForTokenClassification(config=_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Optional[int] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} UpperCAmelCase_ : List[str] = model(_SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) ) def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,) -> str: UpperCAmelCase_ : List[Any] = self.num_choices UpperCAmelCase_ : Any = TFFlaubertForMultipleChoice(config=_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : List[str] = tf.tile(tf.expand_dims(_SCREAMING_SNAKE_CASE ,1 ) ,(1, self.num_choices, 1) ) UpperCAmelCase_ : Union[str, Any] = tf.tile(tf.expand_dims(_SCREAMING_SNAKE_CASE ,1 ) ,(1, self.num_choices, 1) ) UpperCAmelCase_ : str = tf.tile(tf.expand_dims(_SCREAMING_SNAKE_CASE ,1 ) ,(1, self.num_choices, 1) ) UpperCAmelCase_ : Dict = { '''input_ids''': multiple_choice_inputs_ids, '''attention_mask''': multiple_choice_input_mask, '''token_type_ids''': multiple_choice_token_type_ids, } UpperCAmelCase_ : str = model(_SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) ) def a__ ( self ) -> List[Any]: UpperCAmelCase_ : List[Any] = self.prepare_config_and_inputs() ( ( UpperCAmelCase_ ), ( UpperCAmelCase_ ), ( UpperCAmelCase_ ), ( UpperCAmelCase_ ), ( UpperCAmelCase_ ), ( UpperCAmelCase_ ), ( 
UpperCAmelCase_ ), ( UpperCAmelCase_ ), ( UpperCAmelCase_ ), ) : Any = config_and_inputs UpperCAmelCase_ : Tuple = { '''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''langs''': token_type_ids, '''lengths''': input_lengths, } return config, inputs_dict @require_tf class __a( _a , _a , unittest.TestCase ): """simple docstring""" lowerCAmelCase = ( ( TFFlaubertModel, TFFlaubertWithLMHeadModel, TFFlaubertForSequenceClassification, TFFlaubertForQuestionAnsweringSimple, TFFlaubertForTokenClassification, TFFlaubertForMultipleChoice, ) if is_tf_available() else () ) lowerCAmelCase = ( (TFFlaubertWithLMHeadModel,) if is_tf_available() else () ) # TODO (PVP): Check other models whether language generation is also applicable lowerCAmelCase = ( { '''feature-extraction''': TFFlaubertModel, '''fill-mask''': TFFlaubertWithLMHeadModel, '''question-answering''': TFFlaubertForQuestionAnsweringSimple, '''text-classification''': TFFlaubertForSequenceClassification, '''token-classification''': TFFlaubertForTokenClassification, '''zero-shot''': TFFlaubertForSequenceClassification, } if is_tf_available() else {} ) lowerCAmelCase = False lowerCAmelCase = False def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Union[str, Any]: if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith('''Fast''' ) ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. 
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def a__ ( self ) -> Any: UpperCAmelCase_ : Optional[int] = TFFlaubertModelTester(self ) UpperCAmelCase_ : Union[str, Any] = ConfigTester(self ,config_class=_SCREAMING_SNAKE_CASE ,emb_dim=37 ) def a__ ( self ) -> Union[str, Any]: self.config_tester.run_common_tests() def a__ ( self ) -> Tuple: UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_model(*_SCREAMING_SNAKE_CASE ) def a__ ( self ) -> Optional[int]: UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_lm_head(*_SCREAMING_SNAKE_CASE ) def a__ ( self ) -> Optional[int]: UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_qa(*_SCREAMING_SNAKE_CASE ) def a__ ( self ) -> str: UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_sequence_classif(*_SCREAMING_SNAKE_CASE ) def a__ ( self ) -> Tuple: UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_for_token_classification(*_SCREAMING_SNAKE_CASE ) def a__ ( self ) -> Optional[int]: UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_for_multiple_choice(*_SCREAMING_SNAKE_CASE ) @slow def a__ ( self ) -> Any: for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase_ : Any = TFFlaubertModel.from_pretrained(_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) @require_tf @require_sentencepiece @require_tokenizers class __a( unittest.TestCase ): """simple docstring""" @slow def a__ ( self ) -> int: UpperCAmelCase_ : Optional[Any] = 
TFFlaubertModel.from_pretrained('''jplu/tf-flaubert-small-cased''' ) UpperCAmelCase_ : Dict = tf.convert_to_tensor( [[0, 158, 735, 2_592, 1_424, 6_727, 82, 1]] ,dtype=tf.intaa ,) # "J'aime flaubert !" UpperCAmelCase_ : str = model(_SCREAMING_SNAKE_CASE )[0] UpperCAmelCase_ : Optional[int] = tf.TensorShape((1, 8, 512) ) self.assertEqual(output.shape ,_SCREAMING_SNAKE_CASE ) # compare the actual values for a slice. UpperCAmelCase_ : List[Any] = tf.convert_to_tensor( [ [ [-1.8_76_87_73, -1.56_65_55, 0.27_07_24_18], [-1.6_92_00_38, -0.5_87_35_05, 1.9_32_95_99], [-2.9_56_39_85, -1.6_99_38_35, 1.7_97_20_52], ] ] ,dtype=tf.floataa ,) self.assertTrue(np.allclose(output[:, :3, :3].numpy() ,expected_slice.numpy() ,atol=1e-4 ) )
300
0
"""simple docstring""" import math def a ( __snake_case : float, __snake_case : float ): '''simple docstring''' return math.pow(__snake_case, 2 ) - a def a ( __snake_case : float ): '''simple docstring''' return 2 * x def a ( __snake_case : float ): '''simple docstring''' UpperCAmelCase_ :str = 2.0 while start <= a: UpperCAmelCase_ :Optional[int] = math.pow(__snake_case, 2 ) return start def a ( __snake_case : float, __snake_case : int = 9999, __snake_case : float = 0.00000000000001 ): '''simple docstring''' if a < 0: raise ValueError('''math domain error''' ) UpperCAmelCase_ :Optional[Any] = get_initial_point(__snake_case ) for _ in range(__snake_case ): UpperCAmelCase_ :int = value UpperCAmelCase_ :Tuple = value - fx(__snake_case, __snake_case ) / fx_derivative(__snake_case ) if abs(prev_value - value ) < tolerance: return value return value if __name__ == "__main__": from doctest import testmod testmod()
608
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available __lowerCamelCase = {"configuration_glpn": ["GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP", "GLPNConfig"]} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase = ["GLPNFeatureExtractor"] __lowerCamelCase = ["GLPNImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase = [ "GLPN_PRETRAINED_MODEL_ARCHIVE_LIST", "GLPNForDepthEstimation", "GLPNLayer", "GLPNModel", "GLPNPreTrainedModel", ] if TYPE_CHECKING: from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_glpn import GLPNFeatureExtractor from .image_processing_glpn import GLPNImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_glpn import ( GLPN_PRETRAINED_MODEL_ARCHIVE_LIST, GLPNForDepthEstimation, GLPNLayer, GLPNModel, GLPNPreTrainedModel, ) else: import sys __lowerCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
608
1
import random import unittest import torch from diffusers import IFInpaintingPipeline from diffusers.utils import floats_tensor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import skip_mps, torch_device from ..pipeline_params import ( TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin from . import IFPipelineTesterMixin @skip_mps class UpperCAmelCase__ ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ): """simple docstring""" UpperCAmelCase__ : Union[str, Any] = IFInpaintingPipeline UpperCAmelCase__ : Optional[Any] = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"""width""", """height"""} UpperCAmelCase__ : Optional[int] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS UpperCAmelCase__ : str = PipelineTesterMixin.required_optional_params - {"""latents"""} def _a ( self ) -> Optional[int]: return self._get_dummy_components() def _a ( self , A_ , A_=0 ) -> Tuple: if str(lowercase__ ).startswith('mps' ): __UpperCamelCase =torch.manual_seed(lowercase__ ) else: __UpperCamelCase =torch.Generator(device=lowercase__ ).manual_seed(lowercase__ ) __UpperCamelCase =floats_tensor((1, 3, 32, 32) , rng=random.Random(lowercase__ ) ).to(lowercase__ ) __UpperCamelCase =floats_tensor((1, 3, 32, 32) , rng=random.Random(lowercase__ ) ).to(lowercase__ ) __UpperCamelCase ={ """prompt""": """A painting of a squirrel eating a burger""", """image""": image, """mask_image""": mask_image, """generator""": generator, """num_inference_steps""": 2, """output_type""": """numpy""", } return inputs @unittest.skipIf( torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , ) def _a ( self ) -> Optional[Any]: self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 ) def _a ( self ) -> List[str]: self._test_save_load_optional_components() @unittest.skipIf(torch_device 
!= 'cuda' , reason='float16 requires CUDA' ) def _a ( self ) -> str: # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder super().test_save_load_floataa(expected_max_diff=1E-1 ) def _a ( self ) -> str: self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 ) def _a ( self ) -> Dict: self._test_save_load_local() def _a ( self ) -> Optional[Any]: self._test_inference_batch_single_identical( expected_max_diff=1E-2 , )
704
from __future__ import annotations import math import random from collections.abc import Collection from typing import overload class UpperCAmelCase__ : """simple docstring""" def __init__( self , A_ = None ) -> None: if components is None: __UpperCamelCase =[] __UpperCamelCase =list(A_ ) def __len__( self ) -> int: return len(self.__components ) def __str__( self ) -> str: return "(" + ",".join(map(A_ , self.__components ) ) + ")" def __add__( self , A_ ) -> Vector: __UpperCamelCase =len(self ) if size == len(A_ ): __UpperCamelCase =[self.__components[i] + other.component(A_ ) for i in range(A_ )] return Vector(A_ ) else: raise Exception('must have the same size' ) def __sub__( self , A_ ) -> Vector: __UpperCamelCase =len(self ) if size == len(A_ ): __UpperCamelCase =[self.__components[i] - other.component(A_ ) for i in range(A_ )] return Vector(A_ ) else: # error case raise Exception('must have the same size' ) @overload def __mul__( self , A_ ) -> Vector: ... @overload def __mul__( self , A_ ) -> float: ... def __mul__( self , A_ ) -> float | Vector: if isinstance(A_ , (float, int) ): __UpperCamelCase =[c * other for c in self.__components] return Vector(A_ ) elif isinstance(A_ , A_ ) and len(self ) == len(A_ ): __UpperCamelCase =len(self ) __UpperCamelCase =[self.__components[i] * other.component(A_ ) for i in range(A_ )] return sum(A_ ) else: # error case raise Exception('invalid operand!' 
) def _a ( self ) -> Vector: return Vector(self.__components ) def _a ( self , A_ ) -> float: if isinstance(A_ , A_ ) and -len(self.__components ) <= i < len(self.__components ): return self.__components[i] else: raise Exception('index out of range' ) def _a ( self , A_ , A_ ) -> None: assert -len(self.__components ) <= pos < len(self.__components ) __UpperCamelCase =value def _a ( self ) -> float: if len(self.__components ) == 0: raise Exception('Vector is empty' ) __UpperCamelCase =[c**2 for c in self.__components] return math.sqrt(sum(A_ ) ) def _a ( self , A_ , A_ = False ) -> float: __UpperCamelCase =self * other __UpperCamelCase =self.euclidean_length() * other.euclidean_length() if deg: return math.degrees(math.acos(num / den ) ) else: return math.acos(num / den ) def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int ): assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) return Vector([0] * dimension ) def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ): assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and (isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )) __UpperCamelCase =[0] * dimension __UpperCamelCase =1 return Vector(SCREAMING_SNAKE_CASE__ ) def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : float , SCREAMING_SNAKE_CASE__ : Vector , SCREAMING_SNAKE_CASE__ : Vector ): assert ( isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and (isinstance(SCREAMING_SNAKE_CASE__ , (int, float) )) ) return x * scalar + y def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ): random.seed(SCREAMING_SNAKE_CASE__ ) __UpperCamelCase =[random.randint(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for _ in range(SCREAMING_SNAKE_CASE__ )] return Vector(SCREAMING_SNAKE_CASE__ ) class UpperCAmelCase__ : """simple docstring""" def __init__( self , A_ , A_ , A_ ) -> None: 
__UpperCamelCase =matrix __UpperCamelCase =w __UpperCamelCase =h def __str__( self ) -> str: __UpperCamelCase ='' for i in range(self.__height ): ans += "|" for j in range(self.__width ): if j < self.__width - 1: ans += str(self.__matrix[i][j] ) + "," else: ans += str(self.__matrix[i][j] ) + "|\n" return ans def __add__( self , A_ ) -> Matrix: if self.__width == other.width() and self.__height == other.height(): __UpperCamelCase =[] for i in range(self.__height ): __UpperCamelCase =[ self.__matrix[i][j] + other.component(A_ , A_ ) for j in range(self.__width ) ] matrix.append(A_ ) return Matrix(A_ , self.__width , self.__height ) else: raise Exception('matrix must have the same dimension!' ) def __sub__( self , A_ ) -> Matrix: if self.__width == other.width() and self.__height == other.height(): __UpperCamelCase =[] for i in range(self.__height ): __UpperCamelCase =[ self.__matrix[i][j] - other.component(A_ , A_ ) for j in range(self.__width ) ] matrix.append(A_ ) return Matrix(A_ , self.__width , self.__height ) else: raise Exception('matrices must have the same dimension!' ) @overload def __mul__( self , A_ ) -> Matrix: ... @overload def __mul__( self , A_ ) -> Vector: ... def __mul__( self , A_ ) -> Vector | Matrix: if isinstance(A_ , A_ ): # matrix-vector if len(A_ ) == self.__width: __UpperCamelCase =zero_vector(self.__height ) for i in range(self.__height ): __UpperCamelCase =[ self.__matrix[i][j] * other.component(A_ ) for j in range(self.__width ) ] ans.change_component(A_ , sum(A_ ) ) return ans else: raise Exception( 'vector must have the same size as the ' 'number of columns of the matrix!' 
) elif isinstance(A_ , (int, float) ): # matrix-scalar __UpperCamelCase =[ [self.__matrix[i][j] * other for j in range(self.__width )] for i in range(self.__height ) ] return Matrix(A_ , self.__width , self.__height ) return None def _a ( self ) -> int: return self.__height def _a ( self ) -> int: return self.__width def _a ( self , A_ , A_ ) -> float: if 0 <= x < self.__height and 0 <= y < self.__width: return self.__matrix[x][y] else: raise Exception('change_component: indices out of bounds' ) def _a ( self , A_ , A_ , A_ ) -> None: if 0 <= x < self.__height and 0 <= y < self.__width: __UpperCamelCase =value else: raise Exception('change_component: indices out of bounds' ) def _a ( self , A_ , A_ ) -> float: if self.__height != self.__width: raise Exception('Matrix is not square' ) __UpperCamelCase =self.__matrix[:x] + self.__matrix[x + 1 :] for i in range(len(A_ ) ): __UpperCamelCase =minor[i][:y] + minor[i][y + 1 :] return Matrix(A_ , self.__width - 1 , self.__height - 1 ).determinant() def _a ( self , A_ , A_ ) -> float: if self.__height != self.__width: raise Exception('Matrix is not square' ) if 0 <= x < self.__height and 0 <= y < self.__width: return (-1) ** (x + y) * self.minor(A_ , A_ ) else: raise Exception('Indices out of bounds' ) def _a ( self ) -> float: if self.__height != self.__width: raise Exception('Matrix is not square' ) if self.__height < 1: raise Exception('Matrix has no element' ) elif self.__height == 1: return self.__matrix[0][0] elif self.__height == 2: return ( self.__matrix[0][0] * self.__matrix[1][1] - self.__matrix[0][1] * self.__matrix[1][0] ) else: __UpperCamelCase =[ self.__matrix[0][y] * self.cofactor(0 , A_ ) for y in range(self.__width ) ] return sum(A_ ) def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int ): __UpperCamelCase =[[0] * n for _ in range(SCREAMING_SNAKE_CASE__ )] return Matrix(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int , 
SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ): random.seed(SCREAMING_SNAKE_CASE__ ) __UpperCamelCase =[ [random.randint(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for _ in range(SCREAMING_SNAKE_CASE__ )] for _ in range(SCREAMING_SNAKE_CASE__ ) ] return Matrix(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
682
0
from manim import * class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' def UpperCamelCase_ ( self : str ): __A = Rectangle(height=0.5 ,width=0.5 ) __A = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0 ) __A = [mem.copy() for i in range(6 )] __A = [mem.copy() for i in range(6 )] __A = VGroup(*A ).arrange(A ,buff=0 ) __A = VGroup(*A ).arrange(A ,buff=0 ) __A = VGroup(A ,A ).arrange(A ,buff=0 ) __A = Text("CPU" ,font_size=24 ) __A = Group(A ,A ).arrange(A ,buff=0.5 ,aligned_edge=A ) cpu.move_to([-2.5, -0.5, 0] ) self.add(A ) __A = [mem.copy() for i in range(1 )] __A = VGroup(*A ).arrange(A ,buff=0 ) __A = Text("GPU" ,font_size=24 ) __A = Group(A ,A ).arrange(A ,buff=0.5 ,aligned_edge=A ) gpu.align_to(A ,A ) gpu.set_x(gpu.get_x() - 1 ) self.add(A ) __A = [mem.copy() for i in range(6 )] __A = VGroup(*A ).arrange(A ,buff=0 ) __A = Text("Model" ,font_size=24 ) __A = Group(A ,A ).arrange(A ,buff=0.5 ,aligned_edge=A ) model.move_to([3, -1.0, 0] ) self.play( Create(A ,run_time=1 ) ,Create(A ,run_time=1 ) ,Create(A ,run_time=1 ) ,) __A = MarkupText( f'''First, an empty model skeleton is loaded\ninto <span fgcolor=\'{YELLOW}\'>memory</span> without using much RAM.''' ,font_size=24 ,) __A = Square(side_length=2.2 ) key.move_to([-5, 2, 0] ) __A = MarkupText( f'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' ,font_size=18 ,) key_text.move_to([-5, 2.4, 0] ) step_a.move_to([2, 2, 0] ) self.play(Write(A ,run_time=2.5 ) ,Write(A ) ,Write(A ) ) self.add(A ) __A = [] __A = [] __A = [] for i, rect in enumerate(A ): __A = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0.0 ).set_fill(A ,opacity=0.7 ) cpu_target.move_to(A ) cpu_target.generate_target() __A = 0.46 / 4 __A = 0.46 / 3 if i == 0: cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) ,buff=0.02 ,direction=A ) cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 ) elif i == 3: cpu_target.target.next_to(cpu_targs[0].target ,direction=A ,buff=0.0 ) else: 
cpu_target.target.next_to(cpu_targs[i - 1].target ,direction=A ,buff=0.0 ) cpu_targs.append(A ) first_animations.append(rect.animate(run_time=0.5 ).set_stroke(A ) ) second_animations.append(MoveToTarget(A ,run_time=1.5 ) ) self.play(*A ) self.play(*A ) self.wait()
55
import gc import unittest from diffusers import FlaxStableDiffusionInpaintPipeline from diffusers.utils import is_flax_available, load_image, slow from diffusers.utils.testing_utils import require_flax if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard @slow @require_flax class UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self : Tuple ): # clean up the VRAM after each test super().tearDown() gc.collect() def UpperCamelCase_ ( self : Optional[int] ): __A = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/sd2-inpaint/init_image.png" ) __A = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" ) __A = "xvjiarui/stable-diffusion-2-inpainting" __A , __A = FlaxStableDiffusionInpaintPipeline.from_pretrained(A ,safety_checker=A ) __A = "Face of a yellow cat, high resolution, sitting on a park bench" __A = jax.random.PRNGKey(0 ) __A = 50 __A = jax.device_count() __A = num_samples * [prompt] __A = num_samples * [init_image] __A = num_samples * [mask_image] __A , __A , __A = pipeline.prepare_inputs(A ,A ,A ) # shard inputs and rng __A = replicate(A ) __A = jax.random.split(A ,jax.device_count() ) __A = shard(A ) __A = shard(A ) __A = shard(A ) __A = pipeline( A ,A ,A ,A ,A ,A ,jit=A ) __A = output.images.reshape(A ,5_12 ,5_12 ,3 ) __A = images[0, 2_53:2_56, 2_53:2_56, -1] __A = jnp.asarray(jax.device_get(image_slice.flatten() ) ) __A = jnp.array( [0.3_61_13_07, 0.37_64_97_36, 0.3_75_74_08, 0.38_21_39_53, 0.39_29_51_67, 0.3_84_16_31, 0.41_55_49_78, 0.4_13_74_75, 0.4_21_70_84] ) print(f'''output_slice: {output_slice}''' ) assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
55
1
import json from typing import TYPE_CHECKING, List, Optional, Tuple from tokenizers import pre_tokenizers from ...tokenization_utils_base import BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_gpta import GPTaTokenizer if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation a_ = logging.get_logger(__name__) a_ = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'} a_ = { 'vocab_file': { 'gpt2': 'https://huggingface.co/gpt2/resolve/main/vocab.json', 'gpt2-medium': 'https://huggingface.co/gpt2-medium/resolve/main/vocab.json', 'gpt2-large': 'https://huggingface.co/gpt2-large/resolve/main/vocab.json', 'gpt2-xl': 'https://huggingface.co/gpt2-xl/resolve/main/vocab.json', 'distilgpt2': 'https://huggingface.co/distilgpt2/resolve/main/vocab.json', }, 'merges_file': { 'gpt2': 'https://huggingface.co/gpt2/resolve/main/merges.txt', 'gpt2-medium': 'https://huggingface.co/gpt2-medium/resolve/main/merges.txt', 'gpt2-large': 'https://huggingface.co/gpt2-large/resolve/main/merges.txt', 'gpt2-xl': 'https://huggingface.co/gpt2-xl/resolve/main/merges.txt', 'distilgpt2': 'https://huggingface.co/distilgpt2/resolve/main/merges.txt', }, 'tokenizer_file': { 'gpt2': 'https://huggingface.co/gpt2/resolve/main/tokenizer.json', 'gpt2-medium': 'https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json', 'gpt2-large': 'https://huggingface.co/gpt2-large/resolve/main/tokenizer.json', 'gpt2-xl': 'https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json', 'distilgpt2': 'https://huggingface.co/distilgpt2/resolve/main/tokenizer.json', }, } a_ = { 'gpt2': 1024, 'gpt2-medium': 1024, 'gpt2-large': 1024, 'gpt2-xl': 1024, 'distilgpt2': 1024, } class _UpperCamelCase ( __A ): '''simple docstring''' lowerCamelCase__ =VOCAB_FILES_NAMES lowerCamelCase__ =PRETRAINED_VOCAB_FILES_MAP lowerCamelCase__ =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase__ 
=['input_ids', 'attention_mask'] lowerCamelCase__ =GPTaTokenizer def __init__( self : str , a : Tuple=None , a : List[Any]=None , a : List[Any]=None , a : Any="<|endoftext|>" , a : str="<|endoftext|>" , a : Any="<|endoftext|>" , a : Union[str, Any]=False , **a : int , ) -> Union[str, Any]: """simple docstring""" super().__init__( a , a , tokenizer_file=a , unk_token=a , bos_token=a , eos_token=a , add_prefix_space=a , **a , ) SCREAMING_SNAKE_CASE : Tuple = kwargs.pop("add_bos_token" , a ) SCREAMING_SNAKE_CASE : Dict = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("add_prefix_space" , a ) != add_prefix_space: SCREAMING_SNAKE_CASE : Tuple = getattr(a , pre_tok_state.pop("type" ) ) SCREAMING_SNAKE_CASE : List[str] = add_prefix_space SCREAMING_SNAKE_CASE : List[Any] = pre_tok_class(**a ) SCREAMING_SNAKE_CASE : Optional[int] = add_prefix_space def __UpperCamelCase ( self : Optional[int] , *a : Tuple , **a : List[Any] ) -> BatchEncoding: """simple docstring""" SCREAMING_SNAKE_CASE : Optional[int] = kwargs.get("is_split_into_words" , a ) assert self.add_prefix_space or not is_split_into_words, ( F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True " "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*a , **a ) def __UpperCamelCase ( self : List[str] , *a : Any , **a : Dict ) -> BatchEncoding: """simple docstring""" SCREAMING_SNAKE_CASE : str = kwargs.get("is_split_into_words" , a ) assert self.add_prefix_space or not is_split_into_words, ( F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True " "to use it with pretokenized inputs." 
) return super()._encode_plus(*a , **a ) def __UpperCamelCase ( self : str , a : str , a : Optional[str] = None ) -> Tuple[str]: """simple docstring""" SCREAMING_SNAKE_CASE : int = self._tokenizer.model.save(a , name=a ) return tuple(a ) def __UpperCamelCase ( self : Union[str, Any] , a : "Conversation" ) -> List[int]: """simple docstring""" SCREAMING_SNAKE_CASE : Optional[int] = [] for is_user, text in conversation.iter_texts(): input_ids.extend(self.encode(a , add_special_tokens=a ) + [self.eos_token_id] ) if len(a ) > self.model_max_length: SCREAMING_SNAKE_CASE : List[Any] = input_ids[-self.model_max_length :] return input_ids
193
from __future__ import annotations def lowerCamelCase__ ( _a , _a): if b == 0: return (1, 0) ((SCREAMING_SNAKE_CASE) ,(SCREAMING_SNAKE_CASE)) : Tuple = extended_euclid(_a , a % b) SCREAMING_SNAKE_CASE : Dict = a // b return (y, x - k * y) def lowerCamelCase__ ( _a , _a , _a , _a): ((SCREAMING_SNAKE_CASE) ,(SCREAMING_SNAKE_CASE)) : str = extended_euclid(_a , _a) SCREAMING_SNAKE_CASE : str = na * na SCREAMING_SNAKE_CASE : int = ra * x * na + ra * y * na return (n % m + m) % m def lowerCamelCase__ ( _a , _a): ((SCREAMING_SNAKE_CASE) ,(SCREAMING_SNAKE_CASE)) : List[str] = extended_euclid(_a , _a) if b < 0: SCREAMING_SNAKE_CASE : int = (b % n + n) % n return b def lowerCamelCase__ ( _a , _a , _a , _a): SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Dict = invert_modulo(_a , _a), invert_modulo(_a , _a) SCREAMING_SNAKE_CASE : Optional[int] = na * na SCREAMING_SNAKE_CASE : Optional[Any] = ra * x * na + ra * y * na return (n % m + m) % m if __name__ == "__main__": from doctest import testmod testmod(name='chinese_remainder_theorem', verbose=True) testmod(name='chinese_remainder_theorem2', verbose=True) testmod(name='invert_modulo', verbose=True) testmod(name='extended_euclid', verbose=True)
193
1
"""simple docstring""" import fire from utils import calculate_rouge, save_json def __snake_case ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=None , **UpperCamelCase__ ) -> Optional[int]: """simple docstring""" A = [x.strip() for x in open(UpperCamelCase__ ).readlines()] A = [x.strip() for x in open(UpperCamelCase__ ).readlines()][: len(UpperCamelCase__ )] A = calculate_rouge(UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ) if save_path is not None: save_json(UpperCamelCase__ , UpperCamelCase__ , indent=UpperCamelCase__ ) return metrics # these print nicely if __name__ == "__main__": fire.Fire(calculate_rouge_path)
690
"""simple docstring""" import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNetaDConditionModel from diffusers.utils.testing_utils import ( enable_full_determinism, load_numpy, nightly, require_torch_gpu, slow, torch_device, ) from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class lowerCamelCase__ ( UpperCAmelCase_ , unittest.TestCase ): lowerCAmelCase = LDMTextToImagePipeline lowerCAmelCase = TEXT_TO_IMAGE_PARAMS - { """negative_prompt""", """negative_prompt_embeds""", """cross_attention_kwargs""", """prompt_embeds""", } lowerCAmelCase = PipelineTesterMixin.required_optional_params - { """num_images_per_prompt""", """callback""", """callback_steps""", } lowerCAmelCase = TEXT_TO_IMAGE_BATCH_PARAMS lowerCAmelCase = False def __a ( self : Dict ): torch.manual_seed(0 ) A = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , ) A = DDIMScheduler( beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='scaled_linear' , clip_sample=_lowercase , set_alpha_to_one=_lowercase , ) torch.manual_seed(0 ) A = AutoencoderKL( block_out_channels=(32, 64) , in_channels=3 , out_channels=3 , down_block_types=('DownEncoderBlock2D', 'DownEncoderBlock2D') , up_block_types=('UpDecoderBlock2D', 'UpDecoderBlock2D') , latent_channels=4 , ) torch.manual_seed(0 ) A = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) A = CLIPTextModel(_lowercase ) A = 
CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) A = { 'unet': unet, 'scheduler': scheduler, 'vqvae': vae, 'bert': text_encoder, 'tokenizer': tokenizer, } return components def __a ( self : Union[str, Any] , _lowercase : Union[str, Any] , _lowercase : Union[str, Any]=0 ): if str(_lowercase ).startswith('mps' ): A = torch.manual_seed(_lowercase ) else: A = torch.Generator(device=_lowercase ).manual_seed(_lowercase ) A = { 'prompt': 'A painting of a squirrel eating a burger', 'generator': generator, 'num_inference_steps': 2, 'guidance_scale': 6.0, 'output_type': 'numpy', } return inputs def __a ( self : Any ): A = 'cpu' # ensure determinism for the device-dependent torch.Generator A = self.get_dummy_components() A = LDMTextToImagePipeline(**_lowercase ) pipe.to(_lowercase ) pipe.set_progress_bar_config(disable=_lowercase ) A = self.get_dummy_inputs(_lowercase ) A = pipe(**_lowercase ).images A = image[0, -3:, -3:, -1] assert image.shape == (1, 16, 16, 3) A = np.array([0.6_1_0_1, 0.6_1_5_6, 0.5_6_2_2, 0.4_8_9_5, 0.6_6_6_1, 0.3_8_0_4, 0.5_7_4_8, 0.6_1_3_6, 0.5_0_1_4] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 @slow @require_torch_gpu class lowerCamelCase__ ( unittest.TestCase ): def __a ( self : Optional[Any] ): super().tearDown() gc.collect() torch.cuda.empty_cache() def __a ( self : int , _lowercase : List[Any] , _lowercase : int=torch.floataa , _lowercase : int=0 ): A = torch.manual_seed(_lowercase ) A = np.random.RandomState(_lowercase ).standard_normal((1, 4, 32, 32) ) A = torch.from_numpy(_lowercase ).to(device=_lowercase , dtype=_lowercase ) A = { 'prompt': 'A painting of a squirrel eating a burger', 'latents': latents, 'generator': generator, 'num_inference_steps': 3, 'guidance_scale': 6.0, 'output_type': 'numpy', } return inputs def __a ( self : Union[str, Any] ): A = LDMTextToImagePipeline.from_pretrained('CompVis/ldm-text2im-large-256' ).to(_lowercase ) pipe.set_progress_bar_config(disable=_lowercase ) A = 
self.get_inputs(_lowercase ) A = pipe(**_lowercase ).images A = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 256, 256, 3) A = np.array([0.5_1_8_2_5, 0.5_2_8_5_0, 0.5_2_5_4_3, 0.5_4_2_5_8, 0.5_2_3_0_4, 0.5_2_5_6_9, 0.5_4_3_6_3, 0.5_5_2_7_6, 0.5_6_8_7_8] ) A = np.abs(expected_slice - image_slice ).max() assert max_diff < 1e-3 @nightly @require_torch_gpu class lowerCamelCase__ ( unittest.TestCase ): def __a ( self : List[Any] ): super().tearDown() gc.collect() torch.cuda.empty_cache() def __a ( self : List[Any] , _lowercase : Optional[Any] , _lowercase : Tuple=torch.floataa , _lowercase : Optional[Any]=0 ): A = torch.manual_seed(_lowercase ) A = np.random.RandomState(_lowercase ).standard_normal((1, 4, 32, 32) ) A = torch.from_numpy(_lowercase ).to(device=_lowercase , dtype=_lowercase ) A = { 'prompt': 'A painting of a squirrel eating a burger', 'latents': latents, 'generator': generator, 'num_inference_steps': 50, 'guidance_scale': 6.0, 'output_type': 'numpy', } return inputs def __a ( self : List[str] ): A = LDMTextToImagePipeline.from_pretrained('CompVis/ldm-text2im-large-256' ).to(_lowercase ) pipe.set_progress_bar_config(disable=_lowercase ) A = self.get_inputs(_lowercase ) A = pipe(**_lowercase ).images[0] A = load_numpy( 'https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy' ) A = np.abs(expected_image - image ).max() assert max_diff < 1e-3
690
1
import unittest import numpy as np from transformers.testing_utils import require_pytesseract, require_torch from transformers.utils import is_pytesseract_available, is_torch_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_pytesseract_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class lowercase ( unittest.TestCase): '''simple docstring''' def __init__( self : Optional[Any] , snake_case : Optional[int] , snake_case : Union[str, Any]=7 , snake_case : Dict=3 , snake_case : Optional[int]=18 , snake_case : Tuple=30 , snake_case : Dict=400 , snake_case : Tuple=True , snake_case : Dict=None , snake_case : Optional[Any]=True , ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = size if size is not None else {'height': 18, 'width': 18} SCREAMING_SNAKE_CASE : Union[str, Any] = parent SCREAMING_SNAKE_CASE : Union[str, Any] = batch_size SCREAMING_SNAKE_CASE : Any = num_channels SCREAMING_SNAKE_CASE : Tuple = image_size SCREAMING_SNAKE_CASE : Optional[Any] = min_resolution SCREAMING_SNAKE_CASE : List[str] = max_resolution SCREAMING_SNAKE_CASE : List[Any] = do_resize SCREAMING_SNAKE_CASE : Dict = size SCREAMING_SNAKE_CASE : str = apply_ocr def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr} @require_torch @require_pytesseract class lowercase ( snake_case__ , unittest.TestCase): '''simple docstring''' UpperCAmelCase : List[Any] = LayoutLMvaImageProcessor if is_pytesseract_available() else None def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = LayoutLMvaImageProcessingTester(self ) @property def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' 
SCREAMING_SNAKE_CASE : Optional[Any] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_A , 'do_resize' ) ) self.assertTrue(hasattr(_A , 'size' ) ) self.assertTrue(hasattr(_A , 'apply_ocr' ) ) def lowerCamelCase_ ( self : Dict ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'height': 18, 'width': 18} ) SCREAMING_SNAKE_CASE : List[str] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 ) self.assertEqual(image_processor.size , {'height': 42, 'width': 42} ) def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' pass def lowerCamelCase_ ( self : str ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images SCREAMING_SNAKE_CASE : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A ) for image in image_inputs: self.assertIsInstance(_A , Image.Image ) # Test not batched input SCREAMING_SNAKE_CASE : Dict = image_processing(image_inputs[0] , return_tensors='pt' ) self.assertEqual( encoding.pixel_values.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) self.assertIsInstance(encoding.words , _A ) self.assertIsInstance(encoding.boxes , _A ) # Test batched SCREAMING_SNAKE_CASE : Optional[Any] = image_processing(_A , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors SCREAMING_SNAKE_CASE : 
Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , numpify=_A ) for image in image_inputs: self.assertIsInstance(_A , np.ndarray ) # Test not batched input SCREAMING_SNAKE_CASE : List[str] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) # Test batched SCREAMING_SNAKE_CASE : int = image_processing(_A , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors SCREAMING_SNAKE_CASE : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , torchify=_A ) for image in image_inputs: self.assertIsInstance(_A , torch.Tensor ) # Test not batched input SCREAMING_SNAKE_CASE : Any = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) # Test batched SCREAMING_SNAKE_CASE : Tuple = image_processing(_A , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) def lowerCamelCase_ ( self : Any ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = LayoutLMvaImageProcessor() from datasets import load_dataset SCREAMING_SNAKE_CASE : Tuple = 
load_dataset('hf-internal-testing/fixtures_docvqa' , split='test' ) SCREAMING_SNAKE_CASE : str = Image.open(ds[0]['file'] ).convert('RGB' ) SCREAMING_SNAKE_CASE : Optional[int] = image_processing(_A , return_tensors='pt' ) self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) ) self.assertEqual(len(encoding.words ) , len(encoding.boxes ) ) # fmt: off # the words and boxes were obtained with Tesseract 4.1.1 SCREAMING_SNAKE_CASE : Optional[int] = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 
'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231 SCREAMING_SNAKE_CASE : Optional[int] = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 
428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231 # fmt: on self.assertListEqual(encoding.words , _A ) self.assertListEqual(encoding.boxes , _A ) # with 
apply_OCR = False SCREAMING_SNAKE_CASE : str = LayoutLMvaImageProcessor(apply_ocr=_A ) SCREAMING_SNAKE_CASE : int = image_processing(_A , return_tensors='pt' ) self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
711
import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging _lowerCamelCase : str = logging.get_logger(__name__) _lowerCamelCase : Optional[int] = { """microsoft/git-base""": """https://huggingface.co/microsoft/git-base/resolve/main/config.json""", } class lowercase ( SCREAMING_SNAKE_CASE_): '''simple docstring''' UpperCAmelCase : Optional[int] = 'git_vision_model' def __init__( self : Optional[Any] , snake_case : Any=768 , snake_case : List[str]=3072 , snake_case : Optional[Any]=12 , snake_case : Optional[Any]=12 , snake_case : Tuple=3 , snake_case : str=224 , snake_case : Tuple=16 , snake_case : Union[str, Any]="quick_gelu" , snake_case : Dict=1E-5 , snake_case : int=0.0 , snake_case : Union[str, Any]=0.02 , **snake_case : int , ): '''simple docstring''' super().__init__(**snake_case ) SCREAMING_SNAKE_CASE : Any = hidden_size SCREAMING_SNAKE_CASE : str = intermediate_size SCREAMING_SNAKE_CASE : Union[str, Any] = num_hidden_layers SCREAMING_SNAKE_CASE : str = num_attention_heads SCREAMING_SNAKE_CASE : Dict = num_channels SCREAMING_SNAKE_CASE : Union[str, Any] = patch_size SCREAMING_SNAKE_CASE : Optional[Any] = image_size SCREAMING_SNAKE_CASE : Dict = initializer_range SCREAMING_SNAKE_CASE : List[Any] = attention_dropout SCREAMING_SNAKE_CASE : Tuple = layer_norm_eps SCREAMING_SNAKE_CASE : str = hidden_act @classmethod def lowerCamelCase_ ( cls : Optional[int] , snake_case : Union[str, os.PathLike] , **snake_case : List[Any] ): '''simple docstring''' cls._set_token_in_kwargs(snake_case ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = cls.get_config_dict(snake_case , **snake_case ) # get the vision config dict if we are loading from GITConfig if config_dict.get('model_type' ) == "git": SCREAMING_SNAKE_CASE : Optional[int] = config_dict['vision_config'] if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type: logger.warning( f'''You 
are using a model of type {config_dict['model_type']} to instantiate a model of type ''' f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(snake_case , **snake_case ) class lowercase ( SCREAMING_SNAKE_CASE_): '''simple docstring''' UpperCAmelCase : int = 'git' def __init__( self : Union[str, Any] , snake_case : str=None , snake_case : List[str]=30522 , snake_case : Optional[Any]=768 , snake_case : Optional[Any]=6 , snake_case : Union[str, Any]=12 , snake_case : Union[str, Any]=3072 , snake_case : Dict="gelu" , snake_case : Optional[Any]=0.1 , snake_case : Optional[Any]=0.1 , snake_case : str=1024 , snake_case : Tuple=0.02 , snake_case : Dict=1E-12 , snake_case : List[str]=0 , snake_case : Optional[int]="absolute" , snake_case : Optional[int]=True , snake_case : Optional[int]=False , snake_case : Optional[Any]=101 , snake_case : Optional[int]=102 , snake_case : int=None , **snake_case : Any , ): '''simple docstring''' super().__init__(bos_token_id=snake_case , eos_token_id=snake_case , pad_token_id=snake_case , **snake_case ) if vision_config is None: SCREAMING_SNAKE_CASE : List[Any] = {} logger.info('vision_config is None. initializing the GitVisionConfig with default values.' 
) SCREAMING_SNAKE_CASE : Union[str, Any] = GitVisionConfig(**snake_case ) SCREAMING_SNAKE_CASE : Optional[int] = vocab_size SCREAMING_SNAKE_CASE : List[Any] = hidden_size SCREAMING_SNAKE_CASE : Any = num_hidden_layers SCREAMING_SNAKE_CASE : Optional[int] = num_attention_heads SCREAMING_SNAKE_CASE : Tuple = hidden_act SCREAMING_SNAKE_CASE : Tuple = intermediate_size SCREAMING_SNAKE_CASE : List[str] = hidden_dropout_prob SCREAMING_SNAKE_CASE : Any = attention_probs_dropout_prob SCREAMING_SNAKE_CASE : Optional[Any] = max_position_embeddings SCREAMING_SNAKE_CASE : str = initializer_range SCREAMING_SNAKE_CASE : Dict = layer_norm_eps SCREAMING_SNAKE_CASE : Any = position_embedding_type SCREAMING_SNAKE_CASE : Any = use_cache SCREAMING_SNAKE_CASE : int = tie_word_embeddings SCREAMING_SNAKE_CASE : Optional[int] = num_image_with_embedding SCREAMING_SNAKE_CASE : Tuple = bos_token_id SCREAMING_SNAKE_CASE : Union[str, Any] = eos_token_id def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = copy.deepcopy(self.__dict__ ) SCREAMING_SNAKE_CASE : int = self.vision_config.to_dict() SCREAMING_SNAKE_CASE : Dict = self.__class__.model_type return output
308
0
'''simple docstring''' def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase ) -> Tuple: '''simple docstring''' _enforce_args(__UpperCAmelCase , __UpperCAmelCase ) if n == 0: return 0 __SCREAMING_SNAKE_CASE = float("""-inf""" ) for i in range(1 , n + 1 ): __SCREAMING_SNAKE_CASE = max( __UpperCAmelCase , prices[i - 1] + naive_cut_rod_recursive(n - i , __UpperCAmelCase ) ) return max_revue def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase ) -> Any: '''simple docstring''' _enforce_args(__UpperCAmelCase , __UpperCAmelCase ) __SCREAMING_SNAKE_CASE = [float("""-inf""" ) for _ in range(n + 1 )] return _top_down_cut_rod_recursive(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[str]: '''simple docstring''' if max_rev[n] >= 0: return max_rev[n] elif n == 0: return 0 else: __SCREAMING_SNAKE_CASE = float("""-inf""" ) for i in range(1 , n + 1 ): __SCREAMING_SNAKE_CASE = max( __UpperCAmelCase , prices[i - 1] + _top_down_cut_rod_recursive(n - i , __UpperCAmelCase , __UpperCAmelCase ) , ) __SCREAMING_SNAKE_CASE = max_revenue return max_rev[n] def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase ) -> List[Any]: '''simple docstring''' _enforce_args(__UpperCAmelCase , __UpperCAmelCase ) # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of # length 0. __SCREAMING_SNAKE_CASE = [float("""-inf""" ) for _ in range(n + 1 )] __SCREAMING_SNAKE_CASE = 0 for i in range(1 , n + 1 ): __SCREAMING_SNAKE_CASE = max_rev[i] for j in range(1 , i + 1 ): __SCREAMING_SNAKE_CASE = max(__UpperCAmelCase , prices[j - 1] + max_rev[i - j] ) __SCREAMING_SNAKE_CASE = max_revenue_i return max_rev[n] def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase ) -> Union[str, Any]: '''simple docstring''' if n < 0: __SCREAMING_SNAKE_CASE = f"""n must be greater than or equal to 0. 
Got n = {n}""" raise ValueError(__UpperCAmelCase ) if n > len(__UpperCAmelCase ): __SCREAMING_SNAKE_CASE = ( """Each integral piece of rod must have a corresponding price. """ f"""Got n = {n} but length of prices = {len(__UpperCAmelCase )}""" ) raise ValueError(__UpperCAmelCase ) def __magic_name__ ( ) -> List[str]: '''simple docstring''' __SCREAMING_SNAKE_CASE = [6, 10, 12, 15, 20, 23] __SCREAMING_SNAKE_CASE = len(__UpperCAmelCase ) # the best revenue comes from cutting the rod into 6 pieces, each # of length 1 resulting in a revenue of 6 * 6 = 36. __SCREAMING_SNAKE_CASE = 36 __SCREAMING_SNAKE_CASE = top_down_cut_rod(__UpperCAmelCase , __UpperCAmelCase ) __SCREAMING_SNAKE_CASE = bottom_up_cut_rod(__UpperCAmelCase , __UpperCAmelCase ) __SCREAMING_SNAKE_CASE = naive_cut_rod_recursive(__UpperCAmelCase , __UpperCAmelCase ) assert expected_max_revenue == max_rev_top_down assert max_rev_top_down == max_rev_bottom_up assert max_rev_bottom_up == max_rev_naive if __name__ == "__main__": main()
109
"""Fast (Rust-backed) tokenizer class for XLNet."""

import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_xlnet import XLNetTokenizer
else:
    # Slow tokenizer unavailable without sentencepiece; keep the symbol defined.
    XLNetTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlnet-base-cased": None,
    "xlnet-large-cased": None,
}

SPIECE_UNDERLINE = "▁"

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4


class XLNetTokenizerFast(PreTrainedTokenizerFast):
    """XLNet tokenizer backed by the HuggingFace `tokenizers` library.

    Pads on the left (XLNet convention) and uses the ``A A <sep> B <sep> <cls>``
    special-token layout.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"
    slow_tokenizer_class = XLNetTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        # XLNet uses segment id 3 for padding positions.
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        # The slow tokenizer can only be re-created when the sentencepiece model file is known.
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Build model inputs as ``X <sep> <cls>`` or ``A <sep> B <sep> <cls>``."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Create segment ids: 0 for sequence A, 1 for sequence B, 2 for the <cls> token."""
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy the sentencepiece model file into ``save_directory``.

        Raises:
            ValueError: if the tokenizer was loaded without a sentencepiece file.
        """
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
242
0
import os import time import pytest from datasets.utils.filelock import FileLock, Timeout def __magic_name__ ( __a : Optional[int] ): '''simple docstring''' UpperCamelCase__ = FileLock(str(tmpdir / """foo.lock""" ) ) UpperCamelCase__ = FileLock(str(tmpdir / """foo.lock""" ) ) UpperCamelCase__ = 0.01 with locka.acquire(): with pytest.raises(__a ): UpperCamelCase__ = time.time() locka.acquire(__a ) assert time.time() - _start > timeout def __magic_name__ ( __a : Dict ): '''simple docstring''' UpperCamelCase__ = """a""" * 1_000 + """.lock""" UpperCamelCase__ = FileLock(str(tmpdir / filename ) ) assert locka._lock_file.endswith(""".lock""" ) assert not locka._lock_file.endswith(__a ) assert len(os.path.basename(locka._lock_file ) ) <= 255 UpperCamelCase__ = FileLock(tmpdir / filename ) with locka.acquire(): with pytest.raises(__a ): locka.acquire(0 )
701
from __future__ import annotations lowerCamelCase_ = '''#''' class __A: """simple docstring""" def __init__(self ): UpperCamelCase__ = {} def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = self._trie for char in text: if char not in trie: UpperCamelCase__ = {} UpperCamelCase__ = trie[char] UpperCamelCase__ = True def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = self._trie for char in prefix: if char in trie: UpperCamelCase__ = trie[char] else: return [] return self._elements(SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = [] for c, v in d.items(): UpperCamelCase__ = [""" """] if c == END else [(c + s) for s in self._elements(SCREAMING_SNAKE_CASE_ )] result.extend(SCREAMING_SNAKE_CASE_ ) return tuple(SCREAMING_SNAKE_CASE_ ) lowerCamelCase_ = Trie() lowerCamelCase_ = ('''depart''', '''detergent''', '''daring''', '''dog''', '''deer''', '''deal''') for word in words: trie.insert_word(word) def __magic_name__ ( __a : str ): '''simple docstring''' UpperCamelCase__ = trie.find_word(__a ) return tuple(string + word for word in suffixes ) def __magic_name__ ( ): '''simple docstring''' print(autocomplete_using_trie("""de""" ) ) if __name__ == "__main__": import doctest doctest.testmod() main()
86
0
"""Utilities for LUKE token classification: entity-aware padding and a data collator."""

import unicodedata
from dataclasses import dataclass
from typing import Optional, Union

import numpy as np

from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase


def padding_tensor(sequences, padding_value, padding_side, sequence_length):
    """Pad a batch of variable-length sequences to ``sequence_length``.

    Args:
        sequences: list of per-example sequences (lists of ints or of int pairs).
        padding_value: scalar, or a tuple for pair-valued entries (e.g. spans).
        padding_side: "right" or "left", matching the tokenizer's padding side.
        sequence_length: target length of each padded row.

    Returns:
        The padded batch as nested Python lists.
    """
    if isinstance(padding_value, tuple):
        # Pair-valued entries (e.g. (start, end) spans) get a trailing axis of size 2.
        out_tensor = np.full((len(sequences), sequence_length, 2), padding_value)
    else:
        out_tensor = np.full((len(sequences), sequence_length), padding_value)

    for i, tensor in enumerate(sequences):
        if padding_side == "right":
            if isinstance(padding_value, tuple):
                out_tensor[i, : len(tensor[:sequence_length]), :2] = tensor[:sequence_length]
            else:
                out_tensor[i, : len(tensor[:sequence_length])] = tensor[:sequence_length]
        else:
            if isinstance(padding_value, tuple):
                out_tensor[i, len(tensor[:sequence_length]) - 1 :, :2] = tensor[:sequence_length]
            else:
                out_tensor[i, len(tensor[:sequence_length]) - 1 :] = tensor[:sequence_length]

    return out_tensor.tolist()


def is_punctuation(char):
    """Return True if ``char`` is a punctuation character (ASCII ranges or Unicode category P*)."""
    cp = ord(char)
    # ASCII punctuation blocks: !-/ , :-@ , [-` , {-~
    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
        return True
    cat = unicodedata.category(char)
    if cat.startswith("P"):
        return True
    return False


@dataclass
class DataCollatorForLukeTokenClassification(DataCollatorMixin):
    """Collator that pads token ids via the tokenizer and labels/entity fields by hand.

    Attributes:
        tokenizer: the tokenizer used for padding the standard fields.
        padding: padding strategy passed through to ``tokenizer.pad``.
        max_length: optional maximum length for padding.
        pad_to_multiple_of: optional multiple for padded lengths.
        label_pad_token_id: id used to pad label sequences (ignored by the loss).
        return_tensors: framework of the returned tensors ("pt").
    """

    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    label_pad_token_id: int = -100
    return_tensors: str = "pt"

    def torch_call(self, features):
        import torch

        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        batch = self.tokenizer.pad(
            features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            # Convert to tensors only if labels are absent; otherwise we still
            # need to pad the label fields below before converting.
            return_tensors="pt" if labels is None else None,
        )

        if labels is None:
            return batch

        sequence_length = torch.tensor(batch["entity_ids"]).shape[1]
        padding_side = self.tokenizer.padding_side
        if padding_side == "right":
            batch[label_name] = [
                list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
            ]
        else:
            batch[label_name] = [
                [self.label_pad_token_id] * (sequence_length - len(label)) + list(label) for label in labels
            ]

        ner_tags = [feature["ner_tags"] for feature in features]
        batch["ner_tags"] = padding_tensor(ner_tags, -1, padding_side, sequence_length)
        original_entity_spans = [feature["original_entity_spans"] for feature in features]
        batch["original_entity_spans"] = padding_tensor(original_entity_spans, (-1, -1), padding_side, sequence_length)
        batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}

        return batch
272
'''simple docstring''' from __future__ import annotations class A__ : def __init__( self :str , SCREAMING_SNAKE_CASE :int ) -> None: '''simple docstring''' _a : int =order # a_{0} ... a_{k} _a : Optional[Any] =[1.0] + [0.0] * order # b_{0} ... b_{k} _a : Tuple =[1.0] + [0.0] * order # x[n-1] ... x[n-k] _a : List[Any] =[0.0] * self.order # y[n-1] ... y[n-k] _a : Tuple =[0.0] * self.order def __UpperCAmelCase ( self :int , SCREAMING_SNAKE_CASE :list[float] , SCREAMING_SNAKE_CASE :list[float] ) -> None: '''simple docstring''' if len(SCREAMING_SNAKE_CASE ) < self.order: _a : int =[1.0, *a_coeffs] if len(SCREAMING_SNAKE_CASE ) != self.order + 1: _a : int =( f"Expected a_coeffs to have {self.order + 1} elements " f"for {self.order}-order filter, got {len(SCREAMING_SNAKE_CASE )}" ) raise ValueError(SCREAMING_SNAKE_CASE ) if len(SCREAMING_SNAKE_CASE ) != self.order + 1: _a : Optional[Any] =( f"Expected b_coeffs to have {self.order + 1} elements " f"for {self.order}-order filter, got {len(SCREAMING_SNAKE_CASE )}" ) raise ValueError(SCREAMING_SNAKE_CASE ) _a : List[str] =a_coeffs _a : Union[str, Any] =b_coeffs def __UpperCAmelCase ( self :List[Any] , SCREAMING_SNAKE_CASE :float ) -> float: '''simple docstring''' _a : str =0.0 # Start at index 1 and do index 0 at the end. for i in range(1 , self.order + 1 ): result += ( self.b_coeffs[i] * self.input_history[i - 1] - self.a_coeffs[i] * self.output_history[i - 1] ) _a : Any =(result + self.b_coeffs[0] * sample) / self.a_coeffs[0] _a : str =self.input_history[:-1] _a : Optional[Any] =self.output_history[:-1] _a : Optional[int] =sample _a : Tuple =result return result
694
0
def __lowercase ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ): """simple docstring""" if index == number_of_items: return 0 __lowerCAmelCase = 0 __lowerCAmelCase = 0 __lowerCAmelCase = knapsack(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , index + 1 ) if weights[index] <= max_weight: __lowerCAmelCase = values[index] + knapsack( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , max_weight - weights[index] , index + 1 ) return max(UpperCAmelCase__ , UpperCAmelCase__ ) if __name__ == "__main__": import doctest doctest.testmod()
102
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCamelCase = { '''configuration_lilt''': ['''LILT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LiltConfig'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase = [ '''LILT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''LiltForQuestionAnswering''', '''LiltForSequenceClassification''', '''LiltForTokenClassification''', '''LiltModel''', '''LiltPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_lilt import ( LILT_PRETRAINED_MODEL_ARCHIVE_LIST, LiltForQuestionAnswering, LiltForSequenceClassification, LiltForTokenClassification, LiltModel, LiltPreTrainedModel, ) else: import sys lowerCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
102
1
"""simple docstring""" from collections.abc import Iterator, MutableMapping from dataclasses import dataclass from typing import Generic, TypeVar A = TypeVar('''KEY''') A = TypeVar('''VAL''') @dataclass(frozen=_UpperCamelCase , slots=_UpperCamelCase ) class __lowercase ( Generic[KEY, VAL] ): '''simple docstring''' __lowerCAmelCase = 42 __lowerCAmelCase = 42 class __lowercase ( _Item ): '''simple docstring''' def __init__( self ): super().__init__(_UpperCAmelCase , _UpperCAmelCase ) def __bool__( self ): return False A = _DeletedItem() class __lowercase ( MutableMapping[KEY, VAL] ): '''simple docstring''' def __init__( self , _UpperCAmelCase = 8 , _UpperCAmelCase = 0.7_5 ): __a : Union[str, Any] = initial_block_size __a : list[_Item | None] = [None] * initial_block_size assert 0.0 < capacity_factor < 1.0 __a : Optional[Any] = capacity_factor __a : Union[str, Any] = 0 def _lowerCamelCase ( self , _UpperCAmelCase ): return hash(_UpperCAmelCase ) % len(self._buckets ) def _lowerCamelCase ( self , _UpperCAmelCase ): return (ind + 1) % len(self._buckets ) def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): __a : List[str] = self._buckets[ind] if not stored: __a : int = _Item(_UpperCAmelCase , _UpperCAmelCase ) self._len += 1 return True elif stored.key == key: __a : Dict = _Item(_UpperCAmelCase , _UpperCAmelCase ) return True else: return False def _lowerCamelCase ( self ): __a : Optional[int] = len(self._buckets ) * self._capacity_factor return len(self ) >= int(_UpperCAmelCase ) def _lowerCamelCase ( self ): if len(self._buckets ) <= self._initial_block_size: return False __a : Optional[Any] = len(self._buckets ) * self._capacity_factor / 2 return len(self ) < limit def _lowerCamelCase ( self , _UpperCAmelCase ): __a : List[str] = self._buckets __a : List[Any] = [None] * new_size __a : int = 0 for item in old_buckets: if item: self._add_item(item.key , item.val ) def _lowerCamelCase ( self ): self._resize(len(self._buckets ) * 2 ) def 
_lowerCamelCase ( self ): self._resize(len(self._buckets ) // 2 ) def _lowerCamelCase ( self , _UpperCAmelCase ): __a : int = self._get_bucket_index(_UpperCAmelCase ) for _ in range(len(self._buckets ) ): yield ind __a : Dict = self._get_next_ind(_UpperCAmelCase ) def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase ): for ind in self._iterate_buckets(_UpperCAmelCase ): if self._try_set(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): break def __setitem__( self , _UpperCAmelCase , _UpperCAmelCase ): if self._is_full(): self._size_up() self._add_item(_UpperCAmelCase , _UpperCAmelCase ) def __delitem__( self , _UpperCAmelCase ): for ind in self._iterate_buckets(_UpperCAmelCase ): __a : List[str] = self._buckets[ind] if item is None: raise KeyError(_UpperCAmelCase ) if item is _deleted: continue if item.key == key: __a : Dict = _deleted self._len -= 1 break if self._is_sparse(): self._size_down() def __getitem__( self , _UpperCAmelCase ): for ind in self._iterate_buckets(_UpperCAmelCase ): __a : List[str] = self._buckets[ind] if item is None: break if item is _deleted: continue if item.key == key: return item.val raise KeyError(_UpperCAmelCase ) def __len__( self ): return self._len def __iter__( self ): yield from (item.key for item in self._buckets if item) def __repr__( self ): __a : List[str] = ''' ,'''.join( f"""{item.key}: {item.val}""" for item in self._buckets if item ) return f"""HashMap({val_string})"""
52
import argparse import json import os import sys import tempfile import unittest from argparse import Namespace from dataclasses import dataclass, field from enum import Enum from pathlib import Path from typing import List, Literal, Optional import yaml from transformers import HfArgumentParser, TrainingArguments from transformers.hf_argparser import make_choice_type_function, string_to_bool # Since Python 3.10, we can use the builtin `|` operator for Union types # See PEP 604: https://peps.python.org/pep-0604 lowerCAmelCase_ = sys.version_info >= (3, 10) def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None ): return field(default_factory=lambda: default , metadata=SCREAMING_SNAKE_CASE__ ) @dataclass class snake_case_ : '''simple docstring''' SCREAMING_SNAKE_CASE : int SCREAMING_SNAKE_CASE : float SCREAMING_SNAKE_CASE : str SCREAMING_SNAKE_CASE : bool @dataclass class snake_case_ : '''simple docstring''' SCREAMING_SNAKE_CASE : int = 42 SCREAMING_SNAKE_CASE : str = field(default="toto" , metadata={"help": "help message"} ) @dataclass class snake_case_ : '''simple docstring''' SCREAMING_SNAKE_CASE : bool = False SCREAMING_SNAKE_CASE : bool = True SCREAMING_SNAKE_CASE : Optional[bool] = None class snake_case_ ( __A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = "titi" SCREAMING_SNAKE_CASE : Any = "toto" class snake_case_ ( __A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = "titi" SCREAMING_SNAKE_CASE : Optional[Any] = "toto" SCREAMING_SNAKE_CASE : Any = 42 @dataclass class snake_case_ : '''simple docstring''' SCREAMING_SNAKE_CASE : BasicEnum = "toto" def snake_case__( self : Tuple ) ->List[str]: snake_case_ = BasicEnum(self.foo ) @dataclass class snake_case_ : '''simple docstring''' SCREAMING_SNAKE_CASE : MixedTypeEnum = "toto" def snake_case__( self : Union[str, Any] ) ->Dict: snake_case_ = MixedTypeEnum(self.foo ) @dataclass class snake_case_ : '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = 
None SCREAMING_SNAKE_CASE : Optional[float] = field(default=__A , metadata={"help": "help message"} ) SCREAMING_SNAKE_CASE : Optional[str] = None SCREAMING_SNAKE_CASE : Optional[List[str]] = list_field(default=[] ) SCREAMING_SNAKE_CASE : Optional[List[int]] = list_field(default=[] ) @dataclass class snake_case_ : '''simple docstring''' SCREAMING_SNAKE_CASE : List[int] = list_field(default=[] ) SCREAMING_SNAKE_CASE : List[int] = list_field(default=[1, 2, 3] ) SCREAMING_SNAKE_CASE : List[str] = list_field(default=["Hallo", "Bonjour", "Hello"] ) SCREAMING_SNAKE_CASE : List[float] = list_field(default=[0.1, 0.2, 0.3] ) @dataclass class snake_case_ : '''simple docstring''' SCREAMING_SNAKE_CASE : List[int] = field() SCREAMING_SNAKE_CASE : str = field() SCREAMING_SNAKE_CASE : BasicEnum = field() def snake_case__( self : Optional[Any] ) ->Tuple: snake_case_ = BasicEnum(self.required_enum ) @dataclass class snake_case_ : '''simple docstring''' SCREAMING_SNAKE_CASE : int SCREAMING_SNAKE_CASE : "BasicEnum" = field() SCREAMING_SNAKE_CASE : "Optional[bool]" = None SCREAMING_SNAKE_CASE : "str" = field(default="toto" , metadata={"help": "help message"} ) SCREAMING_SNAKE_CASE : "List[str]" = list_field(default=["Hallo", "Bonjour", "Hello"] ) if is_python_no_less_than_3_10: @dataclass class snake_case_ : '''simple docstring''' SCREAMING_SNAKE_CASE : bool = False SCREAMING_SNAKE_CASE : bool = True SCREAMING_SNAKE_CASE : bool | None = None @dataclass class snake_case_ : '''simple docstring''' SCREAMING_SNAKE_CASE : int | None = None SCREAMING_SNAKE_CASE : float | None = field(default=__A , metadata={"help": "help message"} ) SCREAMING_SNAKE_CASE : str | None = None SCREAMING_SNAKE_CASE : list[str] | None = list_field(default=[] ) SCREAMING_SNAKE_CASE : list[int] | None = list_field(default=[] ) class snake_case_ ( unittest.TestCase ): '''simple docstring''' def snake_case__( self : Dict , _UpperCamelCase : argparse.ArgumentParser , _UpperCamelCase : argparse.ArgumentParser ) ->str: 
self.assertEqual(len(a._actions ) , len(b._actions ) ) for x, y in zip(a._actions , b._actions ): snake_case_ = {k: v for k, v in vars(_UpperCamelCase ).items() if k != '''container'''} snake_case_ = {k: v for k, v in vars(_UpperCamelCase ).items() if k != '''container'''} # Choices with mixed type have custom function as "type" # So we need to compare results directly for equality if xx.get('''choices''' , _UpperCamelCase ) and yy.get('''choices''' , _UpperCamelCase ): for expected_choice in yy["choices"] + xx["choices"]: self.assertEqual(xx['''type'''](_UpperCamelCase ) , yy['''type'''](_UpperCamelCase ) ) del xx["type"], yy["type"] self.assertEqual(_UpperCamelCase , _UpperCamelCase ) def snake_case__( self : Optional[Any] ) ->Dict: snake_case_ = HfArgumentParser(_UpperCamelCase ) snake_case_ = argparse.ArgumentParser() expected.add_argument('''--foo''' , type=_UpperCamelCase , required=_UpperCamelCase ) expected.add_argument('''--bar''' , type=_UpperCamelCase , required=_UpperCamelCase ) expected.add_argument('''--baz''' , type=_UpperCamelCase , required=_UpperCamelCase ) expected.add_argument('''--flag''' , type=_UpperCamelCase , default=_UpperCamelCase , const=_UpperCamelCase , nargs='''?''' ) self.argparsersEqual(_UpperCamelCase , _UpperCamelCase ) snake_case_ = ['''--foo''', '''1''', '''--baz''', '''quux''', '''--bar''', '''0.5'''] ((snake_case_), ) = parser.parse_args_into_dataclasses(_UpperCamelCase , look_for_args_file=_UpperCamelCase ) self.assertFalse(example.flag ) def snake_case__( self : Tuple ) ->Optional[int]: snake_case_ = HfArgumentParser(_UpperCamelCase ) snake_case_ = argparse.ArgumentParser() expected.add_argument('''--foo''' , default=4_2 , type=_UpperCamelCase ) expected.add_argument('''--baz''' , default='''toto''' , type=_UpperCamelCase , help='''help message''' ) self.argparsersEqual(_UpperCamelCase , _UpperCamelCase ) def snake_case__( self : Tuple ) ->Tuple: snake_case_ = argparse.ArgumentParser() expected.add_argument('''--foo''' , 
type=_UpperCamelCase , default=_UpperCamelCase , const=_UpperCamelCase , nargs='''?''' ) expected.add_argument('''--baz''' , type=_UpperCamelCase , default=_UpperCamelCase , const=_UpperCamelCase , nargs='''?''' ) # A boolean no_* argument always has to come after its "default: True" regular counter-part # and its default must be set to False expected.add_argument('''--no_baz''' , action='''store_false''' , default=_UpperCamelCase , dest='''baz''' ) expected.add_argument('''--opt''' , type=_UpperCamelCase , default=_UpperCamelCase ) snake_case_ = [WithDefaultBoolExample] if is_python_no_less_than_3_10: dataclass_types.append(_UpperCamelCase ) for dataclass_type in dataclass_types: snake_case_ = HfArgumentParser(_UpperCamelCase ) self.argparsersEqual(_UpperCamelCase , _UpperCamelCase ) snake_case_ = parser.parse_args([] ) self.assertEqual(_UpperCamelCase , Namespace(foo=_UpperCamelCase , baz=_UpperCamelCase , opt=_UpperCamelCase ) ) snake_case_ = parser.parse_args(['''--foo''', '''--no_baz'''] ) self.assertEqual(_UpperCamelCase , Namespace(foo=_UpperCamelCase , baz=_UpperCamelCase , opt=_UpperCamelCase ) ) snake_case_ = parser.parse_args(['''--foo''', '''--baz'''] ) self.assertEqual(_UpperCamelCase , Namespace(foo=_UpperCamelCase , baz=_UpperCamelCase , opt=_UpperCamelCase ) ) snake_case_ = parser.parse_args(['''--foo''', '''True''', '''--baz''', '''True''', '''--opt''', '''True'''] ) self.assertEqual(_UpperCamelCase , Namespace(foo=_UpperCamelCase , baz=_UpperCamelCase , opt=_UpperCamelCase ) ) snake_case_ = parser.parse_args(['''--foo''', '''False''', '''--baz''', '''False''', '''--opt''', '''False'''] ) self.assertEqual(_UpperCamelCase , Namespace(foo=_UpperCamelCase , baz=_UpperCamelCase , opt=_UpperCamelCase ) ) def snake_case__( self : Tuple ) ->Tuple: snake_case_ = HfArgumentParser(_UpperCamelCase ) snake_case_ = argparse.ArgumentParser() expected.add_argument( '''--foo''' , default='''toto''' , choices=['''titi''', '''toto''', 4_2] , 
type=make_choice_type_function(['''titi''', '''toto''', 4_2] ) , ) self.argparsersEqual(_UpperCamelCase , _UpperCamelCase ) snake_case_ = parser.parse_args([] ) self.assertEqual(args.foo , '''toto''' ) snake_case_ = parser.parse_args_into_dataclasses([] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.toto ) snake_case_ = parser.parse_args(['''--foo''', '''titi'''] ) self.assertEqual(args.foo , '''titi''' ) snake_case_ = parser.parse_args_into_dataclasses(['''--foo''', '''titi'''] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.titi ) snake_case_ = parser.parse_args(['''--foo''', '''42'''] ) self.assertEqual(args.foo , 4_2 ) snake_case_ = parser.parse_args_into_dataclasses(['''--foo''', '''42'''] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo ) def snake_case__( self : Tuple ) ->Union[str, Any]: @dataclass class snake_case_ : '''simple docstring''' SCREAMING_SNAKE_CASE : Literal["titi", "toto", 42] = "toto" snake_case_ = HfArgumentParser(_UpperCamelCase ) snake_case_ = argparse.ArgumentParser() expected.add_argument( '''--foo''' , default='''toto''' , choices=('''titi''', '''toto''', 4_2) , type=make_choice_type_function(['''titi''', '''toto''', 4_2] ) , ) self.argparsersEqual(_UpperCamelCase , _UpperCamelCase ) snake_case_ = parser.parse_args([] ) self.assertEqual(args.foo , '''toto''' ) snake_case_ = parser.parse_args(['''--foo''', '''titi'''] ) self.assertEqual(args.foo , '''titi''' ) snake_case_ = parser.parse_args(['''--foo''', '''42'''] ) self.assertEqual(args.foo , 4_2 ) def snake_case__( self : List[str] ) ->int: snake_case_ = HfArgumentParser(_UpperCamelCase ) snake_case_ = argparse.ArgumentParser() expected.add_argument('''--foo_int''' , nargs='''+''' , default=[] , type=_UpperCamelCase ) expected.add_argument('''--bar_int''' , nargs='''+''' , default=[1, 2, 3] , type=_UpperCamelCase ) expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=_UpperCamelCase ) 
expected.add_argument('''--foo_float''' , nargs='''+''' , default=[0.1, 0.2, 0.3] , type=_UpperCamelCase ) self.argparsersEqual(_UpperCamelCase , _UpperCamelCase ) snake_case_ = parser.parse_args([] ) self.assertEqual( _UpperCamelCase , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=['''Hallo''', '''Bonjour''', '''Hello'''] , foo_float=[0.1, 0.2, 0.3] ) , ) snake_case_ = parser.parse_args('''--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'''.split() ) self.assertEqual(_UpperCamelCase , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=['''a''', '''b''', '''c'''] , foo_float=[0.1, 0.7] ) ) def snake_case__( self : Optional[Any] ) ->List[Any]: snake_case_ = argparse.ArgumentParser() expected.add_argument('''--foo''' , default=_UpperCamelCase , type=_UpperCamelCase ) expected.add_argument('''--bar''' , default=_UpperCamelCase , type=_UpperCamelCase , help='''help message''' ) expected.add_argument('''--baz''' , default=_UpperCamelCase , type=_UpperCamelCase ) expected.add_argument('''--ces''' , nargs='''+''' , default=[] , type=_UpperCamelCase ) expected.add_argument('''--des''' , nargs='''+''' , default=[] , type=_UpperCamelCase ) snake_case_ = [OptionalExample] if is_python_no_less_than_3_10: dataclass_types.append(_UpperCamelCase ) for dataclass_type in dataclass_types: snake_case_ = HfArgumentParser(_UpperCamelCase ) self.argparsersEqual(_UpperCamelCase , _UpperCamelCase ) snake_case_ = parser.parse_args([] ) self.assertEqual(_UpperCamelCase , Namespace(foo=_UpperCamelCase , bar=_UpperCamelCase , baz=_UpperCamelCase , ces=[] , des=[] ) ) snake_case_ = parser.parse_args('''--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'''.split() ) self.assertEqual(_UpperCamelCase , Namespace(foo=1_2 , bar=3.14 , baz='''42''' , ces=['''a''', '''b''', '''c'''] , des=[1, 2, 3] ) ) def snake_case__( self : Union[str, Any] ) ->Optional[int]: snake_case_ = HfArgumentParser(_UpperCamelCase ) snake_case_ = argparse.ArgumentParser() 
expected.add_argument('''--required_list''' , nargs='''+''' , type=_UpperCamelCase , required=_UpperCamelCase ) expected.add_argument('''--required_str''' , type=_UpperCamelCase , required=_UpperCamelCase ) expected.add_argument( '''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=_UpperCamelCase , ) self.argparsersEqual(_UpperCamelCase , _UpperCamelCase ) def snake_case__( self : List[str] ) ->int: snake_case_ = HfArgumentParser(_UpperCamelCase ) snake_case_ = argparse.ArgumentParser() expected.add_argument('''--foo''' , type=_UpperCamelCase , required=_UpperCamelCase ) expected.add_argument( '''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=_UpperCamelCase , ) expected.add_argument('''--opt''' , type=_UpperCamelCase , default=_UpperCamelCase ) expected.add_argument('''--baz''' , default='''toto''' , type=_UpperCamelCase , help='''help message''' ) expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=_UpperCamelCase ) self.argparsersEqual(_UpperCamelCase , _UpperCamelCase ) def snake_case__( self : Dict ) ->Any: snake_case_ = HfArgumentParser(_UpperCamelCase ) snake_case_ = { '''foo''': 1_2, '''bar''': 3.14, '''baz''': '''42''', '''flag''': True, } snake_case_ = parser.parse_dict(_UpperCamelCase )[0] snake_case_ = BasicExample(**_UpperCamelCase ) self.assertEqual(_UpperCamelCase , _UpperCamelCase ) def snake_case__( self : int ) ->Dict: snake_case_ = HfArgumentParser(_UpperCamelCase ) snake_case_ = { '''foo''': 1_2, '''bar''': 3.14, '''baz''': '''42''', '''flag''': True, '''extra''': 4_2, } self.assertRaises(_UpperCamelCase , parser.parse_dict , _UpperCamelCase , allow_extra_keys=_UpperCamelCase ) def snake_case__( self : str ) ->Tuple: snake_case_ = HfArgumentParser(_UpperCamelCase ) snake_case_ = { '''foo''': 1_2, '''bar''': 3.14, '''baz''': '''42''', 
'''flag''': True, } with tempfile.TemporaryDirectory() as tmp_dir: snake_case_ = os.path.join(_UpperCamelCase , '''temp_json''' ) os.mkdir(_UpperCamelCase ) with open(temp_local_path + '''.json''' , '''w+''' ) as f: json.dump(_UpperCamelCase , _UpperCamelCase ) snake_case_ = parser.parse_yaml_file(Path(temp_local_path + '''.json''' ) )[0] snake_case_ = BasicExample(**_UpperCamelCase ) self.assertEqual(_UpperCamelCase , _UpperCamelCase ) def snake_case__( self : Optional[int] ) ->str: snake_case_ = HfArgumentParser(_UpperCamelCase ) snake_case_ = { '''foo''': 1_2, '''bar''': 3.14, '''baz''': '''42''', '''flag''': True, } with tempfile.TemporaryDirectory() as tmp_dir: snake_case_ = os.path.join(_UpperCamelCase , '''temp_yaml''' ) os.mkdir(_UpperCamelCase ) with open(temp_local_path + '''.yaml''' , '''w+''' ) as f: yaml.dump(_UpperCamelCase , _UpperCamelCase ) snake_case_ = parser.parse_yaml_file(Path(temp_local_path + '''.yaml''' ) )[0] snake_case_ = BasicExample(**_UpperCamelCase ) self.assertEqual(_UpperCamelCase , _UpperCamelCase ) def snake_case__( self : Any ) ->Any: snake_case_ = HfArgumentParser(_UpperCamelCase ) self.assertIsNotNone(_UpperCamelCase )
39
0
"""Convert an OpenAI Whisper checkpoint to a Transformers WhisperForConditionalGeneration."""
import argparse
import hashlib
import io
import os
import urllib.request
import warnings

import torch
from torch import nn
from tqdm import tqdm

from transformers import WhisperConfig, WhisperForConditionalGeneration


# Official OpenAI checkpoint URLs; the path segment before the filename is the
# expected SHA-256 digest of the file (used by `_download` for verification).
_MODELS = {
    "tiny.en": "https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt",
    "tiny": "https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt",
    "base.en": "https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt",
    "base": "https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt",
    "small.en": "https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt",
    "small": "https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt",
    "medium.en": "https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt",
    "medium": "https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt",
    "large": "https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt",
    "large-v2": "https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt",
}


def remove_ignore_keys_(state_dict):
    """Drop top-level keys of the OpenAI state dict that have no HF counterpart (in place)."""
    ignore_keys = ["layers", "blocks"]
    for k in ignore_keys:
        state_dict.pop(k, None)


WHISPER_MAPPING = {
    "blocks": "layers",
    "mlp.0": "fc1",
    "mlp.2": "fc2",
    "mlp_ln": "final_layer_norm",
    ".attn.query": ".self_attn.q_proj",
    ".attn.key": ".self_attn.k_proj",
    ".attn.value": ".self_attn.v_proj",
    ".attn_ln": ".self_attn_layer_norm",
    ".attn.out": ".self_attn.out_proj",
    ".cross_attn.query": ".encoder_attn.q_proj",
    ".cross_attn.key": ".encoder_attn.k_proj",
    ".cross_attn.value": ".encoder_attn.v_proj",
    ".cross_attn_ln": ".encoder_attn_layer_norm",
    ".cross_attn.out": ".encoder_attn.out_proj",
    "decoder.ln.": "decoder.layer_norm.",
    "encoder.ln.": "encoder.layer_norm.",
    "token_embedding": "embed_tokens",
    "encoder.positional_embedding": "encoder.embed_positions.weight",
    "decoder.positional_embedding": "decoder.embed_positions.weight",
    "ln_post": "layer_norm",
}


def rename_keys(s_dict):
    """Rename OpenAI-style state-dict keys to HF names using WHISPER_MAPPING (in place).

    Returns the same dict for convenience; each substitution is printed for auditability.
    """
    keys = list(s_dict.keys())
    for key in keys:
        new_key = key
        for k, v in WHISPER_MAPPING.items():
            if k in key:
                new_key = new_key.replace(k, v)
        print(f"{key} -> {new_key}")
        s_dict[new_key] = s_dict.pop(key)
    return s_dict


def make_linear_from_emb(emb):
    """Build a bias-free Linear layer sharing the embedding's weight (ties the LM head)."""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def _download(url: str, root: str = os.path.join(os.path.expanduser("~"), ".cache", "whisper")) -> bytes:
    """Download `url` into `root` (with SHA-256 verification) and return the raw bytes.

    The expected digest is taken from the second-to-last URL path segment.
    Raises RuntimeError if the target path is a non-regular file or if the
    checksum of a freshly downloaded file does not match.
    """
    os.makedirs(root, exist_ok=True)
    filename = os.path.basename(url)
    expected_sha256 = url.split("/")[-2]
    download_target = os.path.join(root, filename)

    if os.path.exists(download_target) and not os.path.isfile(download_target):
        raise RuntimeError(f"{download_target} exists and is not a regular file")

    if os.path.isfile(download_target):
        with open(download_target, "rb") as f:
            model_bytes = f.read()
        if hashlib.sha256(model_bytes).hexdigest() == expected_sha256:
            return model_bytes
        warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file")

    with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
        with tqdm(
            total=int(source.info().get("Content-Length")), ncols=80, unit="iB", unit_scale=True, unit_divisor=1024
        ) as loop:
            while True:
                buffer = source.read(8192)
                if not buffer:
                    break
                output.write(buffer)
                loop.update(len(buffer))

    with open(download_target, "rb") as f:
        model_bytes = f.read()
    if hashlib.sha256(model_bytes).hexdigest() != expected_sha256:
        raise RuntimeError(
            "Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model."
        )
    return model_bytes


def convert_openai_whisper_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    """Convert an OpenAI Whisper checkpoint (name from _MODELS or a local .pt path)
    to a HF WhisperForConditionalGeneration saved at `pytorch_dump_folder_path`."""
    if ".pt" not in checkpoint_path:
        # `_download` returns raw bytes, so wrap them before torch.load
        # (the original code indexed the bytes object directly — TypeError).
        original_checkpoint = torch.load(io.BytesIO(_download(_MODELS[checkpoint_path])), map_location="cpu")
    else:
        original_checkpoint = torch.load(checkpoint_path, map_location="cpu")
    dimensions = original_checkpoint["dims"]
    state_dict = original_checkpoint["model_state_dict"]
    proj_out_weights = state_dict["decoder.token_embedding.weight"]
    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)

    tie_embeds = True
    ffn_dim = state_dict["decoder.layers.0.fc1.weight"].shape[0]

    config = WhisperConfig(
        vocab_size=dimensions["n_vocab"],
        encoder_ffn_dim=ffn_dim,
        decoder_ffn_dim=ffn_dim,
        num_mel_bins=dimensions["n_mels"],
        d_model=dimensions["n_audio_state"],
        max_target_positions=dimensions["n_text_ctx"],
        encoder_layers=dimensions["n_audio_layer"],
        encoder_attention_heads=dimensions["n_audio_head"],
        decoder_layers=dimensions["n_text_layer"],
        # bug fix: the decoder head count lives under "n_text_head",
        # not "n_text_state" (that is the decoder hidden size).
        decoder_attention_heads=dimensions["n_text_head"],
        max_source_positions=dimensions["n_audio_ctx"],
    )

    model = WhisperForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            f" but all the following weights are missing {missing}"
        )

    if tie_embeds:
        model.proj_out = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.proj_out.weight.data = proj_out_weights

    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--checkpoint_path", type=str, help="Path to the downloaded checkpoints")
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()

    convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
589
from dataclasses import dataclass
from typing import Optional

import numpy as np
import torch
import torch.nn as nn

from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_2d_blocks import UNetMidBlock2D, get_down_block, get_up_block


@dataclass
class DecoderOutput(BaseOutput):
    """Output of a decoding step.

    Attributes:
        sample: the decoded image batch produced by the decoder.
    """

    sample: torch.FloatTensor


class Encoder(nn.Module):
    """Convolutional VAE encoder: conv-in -> down blocks -> mid block -> norm/act/conv-out.

    When `double_z` is True the output has `2 * out_channels` channels
    (mean and log-variance halves for a diagonal Gaussian posterior).
    """

    def __init__(
        self,
        in_channels=3,
        out_channels=3,
        down_block_types=("DownEncoderBlock2D",),
        block_out_channels=(64,),
        layers_per_block=2,
        norm_num_groups=32,
        act_fn="silu",
        double_z=True,
    ):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = nn.Conv2d(in_channels, block_out_channels[0], kernel_size=3, stride=1, padding=1)

        self.mid_block = None
        self.down_blocks = nn.ModuleList([])

        # down
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(
                down_block_type,
                num_layers=self.layers_per_block,
                in_channels=input_channel,
                out_channels=output_channel,
                add_downsample=not is_final_block,
                resnet_eps=1e-6,
                downsample_padding=0,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                attention_head_dim=output_channel,
                temb_channels=None,
            )
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1],
            resnet_eps=1e-6,
            resnet_act_fn=act_fn,
            output_scale_factor=1,
            resnet_time_scale_shift="default",
            attention_head_dim=block_out_channels[-1],
            resnet_groups=norm_num_groups,
            temb_channels=None,
        )

        # out
        self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[-1], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()

        conv_out_channels = 2 * out_channels if double_z else out_channels
        self.conv_out = nn.Conv2d(block_out_channels[-1], conv_out_channels, 3, padding=1)

        self.gradient_checkpointing = False

    def forward(self, x):
        """Encode an image batch `x` into (possibly doubled) latent channels."""
        sample = x
        sample = self.conv_in(sample)

        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            # `use_reentrant=False` only exists on torch >= 1.11
            if is_torch_version(">=", "1.11.0"):
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(down_block), sample, use_reentrant=False
                    )
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, use_reentrant=False
                )
            else:
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(down_block), sample)
                # middle
                sample = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block), sample)
        else:
            # down
            for down_block in self.down_blocks:
                sample = down_block(sample)
            # middle
            sample = self.mid_block(sample)

        # post-process
        sample = self.conv_norm_out(sample)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)
        return sample


class Decoder(nn.Module):
    """Convolutional VAE decoder: conv-in -> mid block -> up blocks -> norm/act/conv-out.

    `norm_type` selects between GroupNorm ("group") and SpatialNorm ("spatial");
    with "spatial", `latent_embeds` conditions the normalization layers.
    """

    def __init__(
        self,
        in_channels=3,
        out_channels=3,
        up_block_types=("UpDecoderBlock2D",),
        block_out_channels=(64,),
        layers_per_block=2,
        norm_num_groups=32,
        act_fn="silu",
        norm_type="group",
    ):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = nn.Conv2d(in_channels, block_out_channels[-1], kernel_size=3, stride=1, padding=1)

        self.mid_block = None
        self.up_blocks = nn.ModuleList([])

        temb_channels = in_channels if norm_type == "spatial" else None

        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1],
            resnet_eps=1e-6,
            resnet_act_fn=act_fn,
            output_scale_factor=1,
            resnet_time_scale_shift="default" if norm_type == "group" else norm_type,
            attention_head_dim=block_out_channels[-1],
            resnet_groups=norm_num_groups,
            temb_channels=temb_channels,
        )

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            up_block = get_up_block(
                up_block_type,
                num_layers=self.layers_per_block + 1,
                in_channels=prev_output_channel,
                out_channels=output_channel,
                prev_output_channel=None,
                add_upsample=not is_final_block,
                resnet_eps=1e-6,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                attention_head_dim=output_channel,
                temb_channels=temb_channels,
                resnet_time_scale_shift=norm_type,
            )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        if norm_type == "spatial":
            self.conv_norm_out = SpatialNorm(block_out_channels[0], temb_channels)
        else:
            self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()
        self.conv_out = nn.Conv2d(block_out_channels[0], out_channels, 3, padding=1)

        self.gradient_checkpointing = False

    def forward(self, z, latent_embeds=None):
        """Decode latents `z` (optionally conditioned on `latent_embeds`) into an image batch."""
        sample = z
        sample = self.conv_in(sample)

        upscale_dtype = next(iter(self.up_blocks.parameters())).dtype
        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            if is_torch_version(">=", "1.11.0"):
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds, use_reentrant=False
                )
                sample = sample.to(upscale_dtype)

                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(up_block), sample, latent_embeds, use_reentrant=False
                    )
            else:
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds
                )
                sample = sample.to(upscale_dtype)

                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(up_block), sample, latent_embeds)
        else:
            # middle
            sample = self.mid_block(sample, latent_embeds)
            sample = sample.to(upscale_dtype)

            # up
            for up_block in self.up_blocks:
                sample = up_block(sample, latent_embeds)

        # post-process
        if latent_embeds is None:
            sample = self.conv_norm_out(sample)
        else:
            sample = self.conv_norm_out(sample, latent_embeds)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)
        return sample


class VectorQuantizer(nn.Module):
    """Discretization bottleneck of a VQ-VAE: maps each spatial latent vector to its
    nearest codebook entry, with an optional index remapping to a restricted code set.
    """

    def __init__(self, n_e, vq_embed_dim, beta, remap=None, unknown_index="random", sane_index_shape=False, legacy=True):
        super().__init__()
        self.n_e = n_e
        self.vq_embed_dim = vq_embed_dim
        self.beta = beta
        self.legacy = legacy

        self.embedding = nn.Embedding(self.n_e, self.vq_embed_dim)
        self.embedding.weight.data.uniform_(-1.0 / self.n_e, 1.0 / self.n_e)

        self.remap = remap
        if self.remap is not None:
            self.register_buffer("used", torch.tensor(np.load(self.remap)))
            self.re_embed = self.used.shape[0]
            self.unknown_index = unknown_index  # "random" or "extra" or integer
            if self.unknown_index == "extra":
                self.unknown_index = self.re_embed
                self.re_embed = self.re_embed + 1
            print(
                f"Remapping {self.n_e} indices to {self.re_embed} indices. "
                f"Using {self.unknown_index} for unknown indices."
            )
        else:
            self.re_embed = n_e

        self.sane_index_shape = sane_index_shape

    def remap_to_used(self, inds):
        """Map raw codebook indices to positions in the restricted `used` set;
        unknown indices become `unknown_index` (or a random used index)."""
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        match = (inds[:, :, None] == used[None, None, ...]).long()
        new = match.argmax(-1)
        unknown = match.sum(2) < 1
        if self.unknown_index == "random":
            new[unknown] = torch.randint(0, self.re_embed, size=new[unknown].shape).to(device=new.device)
        else:
            new[unknown] = self.unknown_index
        return new.reshape(ishape)

    def unmap_to_all(self, inds):
        """Inverse of `remap_to_used`: map restricted indices back to raw codebook indices."""
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        if self.re_embed > self.used.shape[0]:  # extra token
            inds[inds >= self.used.shape[0]] = 0  # simply set to zero
        back = torch.gather(used[None, :][inds.shape[0] * [0], :], 1, inds)
        return back.reshape(ishape)

    def forward(self, z):
        # reshape z -> (batch, height, width, channel) and flatten
        z = z.permute(0, 2, 3, 1).contiguous()
        z_flattened = z.view(-1, self.vq_embed_dim)

        # distances from z to embeddings e_j: (z - e)^2 = z^2 + e^2 - 2 e * z
        min_encoding_indices = torch.argmin(torch.cdist(z_flattened, self.embedding.weight), dim=1)

        z_q = self.embedding(min_encoding_indices).view(z.shape)
        perplexity = None
        min_encodings = None

        # compute loss for embedding
        if not self.legacy:
            loss = self.beta * torch.mean((z_q.detach() - z) ** 2) + torch.mean((z_q - z.detach()) ** 2)
        else:
            loss = torch.mean((z_q.detach() - z) ** 2) + self.beta * torch.mean((z_q - z.detach()) ** 2)

        # preserve gradients (straight-through estimator)
        z_q = z + (z_q - z).detach()

        # reshape back to match original input shape
        z_q = z_q.permute(0, 3, 1, 2).contiguous()

        if self.remap is not None:
            min_encoding_indices = min_encoding_indices.reshape(z.shape[0], -1)  # add batch axis
            min_encoding_indices = self.remap_to_used(min_encoding_indices)
            min_encoding_indices = min_encoding_indices.reshape(-1, 1)  # flatten

        if self.sane_index_shape:
            min_encoding_indices = min_encoding_indices.reshape(z_q.shape[0], z_q.shape[2], z_q.shape[3])

        return z_q, loss, (perplexity, min_encodings, min_encoding_indices)

    def get_codebook_entry(self, indices, shape):
        """Look up codebook vectors for `indices`; if `shape` is given, reshape to NCHW."""
        if self.remap is not None:
            indices = indices.reshape(shape[0], -1)  # add batch axis
            indices = self.unmap_to_all(indices)
            indices = indices.reshape(-1)  # flatten again

        # get quantized latent vectors
        z_q = self.embedding(indices)

        if shape is not None:
            z_q = z_q.view(shape)
            # reshape back to match original input shape
            z_q = z_q.permute(0, 3, 1, 2).contiguous()

        return z_q


class DiagonalGaussianDistribution(object):
    """Diagonal Gaussian over image latents, parameterized by channel-concatenated
    mean and log-variance (split along dim=1). `deterministic=True` zeroes the variance.
    """

    def __init__(self, parameters, deterministic=False):
        self.parameters = parameters
        self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)
        # clamp for numerical stability of exp()
        self.logvar = torch.clamp(self.logvar, -30.0, 20.0)
        self.deterministic = deterministic
        self.std = torch.exp(0.5 * self.logvar)
        self.var = torch.exp(self.logvar)
        if self.deterministic:
            self.var = self.std = torch.zeros_like(
                self.mean, device=self.parameters.device, dtype=self.parameters.dtype
            )

    def sample(self, generator: Optional[torch.Generator] = None) -> torch.FloatTensor:
        """Draw a reparameterized sample mean + std * eps on the parameters' device/dtype."""
        sample = randn_tensor(
            self.mean.shape, generator=generator, device=self.parameters.device, dtype=self.parameters.dtype
        )
        x = self.mean + self.std * sample
        return x

    def kl(self, other=None):
        """KL divergence to a standard normal (other=None) or to another diagonal Gaussian,
        summed over all non-batch dimensions."""
        if self.deterministic:
            return torch.Tensor([0.0])
        else:
            if other is None:
                return 0.5 * torch.sum(torch.pow(self.mean, 2) + self.var - 1.0 - self.logvar, dim=[1, 2, 3])
            else:
                return 0.5 * torch.sum(
                    torch.pow(self.mean - other.mean, 2) / other.var
                    + self.var / other.var
                    - 1.0
                    - self.logvar
                    + other.logvar,
                    dim=[1, 2, 3],
                )

    def nll(self, sample, dims=[1, 2, 3]):
        """Negative log-likelihood of `sample` under this Gaussian, summed over `dims`."""
        if self.deterministic:
            return torch.Tensor([0.0])
        logtwopi = np.log(2.0 * np.pi)
        return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var, dim=dims)

    def mode(self):
        """Mode of the distribution (the mean, for a Gaussian)."""
        return self.mean
589
1
"""Check whether a string can be converted into a given abbreviation.

A word `source` matches abbreviation `target` if `target` can be obtained by
capitalizing some subset of the lowercase letters of `source` and deleting the
remaining lowercase letters (existing uppercase letters must all be used, in order).
"""


def can_abbreviate(source: str, target: str) -> bool:
    """Return True if `source` can be turned into abbreviation `target`.

    Uses a DP table where dp[i][j] means: the first i characters of `source`
    can produce the first j characters of `target`.

    >>> can_abbreviate("daBcd", "ABC")
    True
    >>> can_abbreviate("dBcd", "ABC")
    False
    """
    n = len(source)
    m = len(target)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                # Capitalize source[i] to match target[j].
                if j < m and source[i].upper() == target[j]:
                    dp[i + 1][j + 1] = True
                # Or delete source[i] if it is lowercase.
                if source[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]


# Backward-compatible alias: the original module exposed this under a mangled name.
__snake_case = can_abbreviate


if __name__ == "__main__":
    import doctest

    doctest.testmod()
501
"""Tests for `datasets.io.text.TextDatasetReader` (single-file and DatasetDict paths).

`text_path` and `tmp_path` are pytest fixtures; the referenced text file is
expected to contain 4 lines (4 rows, one "text" column).
"""
import pytest

from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader

from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases


def _check_text_dataset(dataset, expected_features):
    """Assert the dataset has 4 rows, one "text" column, and the expected dtypes."""
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 1
    assert dataset.column_names == ["text"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    # keep_in_memory=True must allocate Arrow memory; False must not.
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader(text_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ],
)
def test_dataset_from_text_features(features, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader(text_path, features=features, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_text_split(split, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(text_path, cache_dir=cache_dir, split=split).read()
    _check_text_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_text_path_type(path_type, text_path, tmp_path):
    if issubclass(path_type, str):
        path = text_path
    elif issubclass(path_type, list):
        path = [text_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)


def _check_text_datasetdict(dataset_dict, expected_features, splits=("train",)):
    """Assert every split of the DatasetDict matches the single-dataset invariants."""
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 1
        assert dataset.column_names == ["text"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_datasetdict_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader({"train": text_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_datasetdict(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ],
)
def test_datasetdict_from_text_features(features, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader({"train": text_path}, features=features, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_datasetdict_from_text_split(split, text_path, tmp_path):
    if split:
        path = {split: text_path}
    else:
        split = "train"
        path = {"train": text_path, "test": text_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
501
1
import unittest from typing import Tuple import torch from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device from diffusers.utils.testing_utils import require_torch @require_torch class __UpperCAmelCase : """simple docstring""" @property def A ( self : int )-> List[Any]: return self.get_dummy_input() @property def A ( self : Tuple )-> Union[str, Any]: if self.block_type == "down": return (4, 32, 16, 16) elif self.block_type == "mid": return (4, 32, 32, 32) elif self.block_type == "up": return (4, 32, 64, 64) raise ValueError(f"""\'{self.block_type}\' is not a supported block_type. Set it to \'up\', \'mid\', or \'down\'.""" ) def A ( self : Union[str, Any] , A_ : Any=True , A_ : Optional[int]=False , A_ : Dict=False , A_ : List[str]=False , )-> Union[str, Any]: __UpperCamelCase = 4 __UpperCamelCase = 32 __UpperCamelCase = (32, 32) __UpperCamelCase = torch.manual_seed(0 ) __UpperCamelCase = torch.device(__a ) __UpperCamelCase = (batch_size, num_channels) + sizes __UpperCamelCase = randn_tensor(__a , generator=__a , device=__a ) __UpperCamelCase = {"""hidden_states""": hidden_states} if include_temb: __UpperCamelCase = 1_28 __UpperCamelCase = randn_tensor((batch_size, temb_channels) , generator=__a , device=__a ) if include_res_hidden_states_tuple: __UpperCamelCase = torch.manual_seed(1 ) __UpperCamelCase = (randn_tensor(__a , generator=__a , device=__a ),) if include_encoder_hidden_states: __UpperCamelCase = floats_tensor((batch_size, 32, 32) ).to(__a ) if include_skip_sample: __UpperCamelCase = randn_tensor(((batch_size, 3) + sizes) , generator=__a , device=__a ) return dummy_input def A ( self : Dict )-> Dict: __UpperCamelCase = { """in_channels""": 32, """out_channels""": 32, """temb_channels""": 1_28, } if self.block_type == "up": __UpperCamelCase = 32 if self.block_type == "mid": init_dict.pop("out_channels" ) __UpperCamelCase = self.dummy_input return init_dict, inputs_dict def A ( self : str , A_ : Dict )-> Any: __UpperCamelCase 
= self.prepare_init_args_and_inputs_for_common() __UpperCamelCase = self.block_class(**__a ) unet_block.to(__a ) unet_block.eval() with torch.no_grad(): __UpperCamelCase = unet_block(**__a ) if isinstance(__a , __a ): __UpperCamelCase = output[0] self.assertEqual(output.shape , self.output_shape ) __UpperCamelCase = output[0, -1, -3:, -3:] __UpperCamelCase = torch.tensor(__a ).to(__a ) assert torch_all_close(output_slice.flatten() , __a , atol=5e-3 ) @unittest.skipIf(torch_device == "mps" , "Training is not supported in mps" ) def A ( self : Union[str, Any] )-> Tuple: __UpperCamelCase = self.prepare_init_args_and_inputs_for_common() __UpperCamelCase = self.block_class(**__a ) model.to(__a ) model.train() __UpperCamelCase = model(**__a ) if isinstance(__a , __a ): __UpperCamelCase = output[0] __UpperCamelCase = torch.device(__a ) __UpperCamelCase = randn_tensor(output.shape , device=__a ) __UpperCamelCase = torch.nn.functional.mse_loss(__a , __a ) loss.backward()
720
"""simple docstring""" import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging _A = logging.get_logger(__name__) _A = {"vocab_file": "spiece.model"} _A = { "vocab_file": { "bert_for_seq_generation": ( "https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model" ), } } _A = {"bert_for_seq_generation": 512} class __UpperCAmelCase ( snake_case__ ): """simple docstring""" _snake_case : Optional[Any] = VOCAB_FILES_NAMES _snake_case : Tuple = PRETRAINED_VOCAB_FILES_MAP _snake_case : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _snake_case : List[int] = [] _snake_case : Optional[int] = ['input_ids', 'attention_mask'] def __init__( self : Tuple , A_ : List[str] , A_ : List[str]="<s>" , A_ : Optional[int]="</s>" , A_ : Dict="<unk>" , A_ : Optional[int]="<pad>" , A_ : int="<::::>" , A_ : Optional[Dict[str, Any]] = None , **A_ : Optional[int] , )-> None: __UpperCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs # Add extra_ids to the special token list super().__init__( bos_token=A_ , eos_token=A_ , unk_token=A_ , pad_token=A_ , sep_token=A_ , sp_model_kwargs=self.sp_model_kwargs , **A_ , ) __UpperCamelCase = vocab_file __UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(A_ ) @property def A ( self : Optional[Any] )-> List[str]: return self.sp_model.get_piece_size() def A ( self : List[Any] )-> int: __UpperCamelCase = {self.convert_ids_to_tokens(A_ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : List[Any] )-> Dict: __UpperCamelCase = self.__dict__.copy() __UpperCamelCase = None return state def __setstate__( self : str , A_ : Optional[Any] )-> List[Any]: __UpperCamelCase = d # for backward compatibility if not hasattr(self , "sp_model_kwargs" ): __UpperCamelCase = {} 
__UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def A ( self : Optional[Any] , A_ : str )-> List[str]: return self.sp_model.encode(A_ , out_type=A_ ) def A ( self : List[str] , A_ : Union[str, Any] )-> str: return self.sp_model.piece_to_id(A_ ) def A ( self : List[Any] , A_ : Dict )-> Optional[Any]: __UpperCamelCase = self.sp_model.IdToPiece(A_ ) return token def A ( self : List[Any] , A_ : Any )-> Union[str, Any]: __UpperCamelCase = [] __UpperCamelCase = "" for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(A_ ) + token __UpperCamelCase = [] else: current_sub_tokens.append(A_ ) out_string += self.sp_model.decode(A_ ) return out_string.strip() def A ( self : int , A_ : str , A_ : Optional[str] = None )-> Tuple[str]: if not os.path.isdir(A_ ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return __UpperCamelCase = os.path.join( A_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(A_ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , A_ ) elif not os.path.isfile(self.vocab_file ): with open(A_ , "wb" ) as fi: __UpperCamelCase = self.sp_model.serialized_model_proto() fi.write(A_ ) return (out_vocab_file,)
228
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCAmelCase = { """configuration_swinv2""": ["""SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Swinv2Config"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase = [ """SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST""", """Swinv2ForImageClassification""", """Swinv2ForMaskedImageModeling""", """Swinv2Model""", """Swinv2PreTrainedModel""", ] if TYPE_CHECKING: from .configuration_swinva import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinvaConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_swinva import ( SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST, SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel, SwinvaPreTrainedModel, ) else: import sys lowerCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
525
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import _LazyModule lowerCAmelCase = {"""tokenization_wav2vec2_phoneme""": ["""Wav2Vec2PhonemeCTCTokenizer"""]} if TYPE_CHECKING: from .tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizer else: import sys lowerCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
525
1
import inspect import unittest from transformers import RegNetConfig, is_flax_available from transformers.testing_utils import require_flax, slow from transformers.utils import cached_property, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor if is_flax_available(): import jax import jax.numpy as jnp from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class __lowerCAmelCase ( unittest.TestCase ): def __init__( self , __UpperCAmelCase , __UpperCAmelCase=3 , __UpperCAmelCase=32 , __UpperCAmelCase=3 , __UpperCAmelCase=10 , __UpperCAmelCase=[10, 20, 30, 40] , __UpperCAmelCase=[1, 1, 2, 1] , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase="relu" , __UpperCAmelCase=3 , __UpperCAmelCase=None , ): '''simple docstring''' __lowerCamelCase = parent __lowerCamelCase = batch_size __lowerCamelCase = image_size __lowerCamelCase = num_channels __lowerCamelCase = embeddings_size __lowerCamelCase = hidden_sizes __lowerCamelCase = depths __lowerCamelCase = is_training __lowerCamelCase = use_labels __lowerCamelCase = hidden_act __lowerCamelCase = num_labels __lowerCamelCase = scope __lowerCamelCase = len(__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __lowerCamelCase = self.get_config() return config, pixel_values def lowerCamelCase ( self ): '''simple docstring''' return RegNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' 
__lowerCamelCase = FlaxRegNetModel(config=__UpperCAmelCase ) __lowerCamelCase = model(__UpperCAmelCase ) # Output shape (b, c, h, w) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __lowerCamelCase = self.num_labels __lowerCamelCase = FlaxRegNetForImageClassification(config=__UpperCAmelCase ) __lowerCamelCase = model(__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.prepare_config_and_inputs() __lowerCamelCase ,__lowerCamelCase = config_and_inputs __lowerCamelCase = {'''pixel_values''': pixel_values} return config, inputs_dict @require_flax class __lowerCAmelCase ( lowerCAmelCase__ , unittest.TestCase ): lowerCAmelCase__ = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else () lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = FlaxRegNetModelTester(self ) __lowerCamelCase = ConfigTester(self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def lowerCamelCase ( self ): '''simple docstring''' return def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_model(*__UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__UpperCAmelCase ) @unittest.skip(reason='''RegNet does not use inputs_embeds''' ) def lowerCamelCase ( self ): '''simple docstring''' pass @unittest.skip(reason='''RegNet does not support input and output embeddings''' ) def lowerCamelCase ( self ): '''simple docstring''' pass def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase ,__lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCamelCase = model_class(__UpperCAmelCase ) __lowerCamelCase = inspect.signature(model.__call__ ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __lowerCamelCase = [*signature.parameters.keys()] __lowerCamelCase = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , __UpperCAmelCase ) def lowerCamelCase ( self ): '''simple docstring''' def check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): __lowerCamelCase = model_class(__UpperCAmelCase ) __lowerCamelCase = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) ) __lowerCamelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states __lowerCamelCase = self.model_tester.num_stages self.assertEqual(len(__UpperCAmelCase ) , expected_num_stages + 1 ) __lowerCamelCase ,__lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCamelCase = True check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __lowerCamelCase = True check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) def 
lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase ,__lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): __lowerCamelCase = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) __lowerCamelCase = model_class(__UpperCAmelCase ) @jax.jit def model_jitted(__UpperCAmelCase , **__UpperCAmelCase ): return model(pixel_values=__UpperCAmelCase , **__UpperCAmelCase ) with self.subTest('''JIT Enabled''' ): __lowerCamelCase = model_jitted(**__UpperCAmelCase ).to_tuple() with self.subTest('''JIT Disabled''' ): with jax.disable_jit(): __lowerCamelCase = model_jitted(**__UpperCAmelCase ).to_tuple() self.assertEqual(len(__UpperCAmelCase ) , len(__UpperCAmelCase ) ) for jitted_output, output in zip(__UpperCAmelCase , __UpperCAmelCase ): self.assertEqual(jitted_output.shape , output.shape ) def a__ ( ): __lowerCamelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_flax class __lowerCAmelCase ( unittest.TestCase ): @cached_property def lowerCamelCase ( self ): '''simple docstring''' return AutoImageProcessor.from_pretrained('''facebook/regnet-y-040''' ) if is_vision_available() else None @slow def lowerCamelCase ( self ): '''simple docstring''' __lowerCamelCase = FlaxRegNetForImageClassification.from_pretrained('''facebook/regnet-y-040''' ) __lowerCamelCase = self.default_image_processor __lowerCamelCase = prepare_img() __lowerCamelCase = image_processor(images=__UpperCAmelCase , return_tensors='''np''' ) __lowerCamelCase = model(**__UpperCAmelCase ) # verify the logits __lowerCamelCase = (1, 1000) self.assertEqual(outputs.logits.shape , __UpperCAmelCase ) __lowerCamelCase = jnp.array([-0.4_180, -1.5_051, -3.4_836] ) self.assertTrue(jnp.allclose(outputs.logits[0, :3] , __UpperCAmelCase , atol=1E-4 ) )
712
import argparse import csv import logging import os import random import numpy as np import torch from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset from tqdm import tqdm, trange from transformers import ( CONFIG_NAME, WEIGHTS_NAME, AdamW, OpenAIGPTDoubleHeadsModel, OpenAIGPTTokenizer, get_linear_schedule_with_warmup, ) logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO ) a_ = logging.getLogger(__name__) def a__ ( _UpperCamelCase : str ,_UpperCamelCase : List[Any] ): __lowerCamelCase = np.argmax(_UpperCamelCase ,axis=1 ) return np.sum(outputs == labels ) def a__ ( _UpperCamelCase : Optional[int] ): with open(_UpperCamelCase ,encoding='''utf_8''' ) as f: __lowerCamelCase = csv.reader(_UpperCamelCase ) __lowerCamelCase = [] next(_UpperCamelCase ) # skip the first line for line in tqdm(_UpperCamelCase ): output.append((''' '''.join(line[1:5] ), line[5], line[6], int(line[-1] ) - 1) ) return output def a__ ( _UpperCamelCase : Any ,_UpperCamelCase : Dict ,_UpperCamelCase : str ,_UpperCamelCase : Union[str, Any] ,_UpperCamelCase : Any ,_UpperCamelCase : Dict ): __lowerCamelCase = [] for dataset in encoded_datasets: __lowerCamelCase = len(_UpperCamelCase ) __lowerCamelCase = np.zeros((n_batch, 2, input_len) ,dtype=np.intaa ) __lowerCamelCase = np.zeros((n_batch, 2) ,dtype=np.intaa ) __lowerCamelCase = np.full((n_batch, 2, input_len) ,fill_value=-1_00 ,dtype=np.intaa ) __lowerCamelCase = np.zeros((n_batch,) ,dtype=np.intaa ) for ( i, (story, conta, conta, mc_label), ) in enumerate(_UpperCamelCase ): __lowerCamelCase = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token] __lowerCamelCase = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token] __lowerCamelCase = with_conta __lowerCamelCase = with_conta __lowerCamelCase = len(_UpperCamelCase ) - 1 __lowerCamelCase = 
len(_UpperCamelCase ) - 1 __lowerCamelCase = with_conta __lowerCamelCase = with_conta __lowerCamelCase = mc_label __lowerCamelCase = (input_ids, mc_token_ids, lm_labels, mc_labels) tensor_datasets.append(tuple(torch.tensor(_UpperCamelCase ) for t in all_inputs ) ) return tensor_datasets def a__ ( ): __lowerCamelCase = argparse.ArgumentParser() parser.add_argument('''--model_name''' ,type=_UpperCamelCase ,default='''openai-gpt''' ,help='''pretrained model name''' ) parser.add_argument('''--do_train''' ,action='''store_true''' ,help='''Whether to run training.''' ) parser.add_argument('''--do_eval''' ,action='''store_true''' ,help='''Whether to run eval on the dev set.''' ) parser.add_argument( '''--output_dir''' ,default=_UpperCamelCase ,type=_UpperCamelCase ,required=_UpperCamelCase ,help='''The output directory where the model predictions and checkpoints will be written.''' ,) parser.add_argument('''--train_dataset''' ,type=_UpperCamelCase ,default='''''' ) parser.add_argument('''--eval_dataset''' ,type=_UpperCamelCase ,default='''''' ) parser.add_argument('''--seed''' ,type=_UpperCamelCase ,default=42 ) parser.add_argument('''--num_train_epochs''' ,type=_UpperCamelCase ,default=3 ) parser.add_argument('''--train_batch_size''' ,type=_UpperCamelCase ,default=8 ) parser.add_argument('''--eval_batch_size''' ,type=_UpperCamelCase ,default=16 ) parser.add_argument('''--adam_epsilon''' ,default=1e-8 ,type=_UpperCamelCase ,help='''Epsilon for Adam optimizer.''' ) parser.add_argument('''--max_grad_norm''' ,type=_UpperCamelCase ,default=1 ) parser.add_argument( '''--max_steps''' ,default=-1 ,type=_UpperCamelCase ,help=( '''If > 0: set total number of training steps to perform. 
Override num_train_epochs.''' ) ,) parser.add_argument( '''--gradient_accumulation_steps''' ,type=_UpperCamelCase ,default=1 ,help='''Number of updates steps to accumulate before performing a backward/update pass.''' ,) parser.add_argument('''--learning_rate''' ,type=_UpperCamelCase ,default=6.25e-5 ) parser.add_argument('''--warmup_steps''' ,default=0 ,type=_UpperCamelCase ,help='''Linear warmup over warmup_steps.''' ) parser.add_argument('''--lr_schedule''' ,type=_UpperCamelCase ,default='''warmup_linear''' ) parser.add_argument('''--weight_decay''' ,type=_UpperCamelCase ,default=0.01 ) parser.add_argument('''--lm_coef''' ,type=_UpperCamelCase ,default=0.9 ) parser.add_argument('''--n_valid''' ,type=_UpperCamelCase ,default=3_74 ) parser.add_argument('''--server_ip''' ,type=_UpperCamelCase ,default='''''' ,help='''Can be used for distant debugging.''' ) parser.add_argument('''--server_port''' ,type=_UpperCamelCase ,default='''''' ,help='''Can be used for distant debugging.''' ) __lowerCamelCase = parser.parse_args() print(_UpperCamelCase ) if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print('''Waiting for debugger attach''' ) ptvsd.enable_attach(address=(args.server_ip, args.server_port) ,redirect_output=_UpperCamelCase ) ptvsd.wait_for_attach() random.seed(args.seed ) np.random.seed(args.seed ) torch.manual_seed(args.seed ) torch.cuda.manual_seed_all(args.seed ) __lowerCamelCase = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' ) __lowerCamelCase = torch.cuda.device_count() logger.info('''device: {}, n_gpu {}'''.format(_UpperCamelCase ,_UpperCamelCase ) ) if not args.do_train and not args.do_eval: raise ValueError('''At least one of `do_train` or `do_eval` must be True.''' ) if not os.path.exists(args.output_dir ): os.makedirs(args.output_dir ) # Load tokenizer and model # This loading functions also add new tokens and embeddings 
called `special tokens` # These new embeddings will be fine-tuned on the RocStories dataset __lowerCamelCase = ['''_start_''', '''_delimiter_''', '''_classify_'''] __lowerCamelCase = OpenAIGPTTokenizer.from_pretrained(args.model_name ) tokenizer.add_tokens(_UpperCamelCase ) __lowerCamelCase = tokenizer.convert_tokens_to_ids(_UpperCamelCase ) __lowerCamelCase = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name ) model.resize_token_embeddings(len(_UpperCamelCase ) ) model.to(_UpperCamelCase ) # Load and encode the datasets def tokenize_and_encode(_UpperCamelCase : Dict ): if isinstance(_UpperCamelCase ,_UpperCamelCase ): return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(_UpperCamelCase ) ) elif isinstance(_UpperCamelCase ,_UpperCamelCase ): return obj return [tokenize_and_encode(_UpperCamelCase ) for o in obj] logger.info('''Encoding dataset...''' ) __lowerCamelCase = load_rocstories_dataset(args.train_dataset ) __lowerCamelCase = load_rocstories_dataset(args.eval_dataset ) __lowerCamelCase = (train_dataset, eval_dataset) __lowerCamelCase = tokenize_and_encode(_UpperCamelCase ) # Compute the max input length for the Transformer __lowerCamelCase = model.config.n_positions // 2 - 2 __lowerCamelCase = max( len(story[:max_length] ) + max(len(conta[:max_length] ) ,len(conta[:max_length] ) ) + 3 for dataset in encoded_datasets for story, conta, conta, _ in dataset ) __lowerCamelCase = min(_UpperCamelCase ,model.config.n_positions ) # Max size of input for the pre-trained model # Prepare inputs tensors and dataloaders __lowerCamelCase = pre_process_datasets(_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,*_UpperCamelCase ) __lowerCamelCase ,__lowerCamelCase = tensor_datasets[0], tensor_datasets[1] __lowerCamelCase = TensorDataset(*_UpperCamelCase ) __lowerCamelCase = RandomSampler(_UpperCamelCase ) __lowerCamelCase = DataLoader(_UpperCamelCase ,sampler=_UpperCamelCase ,batch_size=args.train_batch_size ) __lowerCamelCase = TensorDataset(*_UpperCamelCase ) 
__lowerCamelCase = SequentialSampler(_UpperCamelCase ) __lowerCamelCase = DataLoader(_UpperCamelCase ,sampler=_UpperCamelCase ,batch_size=args.eval_batch_size ) # Prepare optimizer if args.do_train: if args.max_steps > 0: __lowerCamelCase = args.max_steps __lowerCamelCase = args.max_steps // (len(_UpperCamelCase ) // args.gradient_accumulation_steps) + 1 else: __lowerCamelCase = len(_UpperCamelCase ) // args.gradient_accumulation_steps * args.num_train_epochs __lowerCamelCase = list(model.named_parameters() ) __lowerCamelCase = ['''bias''', '''LayerNorm.bias''', '''LayerNorm.weight'''] __lowerCamelCase = [ { '''params''': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )], '''weight_decay''': args.weight_decay, }, {'''params''': [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], '''weight_decay''': 0.0}, ] __lowerCamelCase = AdamW(_UpperCamelCase ,lr=args.learning_rate ,eps=args.adam_epsilon ) __lowerCamelCase = get_linear_schedule_with_warmup( _UpperCamelCase ,num_warmup_steps=args.warmup_steps ,num_training_steps=_UpperCamelCase ) if args.do_train: __lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = 0, 0, None model.train() for _ in trange(int(args.num_train_epochs ) ,desc='''Epoch''' ): __lowerCamelCase = 0 __lowerCamelCase = 0 __lowerCamelCase = tqdm(_UpperCamelCase ,desc='''Training''' ) for step, batch in enumerate(_UpperCamelCase ): __lowerCamelCase = tuple(t.to(_UpperCamelCase ) for t in batch ) __lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = batch __lowerCamelCase = model(_UpperCamelCase ,mc_token_ids=_UpperCamelCase ,lm_labels=_UpperCamelCase ,mc_labels=_UpperCamelCase ) __lowerCamelCase = args.lm_coef * losses[0] + losses[1] loss.backward() optimizer.step() scheduler.step() optimizer.zero_grad() tr_loss += loss.item() __lowerCamelCase = ( loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item() ) nb_tr_steps += 1 __lowerCamelCase = '''Training loss: {:.2e} 
lr: {:.2e}'''.format(_UpperCamelCase ,scheduler.get_lr()[0] ) # Save a trained model if args.do_train: # Save a trained model, configuration and tokenizer __lowerCamelCase = model.module if hasattr(_UpperCamelCase ,'''module''' ) else model # Only save the model itself # If we save using the predefined names, we can load using `from_pretrained` __lowerCamelCase = os.path.join(args.output_dir ,_UpperCamelCase ) __lowerCamelCase = os.path.join(args.output_dir ,_UpperCamelCase ) torch.save(model_to_save.state_dict() ,_UpperCamelCase ) model_to_save.config.to_json_file(_UpperCamelCase ) tokenizer.save_vocabulary(args.output_dir ) # Load a trained model and vocabulary that you have fine-tuned __lowerCamelCase = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir ) __lowerCamelCase = OpenAIGPTTokenizer.from_pretrained(args.output_dir ) model.to(_UpperCamelCase ) if args.do_eval: model.eval() __lowerCamelCase ,__lowerCamelCase = 0, 0 __lowerCamelCase ,__lowerCamelCase = 0, 0 for batch in tqdm(_UpperCamelCase ,desc='''Evaluating''' ): __lowerCamelCase = tuple(t.to(_UpperCamelCase ) for t in batch ) __lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = batch with torch.no_grad(): __lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = model( _UpperCamelCase ,mc_token_ids=_UpperCamelCase ,lm_labels=_UpperCamelCase ,mc_labels=_UpperCamelCase ) __lowerCamelCase = mc_logits.detach().cpu().numpy() __lowerCamelCase = mc_labels.to('''cpu''' ).numpy() __lowerCamelCase = accuracy(_UpperCamelCase ,_UpperCamelCase ) eval_loss += mc_loss.mean().item() eval_accuracy += tmp_eval_accuracy nb_eval_examples += input_ids.size(0 ) nb_eval_steps += 1 __lowerCamelCase = eval_loss / nb_eval_steps __lowerCamelCase = eval_accuracy / nb_eval_examples __lowerCamelCase = tr_loss / nb_tr_steps if args.do_train else None __lowerCamelCase = {'''eval_loss''': eval_loss, '''eval_accuracy''': eval_accuracy, '''train_loss''': train_loss} __lowerCamelCase = 
os.path.join(args.output_dir ,'''eval_results.txt''' ) with open(_UpperCamelCase ,'''w''' ) as writer: logger.info('''***** Eval results *****''' ) for key in sorted(result.keys() ): logger.info(''' %s = %s''' ,_UpperCamelCase ,str(result[key] ) ) writer.write('''%s = %s\n''' % (key, str(result[key] )) ) if __name__ == "__main__": main()
622
0
from __future__ import annotations import inspect import unittest import numpy as np from transformers import DeiTConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, TFDeiTModel, ) from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DeiTImageProcessor class lowerCAmelCase : def __init__( self : Union[str, Any] , UpperCAmelCase : List[str] , UpperCAmelCase : List[Any]=13 , UpperCAmelCase : Optional[int]=30 , UpperCAmelCase : Union[str, Any]=2 , UpperCAmelCase : int=3 , UpperCAmelCase : Tuple=True , UpperCAmelCase : List[str]=True , UpperCAmelCase : Tuple=32 , UpperCAmelCase : Dict=2 , UpperCAmelCase : Dict=4 , UpperCAmelCase : Union[str, Any]=37 , UpperCAmelCase : str="gelu" , UpperCAmelCase : int=0.1 , UpperCAmelCase : Dict=0.1 , UpperCAmelCase : Optional[int]=10 , UpperCAmelCase : Optional[Any]=0.0_2 , UpperCAmelCase : Optional[Any]=3 , UpperCAmelCase : Tuple=None , UpperCAmelCase : Tuple=2 , ) -> Union[str, Any]: lowerCamelCase__ : List[str] = parent lowerCamelCase__ : List[str] = batch_size lowerCamelCase__ : Optional[Any] = image_size lowerCamelCase__ : List[Any] = patch_size lowerCamelCase__ : Union[str, Any] = num_channels lowerCamelCase__ : Any = is_training lowerCamelCase__ : List[str] = use_labels lowerCamelCase__ : Optional[int] = hidden_size lowerCamelCase__ : Tuple = num_hidden_layers lowerCamelCase__ : List[str] = num_attention_heads lowerCamelCase__ : Optional[int] = 
intermediate_size lowerCamelCase__ : str = hidden_act lowerCamelCase__ : List[str] = hidden_dropout_prob lowerCamelCase__ : Tuple = attention_probs_dropout_prob lowerCamelCase__ : Dict = type_sequence_label_size lowerCamelCase__ : List[str] = initializer_range lowerCamelCase__ : Tuple = scope lowerCamelCase__ : Dict = encoder_stride # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens) lowerCamelCase__ : str = (image_size // patch_size) ** 2 lowerCamelCase__ : int = num_patches + 2 def A_ ( self : List[str] ) -> str: lowerCamelCase__ : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCamelCase__ : List[str] = None if self.use_labels: lowerCamelCase__ : int = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase__ : str = self.get_config() return config, pixel_values, labels def A_ ( self : Dict ) -> int: return DeiTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCAmelCase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def A_ ( self : List[str] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Dict ) -> Optional[int]: lowerCamelCase__ : Dict = TFDeiTModel(config=UpperCAmelCase ) lowerCamelCase__ : str = model(UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def A_ ( self : Union[str, Any] , UpperCAmelCase : Dict , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Any ) -> List[Any]: lowerCamelCase__ : Tuple = 
TFDeiTForMaskedImageModeling(config=UpperCAmelCase ) lowerCamelCase__ : int = model(UpperCAmelCase ) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images lowerCamelCase__ : int = 1 lowerCamelCase__ : int = TFDeiTForMaskedImageModeling(UpperCAmelCase ) lowerCamelCase__ : Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowerCamelCase__ : Tuple = model(UpperCAmelCase ) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def A_ ( self : int , UpperCAmelCase : List[str] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Dict ) -> List[Any]: lowerCamelCase__ : str = self.type_sequence_label_size lowerCamelCase__ : List[Any] = TFDeiTForImageClassification(UpperCAmelCase ) lowerCamelCase__ : Union[str, Any] = model(UpperCAmelCase , labels=UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images lowerCamelCase__ : Optional[int] = 1 lowerCamelCase__ : Dict = TFDeiTForImageClassification(UpperCAmelCase ) lowerCamelCase__ : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowerCamelCase__ : str = model(UpperCAmelCase , labels=UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def A_ ( self : List[str] ) -> Optional[int]: lowerCamelCase__ : List[Any] = self.prepare_config_and_inputs() lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : int = config_and_inputs lowerCamelCase__ : Any = {'pixel_values': pixel_values} return config, inputs_dict @require_tf class lowerCAmelCase ( __UpperCamelCase, __UpperCamelCase, unittest.TestCase ): UpperCAmelCase__ = ( ( TFDeiTModel, TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, ) if is_tf_available() else () ) 
UpperCAmelCase__ = ( { """feature-extraction""": TFDeiTModel, """image-classification""": (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher), } if is_tf_available() else {} ) UpperCAmelCase__ = False UpperCAmelCase__ = False UpperCAmelCase__ = False UpperCAmelCase__ = False def A_ ( self : int ) -> Tuple: lowerCamelCase__ : List[str] = TFDeiTModelTester(self ) lowerCamelCase__ : Union[str, Any] = ConfigTester(self , config_class=UpperCAmelCase , has_text_modality=UpperCAmelCase , hidden_size=37 ) def A_ ( self : Optional[int] ) -> str: self.config_tester.run_common_tests() @unittest.skip(reason='DeiT does not use inputs_embeds' ) def A_ ( self : Dict ) -> Optional[Any]: pass def A_ ( self : List[Any] ) -> Union[str, Any]: lowerCamelCase__ , lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase__ : Optional[Any] = model_class(UpperCAmelCase ) self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) ) lowerCamelCase__ : Any = model.get_output_embeddings() self.assertTrue(x is None or isinstance(UpperCAmelCase , tf.keras.layers.Dense ) ) def A_ ( self : int ) -> Dict: lowerCamelCase__ , lowerCamelCase__ : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase__ : Optional[Any] = model_class(UpperCAmelCase ) lowerCamelCase__ : Union[str, Any] = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCamelCase__ : str = [*signature.parameters.keys()] lowerCamelCase__ : Dict = ['pixel_values'] self.assertListEqual(arg_names[:1] , UpperCAmelCase ) def A_ ( self : Optional[int] ) -> Tuple: lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCAmelCase ) def A_ ( self : Optional[int] ) -> int: lowerCamelCase__ : Dict = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*UpperCAmelCase ) def A_ ( self : str ) -> Any: lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase ) def A_ ( self : str , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[int] , UpperCAmelCase : Optional[Any]=False ) -> str: lowerCamelCase__ : Optional[Any] = super()._prepare_for_class(UpperCAmelCase , UpperCAmelCase , return_labels=UpperCAmelCase ) if return_labels: if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters: del inputs_dict["labels"] return inputs_dict @slow def A_ ( self : Optional[Any] ) -> List[Any]: for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase__ : List[Any] = TFDeiTModel.from_pretrained(UpperCAmelCase ) self.assertIsNotNone(UpperCAmelCase ) def SCREAMING_SNAKE_CASE ( ) -> Optional[int]: lowerCamelCase__ : Dict = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_tf @require_vision class lowerCAmelCase ( unittest.TestCase ): @cached_property def A_ ( self : int ) -> Tuple: return ( DeiTImageProcessor.from_pretrained('facebook/deit-base-distilled-patch16-224' ) if is_vision_available() else None ) @slow def A_ ( self : Dict ) -> List[str]: lowerCamelCase__ : List[Any] = TFDeiTForImageClassificationWithTeacher.from_pretrained('facebook/deit-base-distilled-patch16-224' ) lowerCamelCase__ : Optional[Any] = self.default_image_processor lowerCamelCase__ : str = prepare_img() lowerCamelCase__ : Tuple = image_processor(images=UpperCAmelCase , return_tensors='tf' ) # forward pass lowerCamelCase__ : List[str] = model(**UpperCAmelCase ) # verify the logits lowerCamelCase__ : List[Any] = tf.TensorShape((1, 1000) ) self.assertEqual(outputs.logits.shape , UpperCAmelCase ) lowerCamelCase__ : Dict = tf.constant([-1.0_2_6_6, 0.1_9_1_2, -1.2_8_6_1] ) 
self.assertTrue(np.allclose(outputs.logits[0, :3] , UpperCAmelCase , atol=1e-4 ) )
295
from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow if is_tf_available(): import tensorflow as tf from transformers import AutoTokenizer, TFAutoModelForSeqaSeqLM @require_tf @require_sentencepiece @require_tokenizers class lowerCAmelCase ( unittest.TestCase ): @slow def A_ ( self : int ) -> Optional[Any]: lowerCamelCase__ : Tuple = TFAutoModelForSeqaSeqLM.from_pretrained('google/mt5-small' ) lowerCamelCase__ : Any = AutoTokenizer.from_pretrained('google/mt5-small' ) lowerCamelCase__ : int = tokenizer('Hello there' , return_tensors='tf' ).input_ids lowerCamelCase__ : int = tokenizer('Hi I am' , return_tensors='tf' ).input_ids lowerCamelCase__ : List[str] = model(UpperCAmelCase , labels=UpperCAmelCase ).loss lowerCamelCase__ : List[Any] = -tf.math.reduce_mean(UpperCAmelCase ).numpy() lowerCamelCase__ : Dict = -2_1.2_2_8_1_6_8 self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 2e-4 )
295
1
"""simple docstring""" import numpy as np import torch from torch.utils.data import DataLoader from accelerate.utils.dataclasses import DistributedType class __lowercase: '''simple docstring''' def __init__( self , __a=2 , __a=3 , __a=64 , __a=None ): __lowerCamelCase : Optional[Any] = np.random.default_rng(__a ) __lowerCamelCase : int = length __lowerCamelCase : Union[str, Any] = rng.normal(size=(length,) ).astype(np.floataa ) __lowerCamelCase : Dict = a * self.x + b + rng.normal(scale=0.1 , size=(length,) ).astype(np.floataa ) def __len__( self ): return self.length def __getitem__( self , __a ): return {"x": self.x[i], "y": self.y[i]} class __lowercase( torch.nn.Module ): '''simple docstring''' def __init__( self , __a=0 , __a=0 , __a=False ): super().__init__() __lowerCamelCase : List[Any] = torch.nn.Parameter(torch.tensor([2, 3] ).float() ) __lowerCamelCase : str = torch.nn.Parameter(torch.tensor([2, 3] ).float() ) __lowerCamelCase : List[str] = True def snake_case_ ( self , __a=None ): if self.first_batch: print(f'''Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}''' ) __lowerCamelCase : Union[str, Any] = False return x * self.a[0] + self.b[0] class __lowercase( torch.nn.Module ): '''simple docstring''' def __init__( self , __a=0 , __a=0 , __a=False ): super().__init__() __lowerCamelCase : Optional[Any] = torch.nn.Parameter(torch.tensor(__a ).float() ) __lowerCamelCase : int = torch.nn.Parameter(torch.tensor(__a ).float() ) __lowerCamelCase : str = True def snake_case_ ( self , __a=None ): if self.first_batch: print(f'''Model dtype: {self.a.dtype}, {self.b.dtype}. 
Input dtype: {x.dtype}''' ) __lowerCamelCase : Tuple = False return x * self.a + self.b def UpperCAmelCase ( A__: Dict , A__: int = 16 ) -> str: from datasets import load_dataset from transformers import AutoTokenizer __lowerCamelCase : Optional[Any] = AutoTokenizer.from_pretrained('bert-base-cased' ) __lowerCamelCase : str = {'train': 'tests/test_samples/MRPC/train.csv', 'validation': 'tests/test_samples/MRPC/dev.csv'} __lowerCamelCase : int = load_dataset('csv' , data_files=A__ ) __lowerCamelCase : str = datasets['train'].unique('label' ) __lowerCamelCase : int = {v: i for i, v in enumerate(A__ )} def tokenize_function(A__: Dict ): # max_length=None => use the model max length (it's actually the default) __lowerCamelCase : Dict = tokenizer( examples['sentence1'] , examples['sentence2'] , truncation=A__ , max_length=A__ , padding='max_length' ) if "label" in examples: __lowerCamelCase : List[Any] = [label_to_id[l] for l in examples['label']] return outputs # Apply the method we just defined to all the examples in all the splits of the dataset __lowerCamelCase : Optional[int] = datasets.map( A__ , batched=A__ , remove_columns=['sentence1', 'sentence2', 'label'] , ) def collate_fn(A__: Tuple ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(A__ , padding='max_length' , max_length=128 , return_tensors='pt' ) return tokenizer.pad(A__ , padding='longest' , return_tensors='pt' ) # Instantiate dataloaders. __lowerCamelCase : Dict = DataLoader(tokenized_datasets['train'] , shuffle=A__ , collate_fn=A__ , batch_size=2 ) __lowerCamelCase : str = DataLoader(tokenized_datasets['validation'] , shuffle=A__ , collate_fn=A__ , batch_size=1 ) return train_dataloader, eval_dataloader
263
"""simple docstring""" import random def UpperCAmelCase ( A__: Union[str, Any] , A__: List[str] , A__: Union[str, Any] ) -> int: __lowerCamelCase : Optional[Any] = a[left_index] __lowerCamelCase : int = left_index + 1 for j in range(left_index + 1 , A__ ): if a[j] < pivot: __lowerCamelCase , __lowerCamelCase : Optional[int] = a[i], a[j] i += 1 __lowerCamelCase , __lowerCamelCase : str = a[i - 1], a[left_index] return i - 1 def UpperCAmelCase ( A__: List[Any] , A__: Tuple , A__: Tuple ) -> Dict: if left < right: __lowerCamelCase : Optional[int] = random.randint(A__ , right - 1 ) __lowerCamelCase , __lowerCamelCase : int = ( a[left], a[pivot], ) # switches the pivot with the left most bound __lowerCamelCase : Union[str, Any] = partition(A__ , A__ , A__ ) quick_sort_random( A__ , A__ , A__ ) # recursive quicksort to the left of the pivot point quick_sort_random( A__ , pivot_index + 1 , A__ ) # recursive quicksort to the right of the pivot point def UpperCAmelCase ( ) -> int: __lowerCamelCase : Dict = input('Enter numbers separated by a comma:\n' ).strip() __lowerCamelCase : int = [int(A__ ) for item in user_input.split(',' )] quick_sort_random(A__ , 0 , len(A__ ) ) print(A__ ) if __name__ == "__main__": main()
263
1
import os
import random
import sys

from . import cryptomath_module as cryptoMath  # noqa: N812
from . import rabin_miller as rabinMiller  # noqa: N812


def main():
    """Generate a 1024-bit RSA key pair and write it to rsa_{pub,priv}key.txt."""
    print("Making key files...")
    make_key_files("rsa", 1024)
    print("Key files generation successful.")


def generate_key(key_size):
    """Generate an RSA key pair of the given bit size.

    Returns ((n, e), (n, d)) — the public and private keys.
    """
    print("Generating prime p...")
    p = rabinMiller.generate_large_prime(key_size)
    print("Generating prime q...")
    q = rabinMiller.generate_large_prime(key_size)
    n = p * q

    print("Generating e that is relatively prime to (p - 1) * (q - 1)...")
    while True:
        # candidate public exponents are drawn from [2^(key_size-1), 2^key_size)
        e = random.randrange(2 ** (key_size - 1), 2 ** (key_size))
        if cryptoMath.gcd(e, (p - 1) * (q - 1)) == 1:
            break

    print("Calculating d that is mod inverse of e...")
    d = cryptoMath.find_mod_inverse(e, (p - 1) * (q - 1))

    public_key = (n, e)
    private_key = (n, d)
    return (public_key, private_key)


def make_key_files(name, key_size):
    """Write {name}_pubkey.txt and {name}_privkey.txt; abort if either exists."""
    if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"):
        print("\nWARNING:")
        print(
            f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            "Use a different name or delete these files and re-run this program."
        )
        sys.exit()

    public_key, private_key = generate_key(key_size)

    print(f"\nWriting public key to file {name}_pubkey.txt...")
    with open(f"{name}_pubkey.txt", "w") as out_file:
        out_file.write(f"{key_size},{public_key[0]},{public_key[1]}")

    print(f"Writing private key to file {name}_privkey.txt...")
    with open(f"{name}_privkey.txt", "w") as out_file:
        out_file.write(f"{key_size},{private_key[0]},{private_key[1]}")


if __name__ == "__main__":
    main()
203
from __future__ import annotations import unittest from transformers import DebertaVaConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFDebertaVaForMaskedLM, TFDebertaVaForQuestionAnswering, TFDebertaVaForSequenceClassification, TFDebertaVaForTokenClassification, TFDebertaVaModel, ) class _lowercase : def __init__( self : str , lowerCamelCase__ : Dict , lowerCamelCase__ : Union[str, Any]=1_3 , lowerCamelCase__ : Dict=7 , lowerCamelCase__ : List[Any]=True , lowerCamelCase__ : Tuple=True , lowerCamelCase__ : List[str]=True , lowerCamelCase__ : List[Any]=True , lowerCamelCase__ : Any=9_9 , lowerCamelCase__ : Union[str, Any]=3_2 , lowerCamelCase__ : Dict=2 , lowerCamelCase__ : Tuple=4 , lowerCamelCase__ : int=3_7 , lowerCamelCase__ : List[str]="gelu" , lowerCamelCase__ : str=0.1 , lowerCamelCase__ : Any=0.1 , lowerCamelCase__ : int=5_1_2 , lowerCamelCase__ : Optional[Any]=1_6 , lowerCamelCase__ : Optional[Any]=2 , lowerCamelCase__ : List[str]=0.02 , lowerCamelCase__ : Dict=False , lowerCamelCase__ : Union[str, Any]=True , lowerCamelCase__ : List[Any]="None" , lowerCamelCase__ : int=3 , lowerCamelCase__ : List[Any]=4 , lowerCamelCase__ : List[str]=None , ) -> List[str]: """simple docstring""" A_ = parent A_ = batch_size A_ = seq_length A_ = is_training A_ = use_input_mask A_ = use_token_type_ids A_ = use_labels A_ = vocab_size A_ = hidden_size A_ = num_hidden_layers A_ = num_attention_heads A_ = intermediate_size A_ = hidden_act A_ = hidden_dropout_prob A_ = attention_probs_dropout_prob A_ = max_position_embeddings A_ = type_vocab_size A_ = type_sequence_label_size A_ = initializer_range A_ = num_labels A_ = num_choices A_ = relative_attention A_ = position_biased_input 
A_ = pos_att_type A_ = scope def UpperCamelCase ( self : str ) -> str: """simple docstring""" A_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A_ = None if self.use_input_mask: A_ = random_attention_mask([self.batch_size, self.seq_length] ) A_ = None if self.use_token_type_ids: A_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) A_ = None A_ = None A_ = None if self.use_labels: A_ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) A_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) A_ = DebertaVaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , initializer_range=self.initializer_range , return_dict=lowerCamelCase__ , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCamelCase ( self : Tuple , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Dict , lowerCamelCase__ : Dict , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : List[Any] , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Union[str, Any] ) -> Any: """simple docstring""" A_ = TFDebertaVaModel(config=lowerCamelCase__ ) A_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} A_ = [input_ids, input_mask] A_ = model(lowerCamelCase__ ) A_ = model(lowerCamelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCamelCase ( self : Tuple , lowerCamelCase__ : 
Optional[int] , lowerCamelCase__ : int , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Tuple , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Dict , lowerCamelCase__ : List[str] ) -> str: """simple docstring""" A_ = TFDebertaVaForMaskedLM(config=lowerCamelCase__ ) A_ = { '''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids, } A_ = model(lowerCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCamelCase ( self : Any , lowerCamelCase__ : str , lowerCamelCase__ : int , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Dict , lowerCamelCase__ : List[Any] , lowerCamelCase__ : Tuple ) -> List[Any]: """simple docstring""" A_ = self.num_labels A_ = TFDebertaVaForSequenceClassification(config=lowerCamelCase__ ) A_ = { '''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids, } A_ = model(lowerCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCamelCase ( self : str , lowerCamelCase__ : Tuple , lowerCamelCase__ : Dict , lowerCamelCase__ : str , lowerCamelCase__ : int , lowerCamelCase__ : Any , lowerCamelCase__ : Tuple , lowerCamelCase__ : str ) -> Optional[Any]: """simple docstring""" A_ = self.num_labels A_ = TFDebertaVaForTokenClassification(config=lowerCamelCase__ ) A_ = { '''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids, } A_ = model(lowerCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCamelCase ( self : Tuple , lowerCamelCase__ : int , lowerCamelCase__ : Any , lowerCamelCase__ : Any , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : Tuple , lowerCamelCase__ : str , lowerCamelCase__ : Any ) -> Any: """simple docstring""" A_ = 
TFDebertaVaForQuestionAnswering(config=lowerCamelCase__ ) A_ = { '''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids, } A_ = model(lowerCamelCase__ ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def UpperCamelCase ( self : int ) -> Optional[int]: """simple docstring""" A_ = self.prepare_config_and_inputs() ( ( A_ ) ,( A_ ) ,( A_ ) ,( A_ ) ,( A_ ) ,( A_ ) ,( A_ ) , ) = config_and_inputs A_ = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_tf class _lowercase ( __lowerCamelCase,__lowerCamelCase,unittest.TestCase ): _lowercase : Union[str, Any] = ( ( TFDebertaVaModel, TFDebertaVaForMaskedLM, TFDebertaVaForQuestionAnswering, TFDebertaVaForSequenceClassification, TFDebertaVaForTokenClassification, ) if is_tf_available() else () ) _lowercase : Optional[Any] = ( { 'feature-extraction': TFDebertaVaModel, 'fill-mask': TFDebertaVaForMaskedLM, 'question-answering': TFDebertaVaForQuestionAnswering, 'text-classification': TFDebertaVaForSequenceClassification, 'token-classification': TFDebertaVaForTokenClassification, 'zero-shot': TFDebertaVaForSequenceClassification, } if is_tf_available() else {} ) _lowercase : List[str] = False _lowercase : Any = False def UpperCamelCase ( self : int ) -> List[str]: """simple docstring""" A_ = TFDebertaVaModelTester(self ) A_ = ConfigTester(self , config_class=lowerCamelCase__ , hidden_size=3_7 ) def UpperCamelCase ( self : Optional[Any] ) -> Optional[Any]: """simple docstring""" self.config_tester.run_common_tests() def UpperCamelCase ( self : Any ) -> List[Any]: """simple docstring""" A_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCamelCase__ ) def UpperCamelCase ( self : Any ) -> Optional[int]: """simple docstring""" A_ = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*lowerCamelCase__ ) def UpperCamelCase ( self : Any ) -> List[str]: """simple docstring""" A_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*lowerCamelCase__ ) def UpperCamelCase ( self : List[Any] ) -> Dict: """simple docstring""" A_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*lowerCamelCase__ ) def UpperCamelCase ( self : Any ) -> List[str]: """simple docstring""" A_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*lowerCamelCase__ ) @slow def UpperCamelCase ( self : Union[str, Any] ) -> int: """simple docstring""" A_ = TFDebertaVaModel.from_pretrained('''kamalkraj/deberta-v2-xlarge''' ) self.assertIsNotNone(lowerCamelCase__ ) @require_tf class _lowercase ( unittest.TestCase ): @unittest.skip(reason='''Model not available yet''' ) def UpperCamelCase ( self : Tuple ) -> List[str]: """simple docstring""" pass @slow def UpperCamelCase ( self : Any ) -> Any: """simple docstring""" A_ = TFDebertaVaModel.from_pretrained('''kamalkraj/deberta-v2-xlarge''' ) A_ = tf.constant([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] ) A_ = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) A_ = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ )[0] A_ = tf.constant( [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]] ) tf.debugging.assert_near(output[:, 1:4, 1:4] , lowerCamelCase__ , atol=1e-4 )
203
1
"""simple docstring""" from __future__ import annotations import inspect import unittest from transformers import ViTConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFViTForImageClassification, TFViTModel if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class lowerCamelCase : '''simple docstring''' def __init__( self: Any , snake_case: List[str] , snake_case: int=13 , snake_case: List[Any]=30 , snake_case: Optional[int]=2 , snake_case: str=3 , snake_case: Dict=True , snake_case: List[Any]=True , snake_case: Dict=32 , snake_case: str=2 , snake_case: Dict=4 , snake_case: Optional[Any]=37 , snake_case: List[Any]="gelu" , snake_case: Optional[int]=0.1 , snake_case: Optional[Any]=0.1 , snake_case: int=10 , snake_case: str=0.0_2 , snake_case: List[Any]=3 , snake_case: str=None , ) -> List[str]: snake_case_ :Tuple = parent snake_case_ :int = batch_size snake_case_ :Tuple = image_size snake_case_ :str = patch_size snake_case_ :Tuple = num_channels snake_case_ :Any = is_training snake_case_ :Union[str, Any] = use_labels snake_case_ :List[Any] = hidden_size snake_case_ :Tuple = num_hidden_layers snake_case_ :List[Any] = num_attention_heads snake_case_ :Optional[Any] = intermediate_size snake_case_ :Union[str, Any] = hidden_act snake_case_ :Optional[Any] = hidden_dropout_prob snake_case_ :Optional[Any] = attention_probs_dropout_prob snake_case_ :List[str] = type_sequence_label_size snake_case_ :Optional[Any] = initializer_range snake_case_ :Optional[int] = scope # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) snake_case_ 
:Any = (image_size // patch_size) ** 2 snake_case_ :int = num_patches + 1 def lowerCAmelCase_ ( self: Dict ) -> Optional[int]: snake_case_ :Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) snake_case_ :str = None if self.use_labels: snake_case_ :List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) snake_case_ :Optional[int] = self.get_config() return config, pixel_values, labels def lowerCAmelCase_ ( self: List[str] ) -> List[str]: return ViTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=snake_case , initializer_range=self.initializer_range , ) def lowerCAmelCase_ ( self: Union[str, Any] , snake_case: Union[str, Any] , snake_case: List[Any] , snake_case: Optional[Any] ) -> int: snake_case_ :List[str] = TFViTModel(config=snake_case ) snake_case_ :Tuple = model(snake_case , training=snake_case ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) # Test with an image with different size than the one specified in config. 
snake_case_ :str = self.image_size // 2 snake_case_ :str = pixel_values[:, :, :image_size, :image_size] snake_case_ :List[str] = model(snake_case , interpolate_pos_encoding=snake_case , training=snake_case ) snake_case_ :int = (image_size // self.patch_size) ** 2 + 1 self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, seq_length, self.hidden_size) ) def lowerCAmelCase_ ( self: Union[str, Any] , snake_case: List[Any] , snake_case: int , snake_case: Tuple ) -> Optional[int]: snake_case_ :List[str] = self.type_sequence_label_size snake_case_ :List[str] = TFViTForImageClassification(snake_case ) snake_case_ :str = model(snake_case , labels=snake_case , training=snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # Test with an image with different size than the one specified in config. snake_case_ :Optional[Any] = self.image_size // 2 snake_case_ :int = pixel_values[:, :, :image_size, :image_size] snake_case_ :Optional[int] = model(snake_case , interpolate_pos_encoding=snake_case , training=snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images snake_case_ :Tuple = 1 snake_case_ :Tuple = TFViTForImageClassification(snake_case ) snake_case_ :Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) snake_case_ :List[Any] = model(snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def lowerCAmelCase_ ( self: List[Any] ) -> Optional[int]: snake_case_ :List[str] = self.prepare_config_and_inputs() snake_case_, snake_case_, snake_case_ :int = config_and_inputs snake_case_ :List[Any] = {"""pixel_values""": pixel_values} return config, inputs_dict @require_tf class lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ): '''simple docstring''' _A : str = (TFViTModel, TFViTForImageClassification) if is_tf_available() 
else () _A : Any = ( {"""feature-extraction""": TFViTModel, """image-classification""": TFViTForImageClassification} if is_tf_available() else {} ) _A : int = False _A : List[Any] = False _A : Any = False def lowerCAmelCase_ ( self: str ) -> str: snake_case_ :Dict = TFViTModelTester(self ) snake_case_ :str = ConfigTester(self , config_class=snake_case , has_text_modality=snake_case , hidden_size=37 ) def lowerCAmelCase_ ( self: Optional[int] ) -> Union[str, Any]: self.config_tester.run_common_tests() @unittest.skip(reason="""ViT does not use inputs_embeds""" ) def lowerCAmelCase_ ( self: Dict ) -> str: pass @unittest.skip(reason="""ViT does not use inputs_embeds""" ) def lowerCAmelCase_ ( self: Dict ) -> List[str]: pass def lowerCAmelCase_ ( self: Optional[int] ) -> Dict: snake_case_, snake_case_ :Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case_ :str = model_class(snake_case ) self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) ) snake_case_ :Any = model.get_output_embeddings() self.assertTrue(x is None or isinstance(snake_case , tf.keras.layers.Layer ) ) def lowerCAmelCase_ ( self: Tuple ) -> Optional[int]: snake_case_, snake_case_ :str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case_ :str = model_class(snake_case ) snake_case_ :int = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic snake_case_ :str = [*signature.parameters.keys()] snake_case_ :Tuple = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , snake_case ) def lowerCAmelCase_ ( self: Any ) -> int: snake_case_ :List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case ) def lowerCAmelCase_ ( self: Any ) -> Dict: snake_case_ :Optional[int] = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_for_image_classification(*snake_case ) @slow def lowerCAmelCase_ ( self: Any ) -> Optional[Any]: snake_case_ :Dict = TFViTModel.from_pretrained("""google/vit-base-patch16-224""" ) self.assertIsNotNone(snake_case ) def A_ ( ): '''simple docstring''' snake_case_ :List[str] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_tf @require_vision class lowerCamelCase ( unittest.TestCase ): '''simple docstring''' @cached_property def lowerCAmelCase_ ( self: str ) -> Union[str, Any]: return ViTImageProcessor.from_pretrained("""google/vit-base-patch16-224""" ) if is_vision_available() else None @slow def lowerCAmelCase_ ( self: Dict ) -> int: snake_case_ :int = TFViTForImageClassification.from_pretrained("""google/vit-base-patch16-224""" ) snake_case_ :Any = self.default_image_processor snake_case_ :List[str] = prepare_img() snake_case_ :int = image_processor(images=snake_case , return_tensors="""tf""" ) # forward pass snake_case_ :Tuple = model(**snake_case ) # verify the logits snake_case_ :str = tf.TensorShape((1, 1_000) ) self.assertEqual(outputs.logits.shape , snake_case ) snake_case_ :int = tf.constant([-0.2_7_4_4, 0.8_2_1_5, -0.0_8_3_6] ) tf.debugging.assert_near(outputs.logits[0, :3] , snake_case , atol=1E-4 )
310
"""Convert a raw BlinkDL RWKV checkpoint (hosted on the Hugging Face Hub) to the
Hugging Face `transformers` format (config + tokenizer + sharded weights)."""

import argparse
import gc
import json
import os
import re

import torch
from huggingface_hub import hf_hub_download

from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint


# Number of transformer blocks for each released RWKV size.
NUM_HIDDEN_LAYERS_MAPPING = {
    "169M": 12,
    "430M": 24,
    "1B5": 24,
    "3B": 32,
    "7B": 32,
    "14B": 40,
}

# Hidden dimension for each released RWKV size.
# NOTE: name kept with its historical typo ("HIDEN") to match the original script.
HIDEN_SIZE_MAPPING = {
    "169M": 768,
    "430M": 1024,
    "1B5": 2048,
    "3B": 2560,
    "7B": 4096,
    "14B": 5120,
}


def convert_state_dict(state_dict):
    """Rename the keys of a raw RWKV ``state_dict`` in place to the HF ``RwkvModel`` layout.

    Args:
        state_dict (dict): mapping of raw RWKV parameter names to tensors; mutated in place.

    Returns:
        dict: the same ``state_dict`` object, with renamed keys.
    """
    state_dict_keys = list(state_dict.keys())
    for name in state_dict_keys:
        weight = state_dict.pop(name)
        # emb -> embedding
        if name.startswith("emb."):
            name = name.replace("emb.", "embeddings.")
        # ln_0 -> pre_ln (only present at block 0)
        if name.startswith("blocks.0.ln0"):
            name = name.replace("blocks.0.ln0", "blocks.0.pre_ln")
        # att -> attention
        name = re.sub(r"blocks\.(\d+)\.att", r"blocks.\1.attention", name)
        # ffn -> feed_forward
        name = re.sub(r"blocks\.(\d+)\.ffn", r"blocks.\1.feed_forward", name)
        # time_mix_k -> time_mix_key
        if name.endswith(".time_mix_k"):
            name = name.replace(".time_mix_k", ".time_mix_key")
        # time_mix_v -> time_mix_value
        if name.endswith(".time_mix_v"):
            name = name.replace(".time_mix_v", ".time_mix_value")
        # time_mix_r -> time_mix_receptance
        if name.endswith(".time_mix_r"):
            name = name.replace(".time_mix_r", ".time_mix_receptance")

        # Everything except the LM head lives under the `rwkv.` base-model prefix.
        if name != "head.weight":
            name = "rwkv." + name

        state_dict[name] = weight
    return state_dict


def convert_rmkv_checkpoint_to_hf_format(
    repo_id, checkpoint_file, output_dir, size=None, tokenizer_file=None, push_to_hub=False, model_name=None
):
    """Download ``checkpoint_file`` from ``repo_id``, convert it and save it to ``output_dir``.

    Args:
        repo_id (str): Hub repo holding the raw checkpoint.
        checkpoint_file (str): name of the checkpoint file inside the repo.
        output_dir (str): where the converted model/config/tokenizer are written.
        size (str, optional): one of the keys of ``NUM_HIDDEN_LAYERS_MAPPING``; inferred
            from ``checkpoint_file`` when omitted.
        tokenizer_file (str, optional): tokenizer JSON; defaults to the GPT-NeoX-20B tokenizer.
        push_to_hub (bool): whether to push the converted model to the Hub.
        model_name (str, optional): Hub repo name to push to (required when ``push_to_hub``).

    Raises:
        ValueError: if the size cannot be inferred / is unknown, or ``push_to_hub`` is set
            without a ``model_name``.
    """
    # 1. If possible, build the tokenizer.
    if tokenizer_file is None:
        print("No `--tokenizer_file` provided, we will use the default tokenizer.")
        vocab_size = 50277
        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
    else:
        tokenizer = PreTrainedTokenizerFast(tokenizer_file=tokenizer_file)
        vocab_size = len(tokenizer)
    tokenizer.save_pretrained(output_dir)

    # 2. Build the config
    possible_sizes = list(NUM_HIDDEN_LAYERS_MAPPING.keys())
    if size is None:
        # Try to infer size from the checkpoint name
        for candidate in possible_sizes:
            if candidate in checkpoint_file:
                size = candidate
                break
        if size is None:
            raise ValueError("Could not infer the size, please provide it with the `--size` argument.")
    if size not in possible_sizes:
        raise ValueError(f"`size` should be one of {possible_sizes}, got {size}.")

    config = RwkvConfig(
        vocab_size=vocab_size,
        num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size],
        hidden_size=HIDEN_SIZE_MAPPING[size],
    )
    config.save_pretrained(output_dir)

    # 3. Download model file then convert state_dict
    model_file = hf_hub_download(repo_id, checkpoint_file)
    state_dict = torch.load(model_file, map_location="cpu")
    state_dict = convert_state_dict(state_dict)

    # 4. Split in shards and save
    shards, index = shard_checkpoint(state_dict)
    for shard_file, shard in shards.items():
        torch.save(shard, os.path.join(output_dir, shard_file))

    if index is not None:
        save_index_file = os.path.join(output_dir, WEIGHTS_INDEX_NAME)
        # Save the index as well
        with open(save_index_file, "w", encoding="utf-8") as f:
            content = json.dumps(index, indent=2, sort_keys=True) + "\n"
            f.write(content)

    # 5. Clean up shards (for some reason the file PyTorch saves take the same space as the whole state_dict
    print(
        "Cleaning up shards. This may error with an OOM error, it this is the case don't worry you still have converted the model."
    )
    shard_files = list(shards.keys())

    # Free the full state_dict before re-loading shards one by one to keep peak memory down.
    del state_dict
    del shards
    gc.collect()

    for shard_file in shard_files:
        state_dict = torch.load(os.path.join(output_dir, shard_file))
        # Re-save with contiguous CPU clones so the shards don't alias the full checkpoint storage.
        torch.save({k: v.cpu().clone() for k, v in state_dict.items()}, os.path.join(output_dir, shard_file))

    del state_dict
    gc.collect()

    if push_to_hub:
        if model_name is None:
            raise ValueError("Please provide a `model_name` to push the model to the Hub.")
        model = AutoModelForCausalLM.from_pretrained(output_dir)
        model.push_to_hub(model_name, max_shard_size="2GB")
        tokenizer.push_to_hub(model_name)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--repo_id", default=None, type=str, required=True, help="Repo ID from which to pull the checkpoint."
    )
    parser.add_argument(
        "--checkpoint_file", default=None, type=str, required=True, help="Name of the checkpoint file in the repo."
    )
    parser.add_argument(
        "--output_dir", default=None, type=str, required=True, help="Where to save the converted model."
    )
    parser.add_argument(
        "--tokenizer_file",
        default=None,
        type=str,
        help="Path to the tokenizer file to use (if not provided, only the model is converted).",
    )
    parser.add_argument(
        "--size",
        default=None,
        type=str,
        help="Size of the model. Will be inferred from the `checkpoint_file` if not passed.",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Push to the Hub the converted model.",
    )
    parser.add_argument(
        "--model_name",
        default=None,
        type=str,
        help="Name of the pushed model on the Hub, including the username / organization.",
    )

    args = parser.parse_args()
    convert_rmkv_checkpoint_to_hf_format(
        args.repo_id,
        args.checkpoint_file,
        args.output_dir,
        size=args.size,
        tokenizer_file=args.tokenizer_file,
        push_to_hub=args.push_to_hub,
        model_name=args.model_name,
    )
310
1
"""Utilities for running a layer over the batch dimensions of its inputs in chunks,
trading compute for peak memory (openfold-style ``chunk_layer`` and a tuner for the
largest chunk size that fits in memory)."""

import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union

import torch

from .tensor_utils import tensor_tree_map, tree_map


def _fetch_dims(tree: Union[dict, list, tuple, torch.Tensor]) -> List[Tuple[int, ...]]:
    """Collect the shapes of every tensor leaf in a nested dict/list/tuple tree."""
    shapes = []
    if isinstance(tree, dict):
        for v in tree.values():
            shapes.extend(_fetch_dims(v))
    elif isinstance(tree, (list, tuple)):
        for t in tree:
            shapes.extend(_fetch_dims(t))
    elif isinstance(tree, torch.Tensor):
        shapes.append(tree.shape)
    else:
        raise ValueError("Not supported")

    return shapes


@torch.jit.ignore
def _flat_idx_to_idx(flat_idx: int, dims: Tuple[int, ...]) -> Tuple[int, ...]:
    """Convert a flat index into a multi-dimensional index for a tensor of shape ``dims``."""
    idx = []
    for d in reversed(dims):
        idx.append(flat_idx % d)
        flat_idx = flat_idx // d

    return tuple(reversed(idx))


@torch.jit.ignore
def _get_minimal_slice_set(
    start: Sequence[int],
    end: Sequence[int],
    dims: Sequence[int],
    start_edges: Optional[Sequence[bool]] = None,
    end_edges: Optional[Sequence[bool]] = None,
) -> List[Tuple[slice, ...]]:
    """Return a minimal list of slice tuples that together cover the (inclusive) index
    range ``start``..``end`` of a tensor with batch shape ``dims``.

    # start_edges and end_edges both indicate whether, starting from any given
    # dimension, the start/end index is at the top/bottom edge of the
    # corresponding tensor, modeled as a tree
    """

    def reduce_edge_list(l: List[bool]) -> None:
        # An index is only "at the edge" if all trailing dimensions are too.
        tally = True
        for i in range(len(l)):
            reversed_idx = -1 * (i + 1)
            l[reversed_idx] &= tally
            tally = l[reversed_idx]

    if start_edges is None:
        start_edges = [s == 0 for s in start]
        reduce_edge_list(start_edges)
    if end_edges is None:
        end_edges = [e == (d - 1) for e, d in zip(end, dims)]
        reduce_edge_list(end_edges)

    # Base cases. Either start/end are empty and we're done, or the final,
    # one-dimensional tensor can be simply sliced
    if len(start) == 0:
        return [()]
    elif len(start) == 1:
        return [(slice(start[0], end[0] + 1),)]

    slices = []
    path_list = []

    # Dimensions common to start and end can be selected directly
    for s, e in zip(start, end):
        if s == e:
            path_list.append(slice(s, s + 1))
        else:
            break

    path = tuple(path_list)
    divergence_idx = len(path)

    # start == end, and we're done
    if divergence_idx == len(dims):
        return [path]

    def upper() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None

        sdi = start[divergence_idx]
        return tuple(
            path + (slice(sdi, sdi + 1),) + s
            for s in _get_minimal_slice_set(
                start[divergence_idx + 1 :],
                [d - 1 for d in dims[divergence_idx + 1 :]],
                dims[divergence_idx + 1 :],
                start_edges=start_edges[divergence_idx + 1 :],
                end_edges=[True for _ in end_edges[divergence_idx + 1 :]],
            )
        )

    def lower() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None

        edi = end[divergence_idx]
        return tuple(
            path + (slice(edi, edi + 1),) + s
            for s in _get_minimal_slice_set(
                [0 for _ in start[divergence_idx + 1 :]],
                end[divergence_idx + 1 :],
                dims[divergence_idx + 1 :],
                start_edges=[True for _ in start_edges[divergence_idx + 1 :]],
                end_edges=end_edges[divergence_idx + 1 :],
            )
        )

    # If both start and end are at the edges of the subtree rooted at
    # divergence_idx, we can just select the whole subtree at once
    if start_edges[divergence_idx] and end_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx] + 1),))
    # If just start is at the edge, we can grab almost all of the subtree,
    # treating only the ragged bottom edge as an edge case
    elif start_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx]),))
        slices.extend(lower())
    # Analogous to the previous case, but the top is ragged this time
    elif end_edges[divergence_idx]:
        slices.extend(upper())
        slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx] + 1),))
    # If both sides of the range are ragged, we need to handle both sides
    # separately. If there's contiguous meat in between them, we can index it
    # in one big chunk
    else:
        slices.extend(upper())
        middle_ground = end[divergence_idx] - start[divergence_idx]
        if middle_ground > 1:
            slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx]),))
        slices.extend(lower())

    return slices


@torch.jit.ignore
def _chunk_slice(t: torch.Tensor, flat_start: int, flat_end: int, no_batch_dims: int) -> torch.Tensor:
    """Equivalent to ``t.reshape((-1,) + t.shape[no_batch_dims:])[flat_start:flat_end]``,
    but without materializing the flattened tensor (avoids a copy for non-contiguous t)."""
    batch_dims = t.shape[:no_batch_dims]
    start_idx = list(_flat_idx_to_idx(flat_start, batch_dims))
    # _get_minimal_slice_set is inclusive
    end_idx = list(_flat_idx_to_idx(flat_end - 1, batch_dims))

    # Get an ordered list of slices to perform
    slices = _get_minimal_slice_set(
        start_idx,
        end_idx,
        batch_dims,
    )

    sliced_tensors = [t[s] for s in slices]

    return torch.cat([s.view((-1,) + t.shape[no_batch_dims:]) for s in sliced_tensors])


def chunk_layer(
    layer: Callable,
    inputs: Dict[str, Any],
    chunk_size: int,
    no_batch_dims: int,
    low_mem: bool = False,
    _out: Any = None,
    _add_into_out: bool = False,
) -> Any:
    """Run ``layer`` over ``inputs`` in chunks of ``chunk_size`` along the flattened
    leading ``no_batch_dims`` batch dimensions, then reassemble the outputs.

    Args:
        layer: callable invoked as ``layer(**chunked_inputs)``.
        inputs: (possibly nested) dict of tensor arguments; leading dims are broadcast.
        chunk_size: number of flattened batch elements per call.
        no_batch_dims: how many leading dims are treated as batch dims.
        low_mem: avoid materializing expanded/flattened inputs up front.
        _out: optional pre-allocated output tree to write into.
        _add_into_out: accumulate (+=) into ``_out`` instead of overwriting.

    Returns:
        The same structure ``layer`` returns (tensor, tuple, or dict of tensors),
        with the original batch dimensions restored.
    """
    if not (len(inputs) > 0):
        raise ValueError("Must provide at least one input")

    # Broadcast every input's batch dims to a common shape.
    initial_dims = [shape[:no_batch_dims] for shape in _fetch_dims(inputs)]
    orig_batch_dims = tuple([max(s) for s in zip(*initial_dims)])

    def _prep_inputs(t: torch.Tensor) -> torch.Tensor:
        if not low_mem:
            if not sum(t.shape[:no_batch_dims]) == no_batch_dims:
                t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
            t = t.reshape(-1, *t.shape[no_batch_dims:])
        else:
            # Low-memory mode defers flattening to _chunk_slice, one chunk at a time.
            t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
        return t

    prepped_inputs = tensor_tree_map(_prep_inputs, inputs)
    prepped_outputs = None
    if _out is not None:
        prepped_outputs = tensor_tree_map(lambda t: t.view([-1] + list(t.shape[no_batch_dims:])), _out)

    flat_batch_dim = 1
    for d in orig_batch_dims:
        flat_batch_dim *= d

    # Ceiling division: last chunk may be short.
    no_chunks = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)

    def _select_chunk(t: torch.Tensor) -> torch.Tensor:
        # Size-1 batch tensors broadcast against every chunk, so leave them whole.
        return t[i : i + chunk_size] if t.shape[0] != 1 else t

    i = 0
    out = prepped_outputs
    for _ in range(no_chunks):
        # Chunk the input
        if not low_mem:
            select_chunk = _select_chunk
        else:
            select_chunk = partial(
                _chunk_slice,
                flat_start=i,
                flat_end=min(flat_batch_dim, i + chunk_size),
                no_batch_dims=len(orig_batch_dims),
            )

        chunks = tensor_tree_map(select_chunk, prepped_inputs)

        # Run the layer on the chunk
        output_chunk = layer(**chunks)

        # Allocate space for the output
        if out is None:
            out = tensor_tree_map(lambda t: t.new_zeros((flat_batch_dim,) + t.shape[1:]), output_chunk)

        # Put the chunk in its pre-allocated space
        if isinstance(output_chunk, dict):

            def assign(d1: dict, d2: dict) -> None:
                for k, v in d1.items():
                    if isinstance(v, dict):
                        assign(v, d2[k])
                    else:
                        if _add_into_out:
                            v[i : i + chunk_size] += d2[k]
                        else:
                            v[i : i + chunk_size] = d2[k]

            assign(out, output_chunk)
        elif isinstance(output_chunk, tuple):
            for x1, x2 in zip(out, output_chunk):
                if _add_into_out:
                    x1[i : i + chunk_size] += x2
                else:
                    x1[i : i + chunk_size] = x2
        elif isinstance(output_chunk, torch.Tensor):
            if _add_into_out:
                out[i : i + chunk_size] += output_chunk
            else:
                out[i : i + chunk_size] = output_chunk
        else:
            raise ValueError("Not supported")

        i += chunk_size

    # Restore the original (unflattened) batch dimensions.
    out = tensor_tree_map(lambda t: t.view(orig_batch_dims + t.shape[1:]), out)

    return out


class ChunkSizeTuner:
    """Finds (and caches) the largest power-of-two chunk size that runs without a
    RuntimeError (e.g. CUDA OOM), re-tuning only when the argument shapes change."""

    def __init__(
        self,
        # Extremely large chunk sizes bring no further speedup in practice.
        max_chunk_size: int = 512,
    ):
        self.max_chunk_size = max_chunk_size
        self.cached_chunk_size: Optional[int] = None
        self.cached_arg_data: Optional[tuple] = None

    def _determine_favorable_chunk_size(self, fn: Callable, args: tuple, min_chunk_size: int) -> int:
        """Binary-search the candidate chunk sizes for the largest viable one."""
        logging.info("Tuning chunk size...")

        if min_chunk_size >= self.max_chunk_size:
            return min_chunk_size

        candidates = [2**l for l in range(int(math.log(self.max_chunk_size, 2)) + 1)]
        candidates = [c for c in candidates if c > min_chunk_size]
        candidates = [min_chunk_size] + candidates
        # Nudge the top candidate so max_chunk_size itself is exercised.
        candidates[-1] += 4

        def test_chunk_size(chunk_size: int) -> bool:
            try:
                with torch.no_grad():
                    fn(*args, chunk_size=chunk_size)
                return True
            except RuntimeError:
                # Treat any runtime failure (typically OOM) as "too large".
                return False

        min_viable_chunk_size_index = 0
        i = len(candidates) - 1
        while i > min_viable_chunk_size_index:
            viable = test_chunk_size(candidates[i])
            if not viable:
                i = (min_viable_chunk_size_index + i) // 2
            else:
                min_viable_chunk_size_index = i
                i = (i + len(candidates) - 1) // 2

        return candidates[min_viable_chunk_size_index]

    def _compare_arg_caches(self, ac1: Iterable, ac2: Iterable) -> bool:
        """Return True when two cached argument trees (shapes/values) are identical."""
        consistent = True
        for a1, a2 in zip(ac1, ac2):
            assert type(a1) == type(a2)
            if isinstance(a1, (list, tuple)):
                consistent &= self._compare_arg_caches(a1, a2)
            elif isinstance(a1, dict):
                # Sort by key so dict ordering doesn't affect the comparison.
                a1_items = [v for _, v in sorted(a1.items(), key=lambda x: x[0])]
                a2_items = [v for _, v in sorted(a2.items(), key=lambda x: x[0])]
                consistent &= self._compare_arg_caches(a1_items, a2_items)
            else:
                consistent &= a1 == a2

        return consistent

    def tune_chunk_size(
        self,
        representative_fn: Callable,
        args: tuple,
        min_chunk_size: int,
    ) -> int:
        """Return a good chunk size for ``representative_fn(*args)``, re-tuning only
        when the argument shapes/values differ from the cached ones."""
        consistent = True
        # Replace tensors by their shapes so the cache compares cheap metadata only.
        arg_data: tuple = tree_map(lambda a: a.shape if isinstance(a, torch.Tensor) else a, args, object)
        if self.cached_arg_data is not None:
            # If args have changed shape/value, we need to re-tune
            assert len(self.cached_arg_data) == len(arg_data)
            consistent = self._compare_arg_caches(self.cached_arg_data, arg_data)
        else:
            # Otherwise, we can reuse the precomputed value
            consistent = False

        if not consistent:
            self.cached_chunk_size = self._determine_favorable_chunk_size(
                representative_fn,
                args,
                min_chunk_size,
            )
            self.cached_arg_data = arg_data

        assert self.cached_chunk_size is not None

        return self.cached_chunk_size
100
"""Conditional DETR model configuration."""

import copy
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING


logger = logging.get_logger(__name__)

CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/conditional-detr-resnet-50": (
        "https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json"
    ),
}


class ConditionalDetrConfig(PretrainedConfig):
    """Configuration for a Conditional DETR model.

    Stores the backbone choice, transformer encoder/decoder geometry, and the
    Hungarian-matcher / loss coefficients used at training time. Defaults match
    ``microsoft/conditional-detr-resnet-50``.
    """

    model_type = "conditional_detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=2,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        cls_loss_coefficient=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        focal_alpha=0.25,
        **kwargs,
    ):
        # `backbone_config` (a transformers backbone) and `use_timm_backbone` are
        # mutually exclusive ways of specifying the CNN backbone.
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                # Re-hydrate a serialized backbone config into its concrete config class.
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        # Mirrors `encoder_layers` under the generic transformers attribute name.
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.cls_loss_coefficient = cls_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested backbone config."""
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class ConditionalDetrOnnxConfig(OnnxConfig):
    """ONNX export configuration for Conditional DETR."""

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Dynamic axes for the exported graph.
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
300
0
"""BertGeneration model configuration."""

from ...configuration_utils import PretrainedConfig


class BertGenerationConfig(PretrainedConfig):
    """Configuration for a ``BertGeneration`` encoder/decoder.

    Defaults match the ``google/bert_for_seq_generation_L-24_bbc_encoder``
    architecture (24 layers, hidden size 1024).
    """

    model_type = "bert-generation"

    def __init__(
        self,
        vocab_size=50358,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=1,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        # Special-token ids are consumed by the PretrainedConfig base class.
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
277
import json import os import unittest from typing import Tuple from transformers import WavaVecaPhonemeCTCTokenizer from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES from transformers.models.wavaveca_phoneme.tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizerOutput from transformers.testing_utils import require_phonemizer from ...test_tokenization_common import TokenizerTesterMixin @require_phonemizer class lowercase__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase ): A__= WavaVecaPhonemeCTCTokenizer A__= False def _UpperCAmelCase ( self : Union[str, Any] ): """simple docstring""" super().setUp() UpperCAmelCase__ = ( "<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː " "ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː " "ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 " "oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ " "pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ " "yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ " "əʊ S ɡʲ onɡ2 u\" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ " "ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ " "ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ " "uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. 
oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ " "ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ " "ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ " "ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4" ).split(" " ) UpperCAmelCase__ = dict(zip(_lowercase , range(len(_lowercase ) ) ) ) UpperCAmelCase__ = {"pad_token": "<pad>", "unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>"} UpperCAmelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(_lowercase ) + "\n" ) def _UpperCAmelCase ( self : str , _lowercase : Union[str, Any] , _lowercase : str=False , _lowercase : Tuple=20 , _lowercase : int=5 ): """simple docstring""" UpperCAmelCase__ = [(i, tokenizer.decode([i] , clean_up_tokenization_spaces=_lowercase )) for i in range(len(_lowercase ) )] UpperCAmelCase__ = list(filter(lambda _lowercase : [t[0]] == tokenizer.encode(t[1] , do_phonemize=_lowercase ) , _lowercase ) ) if max_length is not None and len(_lowercase ) > max_length: UpperCAmelCase__ = toks[:max_length] if min_length is not None and len(_lowercase ) < min_length and len(_lowercase ) > 0: while len(_lowercase ) < min_length: UpperCAmelCase__ = toks + toks # toks_str = [t[1] for t in toks] UpperCAmelCase__ = [t[0] for t in toks] # Ensure consistency UpperCAmelCase__ = tokenizer.decode(_lowercase , clean_up_tokenization_spaces=_lowercase ) if " " not in output_txt and len(_lowercase ) > 1: UpperCAmelCase__ = ( tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=_lowercase ) + " " + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=_lowercase ) ) if with_prefix_space: UpperCAmelCase__ = " " + output_txt UpperCAmelCase__ = tokenizer.encode(_lowercase , add_special_tokens=_lowercase ) return output_txt, output_ids def _UpperCAmelCase ( self : Optional[Any] , 
**_lowercase : Optional[int] ): """simple docstring""" kwargs.update(self.special_tokens_map ) return WavaVecaPhonemeCTCTokenizer.from_pretrained(self.tmpdirname , **_lowercase ) def _UpperCAmelCase ( self : Union[str, Any] ): """simple docstring""" UpperCAmelCase__ = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" ) # check adding a single token tokenizer.add_tokens("xxx" ) UpperCAmelCase__ = tokenizer("m xxx ɪ" , do_phonemize=_lowercase ).input_ids self.assertEqual(_lowercase , [13, 3_92, 17] ) # xxx should be last token tokenizer.add_tokens(["aaa", "bbb", "ccc"] ) UpperCAmelCase__ = tokenizer("m aaa ɪ ccc" , do_phonemize=_lowercase ).input_ids self.assertEqual(_lowercase , [13, 3_93, 17, 3_95] ) # aaa and ccc should be after xxx and 2 after aaa UpperCAmelCase__ = tokenizer("maɪ c" , do_phonemize=_lowercase ).input_ids self.assertEqual(_lowercase , [3, 2_00] ) # mai should be <unk> (=3) def _UpperCAmelCase ( self : Any ): """simple docstring""" UpperCAmelCase__ = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" ) UpperCAmelCase__ = "Hello how are you" UpperCAmelCase__ = tokenizer.phonemize(_lowercase , phonemizer_lang="en-us" ) self.assertEqual(_lowercase , "h ə l oʊ h aʊ ɑːɹ j uː" ) def _UpperCAmelCase ( self : Dict ): """simple docstring""" UpperCAmelCase__ = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" ) UpperCAmelCase__ = "Hello how are you" UpperCAmelCase__ = tokenizer.phonemize(_lowercase , phonemizer_lang="en-us" ) self.assertEqual(tokenizer(_lowercase ).input_ids , tokenizer(_lowercase , do_phonemize=_lowercase ).input_ids ) def _UpperCAmelCase ( self : int ): """simple docstring""" UpperCAmelCase__ = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" ) UpperCAmelCase__ = "Hello how are you" UpperCAmelCase__ = tokenizer.phonemize(_lowercase , phonemizer_lang="en-us" ) UpperCAmelCase__ = tokenizer.decode(tokenizer(_lowercase ).input_ids ) 
self.assertEqual(_lowercase , _lowercase ) def _UpperCAmelCase ( self : List[str] ): """simple docstring""" UpperCAmelCase__ = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" ) UpperCAmelCase__ = [ [11, 5, 15, tokenizer.pad_token_id, 15, 8, 98], [24, 22, 5, 24, 22, 5, 77], ] UpperCAmelCase__ = tokenizer.decode(sample_ids[0] ) UpperCAmelCase__ = tokenizer.batch_decode(_lowercase ) self.assertEqual(_lowercase , batch_tokens[0] ) self.assertEqual(_lowercase , ["k s ɾ ɾ l ɭʲ", "j ð s j ð s oːɹ"] ) def _UpperCAmelCase ( self : Any ): """simple docstring""" UpperCAmelCase__ = self.tokenizer_class.from_pretrained( "facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token="|" ) tokenizer.add_tokens("|" ) UpperCAmelCase__ = "Hello how are you" UpperCAmelCase__ = tokenizer.phonemize(_lowercase , phonemizer_lang="en-us" ) self.assertEqual(_lowercase , "h ə l oʊ | h aʊ | ɑːɹ | j uː |" ) def _UpperCAmelCase ( self : Union[str, Any] ): """simple docstring""" UpperCAmelCase__ = self.tokenizer_class.from_pretrained( "facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token="|" ) tokenizer.add_tokens("|" ) UpperCAmelCase__ = "Hello how are you" UpperCAmelCase__ = tokenizer.phonemize(_lowercase , phonemizer_lang="en-us" ) self.assertEqual(tokenizer(_lowercase ).input_ids , tokenizer(_lowercase , do_phonemize=_lowercase ).input_ids ) def _UpperCAmelCase ( self : str ): """simple docstring""" UpperCAmelCase__ = self.tokenizer_class.from_pretrained( "facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token="|" ) tokenizer.add_tokens("|" ) # fmt: off UpperCAmelCase__ = [ [11, 5, 15, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 15, 8, tokenizer.word_delimiter_token_id, 98], [tokenizer.word_delimiter_token_id, 24, 22, tokenizer.word_delimiter_token_id, 5, 24, 22, 5, 77], ] # fmt: on # decode with word_del_token filter UpperCAmelCase__ = tokenizer.decode(sample_ids[0] ) UpperCAmelCase__ = tokenizer.batch_decode(_lowercase ) 
self.assertEqual(_lowercase , batch_tokens[0] ) self.assertEqual(_lowercase , ["k s ɾ ɾ l ɭʲ", "j ð s j ð s oːɹ"] ) # decode with no word_del_token filter UpperCAmelCase__ = tokenizer.decode(sample_ids[0] , filter_word_delimiter_token=_lowercase ) UpperCAmelCase__ = tokenizer.batch_decode(_lowercase , filter_word_delimiter_token=_lowercase ) self.assertEqual(_lowercase , batch_tokens[0] ) self.assertEqual(_lowercase , ["k s ɾ | ɾ l | ɭʲ", "| j ð | s j ð s oːɹ"] ) def _UpperCAmelCase ( self : Tuple ): """simple docstring""" UpperCAmelCase__ = self.tokenizer_class.from_pretrained( "facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token="|" ) tokenizer.add_tokens("|" ) UpperCAmelCase__ = "Hello how are you" UpperCAmelCase__ = tokenizer.phonemize(_lowercase , phonemizer_lang="en-us" ) UpperCAmelCase__ = tokenizer.decode(tokenizer(_lowercase ).input_ids , filter_word_delimiter_token=_lowercase ) self.assertEqual(_lowercase , _lowercase ) def _UpperCAmelCase ( self : List[str] ): """simple docstring""" UpperCAmelCase__ = self.tokenizer_class.from_pretrained( "facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token="|" ) tokenizer.add_tokens("|" ) UpperCAmelCase__ = "Hello how are you" UpperCAmelCase__ = tokenizer.phonemize(_lowercase , phonemizer_lang="en-us" ) UpperCAmelCase__ = tokenizer.decode(tokenizer(_lowercase ).input_ids , filter_word_delimiter_token=_lowercase ) self.assertEqual(" ".join([p.strip() for p in phonemes.split(" |" )] ).strip() , _lowercase ) def _UpperCAmelCase ( self : str ): """simple docstring""" UpperCAmelCase__ = self.tokenizer_class.from_pretrained( "facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token=_lowercase ) UpperCAmelCase__ = "Hello how are you" UpperCAmelCase__ = tokenizer(_lowercase , phonemizer_lang="en-us" ).input_ids UpperCAmelCase__ = tokenizer(_lowercase , phonemizer_lang="fr-fr" ).input_ids self.assertNotEqual(_lowercase , _lowercase ) UpperCAmelCase__ = tokenizer.decode(_lowercase ) UpperCAmelCase__ = 
tokenizer.decode(_lowercase ) self.assertEqual(_lowercase , "h ə l oʊ h aʊ ɑːɹ j uː" ) self.assertEqual(_lowercase , "ɛ l o h aʊ a ʁ j u" ) def _UpperCAmelCase ( self : int ): """simple docstring""" UpperCAmelCase__ = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" ) UpperCAmelCase__ = "Hello how Are you" UpperCAmelCase__ = "hello how are you" UpperCAmelCase__ = tokenizer(_lowercase ).input_ids UpperCAmelCase__ = tokenizer(_lowercase ).input_ids self.assertEqual(_lowercase , _lowercase ) def _UpperCAmelCase ( self : Optional[Any] ): """simple docstring""" UpperCAmelCase__ = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" ) tokenizer.add_tokens(["!", "?"] ) tokenizer.add_special_tokens({"cls_token": "$$$"} ) # fmt: off UpperCAmelCase__ = [ [11, 5, 15, tokenizer.pad_token_id, 15, 8, 98, 3_92, 3_92, 3_93, 3_92, 3_92, 3_93, 3_94, 3_94], [24, 22, 5, 24, 22, 5, 77, tokenizer.pad_token_id, 3_94, 3_94], ] # fmt: on UpperCAmelCase__ = tokenizer.batch_decode(_lowercase ) self.assertEqual(_lowercase , ["k s ɾ ɾ l ɭʲ!?!? 
$$$", "j ð s j ð s oːɹ $$$"] ) @staticmethod def _UpperCAmelCase ( _lowercase : int , _lowercase : Tuple ): """simple docstring""" UpperCAmelCase__ = [d[key] for d in offsets] return retrieved_list def _UpperCAmelCase ( self : Union[str, Any] ): """simple docstring""" UpperCAmelCase__ = self.get_tokenizer(word_delimiter_token="|" ) tokenizer.add_tokens("|" ) # fmt: off # ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ" UpperCAmelCase__ = [11, 5, 5, 5, 15, 15, tokenizer.pad_token_id, 15, 15, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 15, 8, 8, 8, tokenizer.word_delimiter_token_id, 98] # fmt: on UpperCAmelCase__ = tokenizer.decode(_lowercase , output_char_offsets=_lowercase , filter_word_delimiter_token=_lowercase ) # check Wav2Vec2CTCTokenizerOutput keys for char self.assertEqual(len(outputs.keys() ) , 2 ) self.assertTrue("text" in outputs ) self.assertTrue("char_offsets" in outputs ) self.assertTrue(isinstance(_lowercase , _lowercase ) ) # check that order of chars is correct and identical for both outputs self.assertEqual(" ".join(self.get_from_offsets(outputs["char_offsets"] , "char" ) ) , outputs.text ) self.assertListEqual( self.get_from_offsets(outputs["char_offsets"] , "char" ) , ["k", "s", "ɾ", "ɾ", "|", "ɾ", "l", "|", "ɭʲ"] ) # check that offsets are actually correct for char # 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token, # 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98 self.assertListEqual( self.get_from_offsets(outputs["char_offsets"] , "start_offset" ) , [0, 1, 4, 7, 9, 11, 12, 15, 16] ) self.assertListEqual( self.get_from_offsets(outputs["char_offsets"] , "end_offset" ) , [1, 4, 6, 9, 10, 12, 15, 16, 17] ) def _UpperCAmelCase ( self : Any ): """simple docstring""" UpperCAmelCase__ = self.get_tokenizer(word_delimiter_token="|" ) def check_list_tuples_equal(_lowercase : Any , _lowercase : Optional[Any] ): 
self.assertTrue(isinstance(_lowercase , _lowercase ) ) self.assertTrue(isinstance(outputs_list[0] , _lowercase ) ) # transform list to ModelOutput UpperCAmelCase__ = WavaVecaPhonemeCTCTokenizerOutput( {k: [d[k] for d in outputs_list] for k in outputs_list[0]} ) self.assertListEqual(outputs_batch["text"] , outputs_batch_a["text"] ) def recursive_check(_lowercase : Optional[int] , _lowercase : Optional[int] ): if isinstance(_lowercase , _lowercase ): [recursive_check(_lowercase , _lowercase ) for la, la in zip(_lowercase , _lowercase )] self.assertEqual(_lowercase , _lowercase ) if "char_offsets" in outputs_batch: recursive_check(outputs_batch["char_offsets"] , outputs_batch_a["char_offsets"] ) # fmt: off UpperCAmelCase__ = [ [11, 5, 15, tokenizer.pad_token_id, 15, 4, 8, 98, 32, 32, 32, 32, 4, 33, tokenizer.word_delimiter_token_id, 32, 32, 33, 34, 34], [24, 22, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 24, 22, 22, 22, 4, 5, 77, tokenizer.pad_token_id, 22, 22, 4, 34, 34, 34, 34], ] # fmt: on # We assume that `decode` works as expected. 
All we will check now is # the output type is correct and the output is identical to `decode` # char UpperCAmelCase__ = tokenizer.batch_decode(_lowercase , output_char_offsets=_lowercase ) UpperCAmelCase__ = [tokenizer.decode(_lowercase , output_char_offsets=_lowercase ) for ids in sample_ids] check_list_tuples_equal(_lowercase , _lowercase ) @unittest.skip("Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes" ) def _UpperCAmelCase ( self : int ): """simple docstring""" pass @unittest.skip("Wav2Vec2PhonemeTokenizer always puts spaces between phonemes" ) def _UpperCAmelCase ( self : Tuple ): """simple docstring""" pass @unittest.skip("encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency" ) def _UpperCAmelCase ( self : Union[str, Any] ): """simple docstring""" pass @unittest.skip("Wav2Vec2PhonemeModel has no max model length => no testing" ) def _UpperCAmelCase ( self : List[Any] ): """simple docstring""" pass def _UpperCAmelCase ( self : List[Any] ): """simple docstring""" UpperCAmelCase__ = self.get_tokenizers(do_lower_case=_lowercase ) for tokenizer in tokenizers: with self.subTest(F"""{tokenizer.__class__.__name__}""" ): UpperCAmelCase__ = tokenizer.vocab_size UpperCAmelCase__ = len(_lowercase ) self.assertNotEqual(_lowercase , 0 ) # We usually have added tokens from the start in tests because our vocab fixtures are # smaller than the original vocabs - let's not assert this # self.assertEqual(vocab_size, all_size) UpperCAmelCase__ = ["aaaaa bbbbbb", "cccccccccdddddddd"] UpperCAmelCase__ = tokenizer.add_tokens(_lowercase ) UpperCAmelCase__ = tokenizer.vocab_size UpperCAmelCase__ = len(_lowercase ) self.assertNotEqual(_lowercase , 0 ) self.assertEqual(_lowercase , _lowercase ) self.assertEqual(_lowercase , len(_lowercase ) ) self.assertEqual(_lowercase , all_size + len(_lowercase ) ) UpperCAmelCase__ = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l" , add_special_tokens=_lowercase ) 
self.assertGreaterEqual(len(_lowercase ) , 4 ) self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 ) UpperCAmelCase__ = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"} UpperCAmelCase__ = tokenizer.add_special_tokens(_lowercase ) UpperCAmelCase__ = tokenizer.vocab_size UpperCAmelCase__ = len(_lowercase ) self.assertNotEqual(_lowercase , 0 ) self.assertEqual(_lowercase , _lowercase ) self.assertEqual(_lowercase , len(_lowercase ) ) self.assertEqual(_lowercase , all_size_a + len(_lowercase ) ) UpperCAmelCase__ = tokenizer.encode( ">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l" , add_special_tokens=_lowercase ) self.assertGreaterEqual(len(_lowercase ) , 6 ) self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[0] , tokens[1] ) self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[-3] , tokens[-4] ) self.assertEqual(tokens[0] , tokenizer.eos_token_id ) self.assertEqual(tokens[-3] , tokenizer.pad_token_id ) @unittest.skip("The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode." ) def _UpperCAmelCase ( self : List[Any] ): """simple docstring""" pass @unittest.skip("The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode." ) def _UpperCAmelCase ( self : Optional[int] ): """simple docstring""" pass def _UpperCAmelCase ( self : Union[str, Any] ): """simple docstring""" UpperCAmelCase__ = self.get_tokenizers(fast=_lowercase , do_lower_case=_lowercase ) for tokenizer in tokenizers: with self.subTest(F"""{tokenizer.__class__.__name__}""" ): UpperCAmelCase__ = ["ð", "ɪ", "s", "ɪ", "z", "ɐ", "t", "ɛ", "k", "s", "t"] UpperCAmelCase__ = tokenizer.convert_tokens_to_string(_lowercase ) self.assertIsInstance(output["text"] , _lowercase )
277
1
"""simple docstring""" import argparse import logging import sys from unittest.mock import patch import run_glue_deebert from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow logging.basicConfig(level=logging.DEBUG) _lowerCAmelCase : Optional[int] = logging.getLogger() def lowerCamelCase_( ) -> int: '''simple docstring''' _lowerCamelCase : List[Any] = argparse.ArgumentParser() parser.add_argument("-f" ) _lowerCamelCase : Dict = parser.parse_args() return args.f class A_ ( _a ): def _lowercase ( self: Any ): '''simple docstring''' _lowerCamelCase : List[Any] = logging.StreamHandler(sys.stdout ) logger.addHandler(__lowerCAmelCase ) def _lowercase ( self: Optional[int] ,__lowerCAmelCase: Any ): '''simple docstring''' _lowerCamelCase : Any = get_gpu_count() if n_gpu > 1: pass # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560 # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py" # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split() # cmd = [sys.executable] + distributed_args + args # execute_subprocess_async(cmd, env=self.get_env()) # XXX: test the results - need to save them first into .json file else: args.insert(0 ,"run_glue_deebert.py" ) with patch.object(__lowerCAmelCase ,"argv" ,__lowerCAmelCase ): _lowerCamelCase : Dict = run_glue_deebert.main() for value in result.values(): self.assertGreaterEqual(__lowerCAmelCase ,0.6_66 ) @slow @require_torch_non_multi_gpu def _lowercase ( self: List[Any] ): '''simple docstring''' _lowerCamelCase : str = "\n --model_type roberta\n --model_name_or_path roberta-base\n --task_name MRPC\n --do_train\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --max_seq_length 128\n --per_gpu_eval_batch_size=1\n --per_gpu_train_batch_size=8\n --learning_rate 2e-4\n --num_train_epochs 3\n --overwrite_output_dir\n --seed 42\n --output_dir 
./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --save_steps 0\n --overwrite_cache\n --eval_after_first_stage\n ".split() self.run_and_check(__lowerCAmelCase ) _lowerCamelCase : str = "\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --eval_each_highway\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n ".split() self.run_and_check(__lowerCAmelCase ) _lowerCamelCase : Optional[Any] = "\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --early_exit_entropy 0.1\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n ".split() self.run_and_check(__lowerCAmelCase )
46
"""simple docstring""" def lowerCamelCase_ ( _lowerCamelCase : int = 6_0_0_8_5_1_4_7_5_1_4_3 ): try: lowerCamelCase_ = int(_lowerCamelCase ) except (TypeError, ValueError): raise TypeError('''Parameter n must be int or castable to int.''' ) if n <= 0: raise ValueError('''Parameter n must be greater than or equal to one.''' ) lowerCamelCase_ = 1 lowerCamelCase_ = 2 while i * i <= n: while n % i == 0: lowerCamelCase_ = i n //= i i += 1 if n > 1: lowerCamelCase_ = n return int(_lowerCamelCase ) if __name__ == "__main__": print(f'''{solution() = }''')
142
0
'''simple docstring''' from typing import Any, Dict, List, Union from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from ..image_utils import load_image if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING lowerCamelCase__ = logging.get_logger(__name__) lowerCamelCase__ = Dict[str, Any] lowerCamelCase__ = List[Prediction] @add_end_docstrings(__UpperCAmelCase ) class A__ ( __UpperCAmelCase ): def __init__( self : str , *a : Optional[int] , **a : Union[str, Any] ): '''simple docstring''' super().__init__(*a , **a ) if self.framework == "tf": raise ValueError(f'''The {self.__class__} is only available in PyTorch.''' ) requires_backends(self , 'vision' ) self.check_model_type( dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items() ) ) def _lowerCamelCase ( self : Dict , **a : Optional[Any] ): '''simple docstring''' lowerCAmelCase__ : Union[str, Any] = {} if "threshold" in kwargs: lowerCAmelCase__ : List[str] = kwargs['threshold'] return {}, {}, postprocess_kwargs def __call__( self : Union[str, Any] , *a : List[Any] , **a : Any ): '''simple docstring''' return super().__call__(*a , **a ) def _lowerCamelCase ( self : int , a : Dict ): '''simple docstring''' lowerCAmelCase__ : Optional[Any] = load_image(a ) lowerCAmelCase__ : str = torch.IntTensor([[image.height, image.width]] ) lowerCAmelCase__ : Any = self.image_processor(images=[image] , return_tensors='pt' ) if self.tokenizer is not None: lowerCAmelCase__ : Any = self.tokenizer(text=inputs['words'] , boxes=inputs['boxes'] , return_tensors='pt' ) lowerCAmelCase__ : List[Any] = target_size return inputs def _lowerCamelCase ( self : int , a : Union[str, Any] ): '''simple docstring''' lowerCAmelCase__ : List[Any] = model_inputs.pop('target_size' ) 
lowerCAmelCase__ : Tuple = self.model(**a ) lowerCAmelCase__ : Dict = outputs.__class__({'target_size': target_size, **outputs} ) if self.tokenizer is not None: lowerCAmelCase__ : List[Any] = model_inputs['bbox'] return model_outputs def _lowerCamelCase ( self : Tuple , a : Tuple , a : int=0.9 ): '''simple docstring''' lowerCAmelCase__ : List[str] = model_outputs['target_size'] if self.tokenizer is not None: # This is a LayoutLMForTokenClassification variant. # The OCR got the boxes and the model classified the words. lowerCAmelCase__ , lowerCAmelCase__ : List[Any] = target_size[0].tolist() def unnormalize(a : Optional[Any] ): return self._get_bounding_box( torch.Tensor( [ (width * bbox[0] / 1_000), (height * bbox[1] / 1_000), (width * bbox[2] / 1_000), (height * bbox[3] / 1_000), ] ) ) lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = model_outputs['logits'].squeeze(0 ).softmax(dim=-1 ).max(dim=-1 ) lowerCAmelCase__ : Optional[int] = [self.model.config.idalabel[prediction] for prediction in classes.tolist()] lowerCAmelCase__ : Optional[int] = [unnormalize(a ) for bbox in model_outputs['bbox'].squeeze(0 )] lowerCAmelCase__ : Optional[int] = ['score', 'label', 'box'] lowerCAmelCase__ : int = [dict(zip(a , a ) ) for vals in zip(scores.tolist() , a , a ) if vals[0] > threshold] else: # This is a regular ForObjectDetectionModel lowerCAmelCase__ : Tuple = self.image_processor.post_process_object_detection(a , a , a ) lowerCAmelCase__ : Any = raw_annotations[0] lowerCAmelCase__ : List[Any] = raw_annotation['scores'] lowerCAmelCase__ : Any = raw_annotation['labels'] lowerCAmelCase__ : Optional[Any] = raw_annotation['boxes'] lowerCAmelCase__ : Tuple = scores.tolist() lowerCAmelCase__ : Optional[Any] = [self.model.config.idalabel[label.item()] for label in labels] lowerCAmelCase__ : Optional[Any] = [self._get_bounding_box(a ) for box in boxes] # {"scores": [...], ...} --> [{"score":x, ...}, ...] 
lowerCAmelCase__ : Optional[int] = ['score', 'label', 'box'] lowerCAmelCase__ : Optional[Any] = [ dict(zip(a , a ) ) for vals in zip(raw_annotation['scores'] , raw_annotation['labels'] , raw_annotation['boxes'] ) ] return annotation def _lowerCamelCase ( self : str , a : Tuple ): '''simple docstring''' if self.framework != "pt": raise ValueError('The ObjectDetectionPipeline is only available in PyTorch.' ) lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = box.int().tolist() lowerCAmelCase__ : Union[str, Any] = { 'xmin': xmin, 'ymin': ymin, 'xmax': xmax, 'ymax': ymax, } return bbox
716
import os from collections import deque import torch from torch.utils.data import Dataset class A__ ( __magic_name__ ): def __init__( self : Union[str, Any] , a : str="" , a : str="train" ): '''simple docstring''' assert os.path.isdir(a ) lowerCAmelCase__ : Optional[Any] = [] lowerCAmelCase__ : Dict = os.listdir(a ) for story_filename in story_filenames_list: if "summary" in story_filename: continue lowerCAmelCase__ : Union[str, Any] = os.path.join(a , a ) if not os.path.isfile(a ): continue self.documents.append(a ) def __len__( self : Any ): '''simple docstring''' return len(self.documents ) def __getitem__( self : Dict , a : Any ): '''simple docstring''' lowerCAmelCase__ : Optional[int] = self.documents[idx] lowerCAmelCase__ : Union[str, Any] = document_path.split('/' )[-1] with open(a , encoding='utf-8' ) as source: lowerCAmelCase__ : List[Any] = source.read() lowerCAmelCase__ , lowerCAmelCase__ : List[Any] = process_story(a ) return document_name, story_lines, summary_lines def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> Tuple: lowerCAmelCase__ : Optional[int] = list(filter(lambda SCREAMING_SNAKE_CASE_ : len(SCREAMING_SNAKE_CASE_ ) != 0 , [line.strip() for line in raw_story.split('\n' )] ) ) # for some unknown reason some lines miss a period, add it lowerCAmelCase__ : List[Any] = [_add_missing_period(SCREAMING_SNAKE_CASE_ ) for line in nonempty_lines] # gather article lines lowerCAmelCase__ : int = [] lowerCAmelCase__ : Any = deque(SCREAMING_SNAKE_CASE_ ) while True: try: lowerCAmelCase__ : int = lines.popleft() if element.startswith('@highlight' ): break story_lines.append(SCREAMING_SNAKE_CASE_ ) except IndexError: # if "@highlight" is absent from the file we pop # all elements until there is None, raising an exception. 
return story_lines, [] # gather summary lines lowerCAmelCase__ : Tuple = list(filter(lambda SCREAMING_SNAKE_CASE_ : not t.startswith('@highlight' ) , SCREAMING_SNAKE_CASE_ ) ) return story_lines, summary_lines def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> Any: lowerCAmelCase__ : int = ['.', '!', '?', '...', '\'', '`', '"', '\u2019', '\u2019', ')'] if line.startswith('@highlight' ): return line if line[-1] in END_TOKENS: return line return line + "." def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> str: if len(SCREAMING_SNAKE_CASE_ ) > block_size: return sequence[:block_size] else: sequence.extend([pad_token_id] * (block_size - len(SCREAMING_SNAKE_CASE_ )) ) return sequence def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[int]: lowerCAmelCase__ : str = torch.ones_like(SCREAMING_SNAKE_CASE_ ) lowerCAmelCase__ : int = sequence == pad_token_id lowerCAmelCase__ : Optional[int] = 0 return mask def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Tuple: lowerCAmelCase__ : Any = [tokenizer.encode(SCREAMING_SNAKE_CASE_ ) for line in story_lines] lowerCAmelCase__ : str = [token for sentence in story_lines_token_ids for token in sentence] lowerCAmelCase__ : Dict = [tokenizer.encode(SCREAMING_SNAKE_CASE_ ) for line in summary_lines] lowerCAmelCase__ : str = [token for sentence in summary_lines_token_ids for token in sentence] return story_token_ids, summary_token_ids def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[Any]: lowerCAmelCase__ : Optional[int] = [] for sequence in batch: lowerCAmelCase__ : Union[str, Any] = -1 lowerCAmelCase__ : int = [] for s in sequence: if s == separator_token_id: sentence_num += 1 embeddings.append(sentence_num % 2 ) batch_embeddings.append(SCREAMING_SNAKE_CASE_ ) return torch.tensor(SCREAMING_SNAKE_CASE_ )
69
0
from __future__ import annotations import unittest from transformers import XGLMConfig, XGLMTokenizer, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.xglm.modeling_tf_xglm import ( TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXGLMForCausalLM, TFXGLMModel, ) @require_tf class __lowercase : """simple docstring""" _snake_case = XGLMConfig _snake_case = {} _snake_case = """gelu""" def __init__( self , A , A=1_4 , A=7 , A=True , A=True , A=True , A=9_9 , A=3_2 , A=2 , A=4 , A=3_7 , A="gelu" , A=0.1 , A=0.1 , A=5_1_2 , A=0.02 , ) -> Optional[Any]: snake_case : str = parent snake_case : Dict = batch_size snake_case : Optional[Any] = seq_length snake_case : Any = is_training snake_case : List[Any] = use_input_mask snake_case : Any = use_labels snake_case : int = vocab_size snake_case : int = d_model snake_case : Tuple = num_hidden_layers snake_case : Dict = num_attention_heads snake_case : str = ffn_dim snake_case : List[Any] = activation_function snake_case : Tuple = activation_dropout snake_case : Tuple = attention_dropout snake_case : Optional[int] = max_position_embeddings snake_case : Union[str, Any] = initializer_range snake_case : int = None snake_case : Dict = 0 snake_case : Dict = 2 snake_case : str = 1 def UpperCAmelCase ( self ) -> List[str]: return XGLMConfig.from_pretrained("""facebook/xglm-564M""" ) def UpperCAmelCase ( self ) -> str: snake_case : Optional[Any] = tf.clip_by_value( ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) , clip_value_min=0 , clip_value_max=3 ) snake_case : int = None if self.use_input_mask: snake_case : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] ) snake_case : List[Any] = 
self.get_config() snake_case : Optional[Any] = floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 ) return ( config, input_ids, input_mask, head_mask, ) def UpperCAmelCase ( self ) -> Any: return XGLMConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=__lowerCamelCase , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=__lowerCamelCase , ) def UpperCAmelCase ( self ) -> Any: snake_case : Tuple = self.prepare_config_and_inputs() ( snake_case ) : str = config_and_inputs snake_case : Dict = { '''input_ids''': input_ids, '''head_mask''': head_mask, } return config, inputs_dict @require_tf class __lowercase (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ): """simple docstring""" _snake_case = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else () _snake_case = (TFXGLMForCausalLM,) if is_tf_available() else () _snake_case = ( {"""feature-extraction""": TFXGLMModel, """text-generation""": TFXGLMForCausalLM} if is_tf_available() else {} ) _snake_case = False _snake_case = False _snake_case = False def UpperCAmelCase ( self ) -> Any: snake_case : Optional[int] = TFXGLMModelTester(self ) snake_case : Any = ConfigTester(self , config_class=__lowerCamelCase , n_embd=3_7 ) def UpperCAmelCase ( self ) -> str: self.config_tester.run_common_tests() @slow def UpperCAmelCase ( self ) -> int: for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: snake_case : Any = TFXGLMModel.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) @unittest.skip(reason="""Currently, model embeddings are going to 
undergo a major refactor.""" ) def UpperCAmelCase ( self ) -> str: super().test_resize_token_embeddings() @require_tf class __lowercase (unittest.TestCase ): """simple docstring""" @slow def UpperCAmelCase ( self , A=True ) -> Tuple: snake_case : Any = TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""" ) snake_case : Optional[int] = tf.convert_to_tensor([[2, 2_6_8, 9_8_6_5]] , dtype=tf.intaa ) # The dog # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other # fmt: off snake_case : Dict = [2, 2_6_8, 9_8_6_5, 6_7, 1_1, 1_9_8_8, 5_7_2_5_2, 9_8_6_5, 5, 9_8_4, 6_7, 1_9_8_8, 2_1_3_8_3_8, 1_6_5_8, 5_3, 7_0_4_4_6, 3_3, 6_6_5_7, 2_7_8, 1_5_8_1] # fmt: on snake_case : int = model.generate(__lowerCamelCase , do_sample=__lowerCamelCase , num_beams=1 ) if verify_outputs: self.assertListEqual(output_ids[0].numpy().tolist() , __lowerCamelCase ) @slow def UpperCAmelCase ( self ) -> List[Any]: snake_case : str = XGLMTokenizer.from_pretrained("""facebook/xglm-564M""" ) snake_case : List[str] = TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""" ) tf.random.set_seed(0 ) snake_case : str = tokenizer("""Today is a nice day and""" , return_tensors="""tf""" ) snake_case : Union[str, Any] = tokenized.input_ids # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices) with tf.device(""":/CPU:0""" ): snake_case : Dict = model.generate(__lowerCamelCase , do_sample=__lowerCamelCase , seed=[7, 0] ) snake_case : str = tokenizer.decode(output_ids[0] , skip_special_tokens=__lowerCamelCase ) snake_case : Any = ( '''Today is a nice day and warm evening here over Southern Alberta!! 
Today when they closed schools due''' ) self.assertEqual(__lowerCamelCase , __lowerCamelCase ) @slow def UpperCAmelCase ( self ) -> Optional[int]: snake_case : Union[str, Any] = TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""" ) snake_case : int = XGLMTokenizer.from_pretrained("""facebook/xglm-564M""" ) snake_case : List[str] = '''left''' # use different length sentences to test batching snake_case : Tuple = [ '''This is an extremelly long sentence that only exists to test the ability of the model to cope with ''' '''left-padding, such as in batched generation. The output for the sequence below should be the same ''' '''regardless of whether left padding is applied or not. When''', '''Hello, my dog is a little''', ] snake_case : str = tokenizer(__lowerCamelCase , return_tensors="""tf""" , padding=__lowerCamelCase ) snake_case : Optional[Any] = inputs['''input_ids'''] snake_case : Optional[Any] = model.generate(input_ids=__lowerCamelCase , attention_mask=inputs["""attention_mask"""] , max_new_tokens=1_2 ) snake_case : int = tokenizer(sentences[0] , return_tensors="""tf""" ).input_ids snake_case : Dict = model.generate(input_ids=__lowerCamelCase , max_new_tokens=1_2 ) snake_case : str = tokenizer(sentences[1] , return_tensors="""tf""" ).input_ids snake_case : Dict = model.generate(input_ids=__lowerCamelCase , max_new_tokens=1_2 ) snake_case : int = tokenizer.batch_decode(__lowerCamelCase , skip_special_tokens=__lowerCamelCase ) snake_case : List[str] = tokenizer.decode(output_non_padded[0] , skip_special_tokens=__lowerCamelCase ) snake_case : str = tokenizer.decode(output_padded[0] , skip_special_tokens=__lowerCamelCase ) snake_case : Any = [ '''This is an extremelly long sentence that only exists to test the ability of the model to cope with ''' '''left-padding, such as in batched generation. The output for the sequence below should be the same ''' '''regardless of whether left padding is applied or not. 
When left padding is applied, the sequence will be ''' '''a single''', '''Hello, my dog is a little bit of a shy one, but he is very friendly''', ] self.assertListEqual(__lowerCamelCase , __lowerCamelCase ) self.assertListEqual(__lowerCamelCase , [non_padded_sentence, padded_sentence] )
587
"""simple docstring""" def __lowercase ( snake_case_ : list ) ->list: '''simple docstring''' for i in range(len(snake_case_ ) - 1 ,0 ,-1 ): __A : Union[str, Any] = False for j in range(snake_case_ ,0 ,-1 ): if unsorted[j] < unsorted[j - 1]: __A , __A : Union[str, Any] = unsorted[j - 1], unsorted[j] __A : Optional[int] = True for j in range(snake_case_ ): if unsorted[j] > unsorted[j + 1]: __A , __A : Optional[Any] = unsorted[j + 1], unsorted[j] __A : Union[str, Any] = True if not swapped: break return unsorted if __name__ == "__main__": import doctest doctest.testmod() a_ = input("""Enter numbers separated by a comma:\n""").strip() a_ = [int(item) for item in user_input.split(""",""")] print(f'''{cocktail_shaker_sort(unsorted) = }''')
177
0
import json import logging import math import os import sys from dataclasses import dataclass, field from typing import Optional from datasets import Dataset, load_dataset import transformers from transformers import ( CONFIG_MAPPING, MODEL_FOR_MASKED_LM_MAPPING, AutoConfig, AutoModelForMaskedLM, AutoTokenizer, DataCollatorForWholeWordMask, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import get_last_checkpoint, is_main_process __magic_name__ : Optional[int] = logging.getLogger(__name__) __magic_name__ : Any = list(MODEL_FOR_MASKED_LM_MAPPING.keys()) __magic_name__ : Optional[Any] = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class lowerCamelCase : """simple docstring""" lowerCAmelCase_ = field( default=__snake_case , metadata={ """help""": ( """The model checkpoint for weights initialization.Don't set if you want to train a model from scratch.""" ) } , ) lowerCAmelCase_ = field( default=__snake_case , metadata={"""help""": """If training from scratch, pass a model type from the list: """ + """, """.join(__snake_case )} , ) lowerCAmelCase_ = field( default=__snake_case , metadata={ """help""": ( """Override some existing default config settings when a model is trained from scratch. 
Example: """ """n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index""" ) } , ) lowerCAmelCase_ = field( default=__snake_case , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} ) lowerCAmelCase_ = field( default=__snake_case , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} ) lowerCAmelCase_ = field( default=__snake_case , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , ) lowerCAmelCase_ = field( default=__snake_case , metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""} , ) lowerCAmelCase_ = field( default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , ) lowerCAmelCase_ = field( default=__snake_case , metadata={ """help""": ( """Will use the token generated when running `huggingface-cli login` (necessary to use this script """ """with private models).""" ) } , ) def lowercase_ ( self ): if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None): raise ValueError( "--config_overrides can't be used in combination with --config_name or --model_name_or_path" ) @dataclass class lowerCamelCase : """simple docstring""" lowerCAmelCase_ = field( default=__snake_case , metadata={"""help""": """The name of the dataset to use (via the datasets library)."""} ) lowerCAmelCase_ = field( default=__snake_case , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} ) lowerCAmelCase_ = field(default=__snake_case , metadata={"""help""": """The input training data file (a text file)."""} ) lowerCAmelCase_ = field( default=__snake_case , metadata={"""help""": """An optional input evaluation data file to evaluate the perplexity on (a text file)."""} , ) lowerCAmelCase_ = field( 
default=__snake_case , metadata={"""help""": """An optional input train ref data file for whole word masking in Chinese."""} , ) lowerCAmelCase_ = field( default=__snake_case , metadata={"""help""": """An optional input validation ref data file for whole word masking in Chinese."""} , ) lowerCAmelCase_ = field( default=__snake_case , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} ) lowerCAmelCase_ = field( default=5 , metadata={ """help""": """The percentage of the train set used as validation set in case there's no validation split""" } , ) lowerCAmelCase_ = field( default=__snake_case , metadata={ """help""": ( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated. Default to the max input length of the model.""" ) } , ) lowerCAmelCase_ = field( default=__snake_case , metadata={"""help""": """The number of processes to use for the preprocessing."""} , ) lowerCAmelCase_ = field( default=0.1_5 , metadata={"""help""": """Ratio of tokens to mask for masked language modeling loss"""} ) lowerCAmelCase_ = field( default=__snake_case , metadata={ """help""": ( """Whether to pad all samples to `max_seq_length`. """ """If False, will pad the samples dynamically when batching to the maximum length in the batch.""" ) } , ) def lowercase_ ( self ): if self.train_file is not None: A_ = self.train_file.split("." )[-1] assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file." if self.validation_file is not None: A_ = self.validation_file.split("." )[-1] assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file." 
def lowerCAmelCase ( snake_case__ : Optional[Any] , snake_case__ : Any )-> Any: with open(snake_case__ , "r" , encoding="utf-8" ) as f: A_ = [json.loads(snake_case__ ) for line in f.read().splitlines() if (len(snake_case__ ) > 0 and not line.isspace())] assert len(snake_case__ ) == len(snake_case__ ) A_ = {c: dataset[c] for c in dataset.column_names} A_ = refs return Dataset.from_dict(snake_case__ ) def lowerCAmelCase ( )-> int: # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. A_ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. A_ , A_ , A_ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: A_ , A_ , A_ = parser.parse_args_into_dataclasses() # Detecting last checkpoint. A_ = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: A_ = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( f'Output directory ({training_args.output_dir}) already exists and is not empty. ' "Use --overwrite_output_dir to overcome." ) elif last_checkpoint is not None: logger.info( f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change ' "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." 
) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , ) logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN ) # Log on each process the small summary: logger.warning( f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}' + f'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info("Training/evaluation parameters %s" , snake_case__ ) # Set seed before initializing model. set_seed(training_args.seed ) # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). # # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.dataset_name is not None: # Downloading and loading a dataset from the hub. 
A_ = load_dataset(data_args.dataset_name , data_args.dataset_config_name ) if "validation" not in datasets.keys(): A_ = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=f'train[:{data_args.validation_split_percentage}%]' , ) A_ = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=f'train[{data_args.validation_split_percentage}%:]' , ) else: A_ = {} if data_args.train_file is not None: A_ = data_args.train_file if data_args.validation_file is not None: A_ = data_args.validation_file A_ = data_args.train_file.split("." )[-1] if extension == "txt": A_ = "text" A_ = load_dataset(snake_case__ , data_files=snake_case__ ) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. A_ = { "cache_dir": model_args.cache_dir, "revision": model_args.model_revision, "use_auth_token": True if model_args.use_auth_token else None, } if model_args.config_name: A_ = AutoConfig.from_pretrained(model_args.config_name , **snake_case__ ) elif model_args.model_name_or_path: A_ = AutoConfig.from_pretrained(model_args.model_name_or_path , **snake_case__ ) else: A_ = CONFIG_MAPPING[model_args.model_type]() logger.warning("You are instantiating a new config instance from scratch." 
) if model_args.config_overrides is not None: logger.info(f'Overriding config: {model_args.config_overrides}' ) config.update_from_string(model_args.config_overrides ) logger.info(f'New config: {config}' ) A_ = { "cache_dir": model_args.cache_dir, "use_fast": model_args.use_fast_tokenizer, "revision": model_args.model_revision, "use_auth_token": True if model_args.use_auth_token else None, } if model_args.tokenizer_name: A_ = AutoTokenizer.from_pretrained(model_args.tokenizer_name , **snake_case__ ) elif model_args.model_name_or_path: A_ = AutoTokenizer.from_pretrained(model_args.model_name_or_path , **snake_case__ ) else: raise ValueError( "You are instantiating a new tokenizer from scratch. This is not supported by this script." "You can do it from another script, save it, and load it from here, using --tokenizer_name." ) if model_args.model_name_or_path: A_ = AutoModelForMaskedLM.from_pretrained( model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=snake_case__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) else: logger.info("Training new model from scratch" ) A_ = AutoModelForMaskedLM.from_config(snake_case__ ) model.resize_token_embeddings(len(snake_case__ ) ) # Preprocessing the datasets. # First we tokenize all the texts. 
if training_args.do_train: A_ = datasets["train"].column_names else: A_ = datasets["validation"].column_names A_ = "text" if "text" in column_names else column_names[0] A_ = "max_length" if data_args.pad_to_max_length else False def tokenize_function(snake_case__ : Tuple ): # Remove empty lines A_ = [line for line in examples["text"] if len(snake_case__ ) > 0 and not line.isspace()] return tokenizer(examples["text"] , padding=snake_case__ , truncation=snake_case__ , max_length=data_args.max_seq_length ) A_ = datasets.map( snake_case__ , batched=snake_case__ , num_proc=data_args.preprocessing_num_workers , remove_columns=[text_column_name] , load_from_cache_file=not data_args.overwrite_cache , ) # Add the chinese references if provided if data_args.train_ref_file is not None: A_ = add_chinese_references(tokenized_datasets["train"] , data_args.train_ref_file ) if data_args.validation_ref_file is not None: A_ = add_chinese_references( tokenized_datasets["validation"] , data_args.validation_ref_file ) # If we have ref files, need to avoid it removed by trainer A_ = data_args.train_ref_file or data_args.validation_ref_file if has_ref: A_ = False # Data collator # This one will take care of randomly masking the tokens. 
A_ = DataCollatorForWholeWordMask(tokenizer=snake_case__ , mlm_probability=data_args.mlm_probability ) # Initialize our Trainer A_ = Trainer( model=snake_case__ , args=snake_case__ , train_dataset=tokenized_datasets["train"] if training_args.do_train else None , eval_dataset=tokenized_datasets["validation"] if training_args.do_eval else None , tokenizer=snake_case__ , data_collator=snake_case__ , ) # Training if training_args.do_train: if last_checkpoint is not None: A_ = last_checkpoint elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path ): A_ = model_args.model_name_or_path else: A_ = None A_ = trainer.train(resume_from_checkpoint=snake_case__ ) trainer.save_model() # Saves the tokenizer too for easy upload A_ = os.path.join(training_args.output_dir , "train_results.txt" ) if trainer.is_world_process_zero(): with open(snake_case__ , "w" ) as writer: logger.info("***** Train results *****" ) for key, value in sorted(train_result.metrics.items() ): logger.info(f' {key} = {value}' ) writer.write(f'{key} = {value}\n' ) # Need to save the state, since Trainer.save_model saves only the tokenizer with the model trainer.state.save_to_json(os.path.join(training_args.output_dir , "trainer_state.json" ) ) # Evaluation A_ = {} if training_args.do_eval: logger.info("*** Evaluate ***" ) A_ = trainer.evaluate() A_ = math.exp(eval_output["eval_loss"] ) A_ = perplexity A_ = os.path.join(training_args.output_dir , "eval_results_mlm_wwm.txt" ) if trainer.is_world_process_zero(): with open(snake_case__ , "w" ) as writer: logger.info("***** Eval results *****" ) for key, value in sorted(results.items() ): logger.info(f' {key} = {value}' ) writer.write(f'{key} = {value}\n' ) return results def lowerCAmelCase ( snake_case__ : List[str] )-> int: # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
608
import qiskit def lowerCAmelCase ( snake_case__ : int , snake_case__ : int )-> qiskit.result.counts.Counts: A_ = qiskit.Aer.get_backend("aer_simulator" ) # Create a Quantum Circuit acting on the q register A_ = qiskit.QuantumCircuit(snake_case__ , snake_case__ ) # Apply X (NOT) Gate to Qubits 0 & 1 circuit.x(0 ) circuit.x(1 ) # Map the quantum measurement to the classical bits circuit.measure([0, 1] , [0, 1] ) # Execute the circuit on the qasm simulator A_ = qiskit.execute(snake_case__ , snake_case__ , shots=1000 ) # Return the histogram data of the results of the experiment. return job.result().get_counts(snake_case__ ) if __name__ == "__main__": __magic_name__ : List[Any] = single_qubit_measure(2, 2) print(f"""Total count for various states are: {counts}""")
608
1
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer from ...utils import logging SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE : List[Any] = "▁" SCREAMING_SNAKE_CASE : Union[str, Any] = {"vocab_file": "sentencepiece.bpe.model"} SCREAMING_SNAKE_CASE : str = { "vocab_file": { "facebook/mbart-large-50-one-to-many-mmt": ( "https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model" ), } } SCREAMING_SNAKE_CASE : int = { "facebook/mbart-large-50-one-to-many-mmt": 1024, } # fmt: off SCREAMING_SNAKE_CASE : List[str] = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN", "af_ZA", "az_AZ", "bn_IN", "fa_IR", "he_IL", "hr_HR", "id_ID", "ka_GE", "km_KH", "mk_MK", "ml_IN", "mn_MN", "mr_IN", "pl_PL", "ps_AF", "pt_XX", "sv_SE", "sw_KE", "ta_IN", "te_IN", "th_TH", "tl_XX", "uk_UA", "ur_PK", "xh_ZA", "gl_ES", "sl_SI"] class _lowerCamelCase( _a ): lowercase_ : List[Any] = VOCAB_FILES_NAMES lowercase_ : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase_ : Optional[int] = PRETRAINED_VOCAB_FILES_MAP lowercase_ : Tuple = ["""input_ids""", """attention_mask"""] lowercase_ : List[int] = [] lowercase_ : List[int] = [] def __init__( self, lowerCamelCase, lowerCamelCase=None, lowerCamelCase=None, lowerCamelCase="</s>", lowerCamelCase="</s>", lowerCamelCase="<s>", lowerCamelCase="<unk>", lowerCamelCase="<pad>", lowerCamelCase="<mask>", lowerCamelCase = None, **lowerCamelCase, ) -> None: """simple docstring""" _lowercase : Optional[Any] = AddedToken(lowerCamelCase, lstrip=lowerCamelCase, rstrip=lowerCamelCase) if isinstance(lowerCamelCase, lowerCamelCase) else mask_token _lowercase : int = 
{} if sp_model_kwargs is None else sp_model_kwargs _lowercase : Any = kwargs.get('additional_special_tokens', []) kwargs["additional_special_tokens"] += [ code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"] ] super().__init__( src_lang=lowerCamelCase, tgt_lang=lowerCamelCase, eos_token=lowerCamelCase, unk_token=lowerCamelCase, sep_token=lowerCamelCase, cls_token=lowerCamelCase, pad_token=lowerCamelCase, mask_token=lowerCamelCase, sp_model_kwargs=self.sp_model_kwargs, **lowerCamelCase, ) _lowercase : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(str(lowerCamelCase)) _lowercase : Optional[Any] = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' 
| '▁' | 's' | '▁de' | '-' | '▁a' # Mimic fairseq token-to-id alignment for the first 4 token _lowercase : str = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3} # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab _lowercase : Union[str, Any] = 1 _lowercase : Union[str, Any] = len(self.sp_model) _lowercase : Any = { code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(lowerCamelCase) } _lowercase : Optional[Any] = {v: k for k, v in self.lang_code_to_id.items()} _lowercase : Any = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset self.fairseq_tokens_to_ids.update(self.lang_code_to_id) _lowercase : Union[str, Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()} _lowercase : Any = src_lang if src_lang is not None else 'en_XX' _lowercase : Union[str, Any] = self.lang_code_to_id[self._src_lang] _lowercase : Any = tgt_lang self.set_src_lang_special_tokens(self._src_lang) @property def UpperCamelCase ( self) -> int: """simple docstring""" return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1 # Plus 1 for the mask token @property def UpperCamelCase ( self) -> str: """simple docstring""" return self._src_lang @src_lang.setter def UpperCamelCase ( self, lowerCamelCase) -> None: """simple docstring""" _lowercase : str = new_src_lang self.set_src_lang_special_tokens(self._src_lang) def __getstate__( self) -> Dict: """simple docstring""" _lowercase : Tuple = self.__dict__.copy() _lowercase : Optional[int] = None return state def __setstate__( self, lowerCamelCase) -> None: """simple docstring""" _lowercase : Optional[Any] = d # for backward compatibility if not hasattr(self, 'sp_model_kwargs'): _lowercase : str = {} _lowercase : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(self.vocab_file) def UpperCamelCase ( self) -> Dict: """simple docstring""" _lowercase : Dict = {self.convert_ids_to_tokens(lowerCamelCase): 
i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab def UpperCamelCase ( self, lowerCamelCase) -> List[str]: """simple docstring""" return self.sp_model.encode(lowerCamelCase, out_type=lowerCamelCase) def UpperCamelCase ( self, lowerCamelCase) -> int: """simple docstring""" if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] _lowercase : int = self.sp_model.PieceToId(lowerCamelCase) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def UpperCamelCase ( self, lowerCamelCase) -> str: """simple docstring""" if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset) def UpperCamelCase ( self, lowerCamelCase) -> Union[str, Any]: """simple docstring""" _lowercase : int = [] _lowercase : str = '' _lowercase : Any = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(lowerCamelCase) + token _lowercase : str = True _lowercase : Optional[Any] = [] else: current_sub_tokens.append(lowerCamelCase) _lowercase : List[Any] = False out_string += self.sp_model.decode(lowerCamelCase) return out_string.strip() def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase = None) -> Tuple[str]: """simple docstring""" if not os.path.isdir(lowerCamelCase): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''') return _lowercase : Dict = os.path.join( lowerCamelCase, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']) if os.path.abspath(self.vocab_file) != os.path.abspath(lowerCamelCase) and os.path.isfile(self.vocab_file): copyfile(self.vocab_file, lowerCamelCase) elif not os.path.isfile(self.vocab_file): with open(lowerCamelCase, 'wb') as fi: _lowercase : 
Tuple = self.sp_model.serialized_model_proto() fi.write(lowerCamelCase) return (out_vocab_file,) def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase = None, lowerCamelCase = False) -> List[int]: """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=lowerCamelCase, token_ids_a=lowerCamelCase, already_has_special_tokens=lowerCamelCase) _lowercase : List[str] = [1] * len(self.prefix_tokens) _lowercase : Dict = [1] * len(self.suffix_tokens) if token_ids_a is None: return prefix_ones + ([0] * len(lowerCamelCase)) + suffix_ones return prefix_ones + ([0] * len(lowerCamelCase)) + ([0] * len(lowerCamelCase)) + suffix_ones def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase = None) -> List[int]: """simple docstring""" if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, **lowerCamelCase) -> List[str]: """simple docstring""" if src_lang is None or tgt_lang is None: raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model') _lowercase : str = src_lang _lowercase : List[str] = self(lowerCamelCase, add_special_tokens=lowerCamelCase, return_tensors=lowerCamelCase, **lowerCamelCase) _lowercase : Dict = self.convert_tokens_to_ids(lowerCamelCase) _lowercase : int = tgt_lang_id return inputs def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase = "en_XX", lowerCamelCase = None, lowerCamelCase = "ro_RO", **lowerCamelCase, ) -> BatchEncoding: """simple docstring""" _lowercase : Dict = src_lang _lowercase : str = tgt_lang return super().prepare_seqaseq_batch(lowerCamelCase, lowerCamelCase, **lowerCamelCase) def UpperCamelCase ( self) -> Optional[Any]: """simple docstring""" return 
self.set_src_lang_special_tokens(self.src_lang) def UpperCamelCase ( self) -> Any: """simple docstring""" return self.set_tgt_lang_special_tokens(self.tgt_lang) def UpperCamelCase ( self, lowerCamelCase) -> None: """simple docstring""" _lowercase : Any = self.lang_code_to_id[src_lang] _lowercase : List[str] = [self.cur_lang_code_id] _lowercase : int = [self.eos_token_id] def UpperCamelCase ( self, lowerCamelCase) -> None: """simple docstring""" _lowercase : str = self.lang_code_to_id[tgt_lang] _lowercase : str = [self.cur_lang_code_id] _lowercase : Union[str, Any] = [self.eos_token_id]
89
"""simple docstring""" def lowercase__ ( lowerCamelCase : int , lowerCamelCase : int ) -> int: return int(input_a == input_a == 0 ) def lowercase__ ( ) -> None: print("Truth Table of NOR Gate:" ) print("| Input 1 | Input 2 | Output |" ) print(F"| 0 | 0 | {nor_gate(0 , 0 )} |" ) print(F"| 0 | 1 | {nor_gate(0 , 1 )} |" ) print(F"| 1 | 0 | {nor_gate(1 , 0 )} |" ) print(F"| 1 | 1 | {nor_gate(1 , 1 )} |" ) if __name__ == "__main__": import doctest doctest.testmod() main()
308
0
from __future__ import annotations from math import pow, sqrt def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ ) -> dict[str, float]: '''simple docstring''' if (resistance, reactance, impedance).count(0 ) != 1: raise ValueError("""One and only one argument must be 0""" ) if resistance == 0: return {"resistance": sqrt(pow(lowercase_ , 2 ) - pow(lowercase_ , 2 ) )} elif reactance == 0: return {"reactance": sqrt(pow(lowercase_ , 2 ) - pow(lowercase_ , 2 ) )} elif impedance == 0: return {"impedance": sqrt(pow(lowercase_ , 2 ) + pow(lowercase_ , 2 ) )} else: raise ValueError("""Exactly one argument must be 0""" ) if __name__ == "__main__": import doctest doctest.testmod()
700
from __future__ import annotations import math import random from collections.abc import Collection from typing import overload class _snake_case : def __init__( self , SCREAMING_SNAKE_CASE_ = None): '''simple docstring''' if components is None: lowercase__ : List[str] = [] lowercase__ : Dict = list(SCREAMING_SNAKE_CASE_) def __len__( self): '''simple docstring''' return len(self.__components) def __str__( self): '''simple docstring''' return "(" + ",".join(map(SCREAMING_SNAKE_CASE_ , self.__components)) + ")" def __add__( self , SCREAMING_SNAKE_CASE_): '''simple docstring''' lowercase__ : Optional[Any] = len(self) if size == len(SCREAMING_SNAKE_CASE_): lowercase__ : List[str] = [self.__components[i] + other.component(SCREAMING_SNAKE_CASE_) for i in range(SCREAMING_SNAKE_CASE_)] return Vector(SCREAMING_SNAKE_CASE_) else: raise Exception("""must have the same size""") def __sub__( self , SCREAMING_SNAKE_CASE_): '''simple docstring''' lowercase__ : List[Any] = len(self) if size == len(SCREAMING_SNAKE_CASE_): lowercase__ : Optional[Any] = [self.__components[i] - other.component(SCREAMING_SNAKE_CASE_) for i in range(SCREAMING_SNAKE_CASE_)] return Vector(SCREAMING_SNAKE_CASE_) else: # error case raise Exception("""must have the same size""") @overload def __mul__( self , SCREAMING_SNAKE_CASE_): '''simple docstring''' ... @overload def __mul__( self , SCREAMING_SNAKE_CASE_): '''simple docstring''' ... 
def __mul__( self , SCREAMING_SNAKE_CASE_): '''simple docstring''' if isinstance(SCREAMING_SNAKE_CASE_ , (float, int)): lowercase__ : Optional[int] = [c * other for c in self.__components] return Vector(SCREAMING_SNAKE_CASE_) elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) and len(self) == len(SCREAMING_SNAKE_CASE_): lowercase__ : Dict = len(self) lowercase__ : Optional[Any] = [self.__components[i] * other.component(SCREAMING_SNAKE_CASE_) for i in range(SCREAMING_SNAKE_CASE_)] return sum(SCREAMING_SNAKE_CASE_) else: # error case raise Exception("""invalid operand!""") def lowercase__ ( self): '''simple docstring''' return Vector(self.__components) def lowercase__ ( self , SCREAMING_SNAKE_CASE_): '''simple docstring''' if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) and -len(self.__components) <= i < len(self.__components): return self.__components[i] else: raise Exception("""index out of range""") def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): '''simple docstring''' assert -len(self.__components) <= pos < len(self.__components) lowercase__ : List[Any] = value def lowercase__ ( self): '''simple docstring''' if len(self.__components) == 0: raise Exception("""Vector is empty""") lowercase__ : Union[str, Any] = [c**2 for c in self.__components] return math.sqrt(sum(SCREAMING_SNAKE_CASE_)) def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = False): '''simple docstring''' lowercase__ : Union[str, Any] = self * other lowercase__ : Optional[Any] = self.euclidean_length() * other.euclidean_length() if deg: return math.degrees(math.acos(num / den)) else: return math.acos(num / den) def UpperCamelCase ( lowercase_ ) -> Vector: '''simple docstring''' assert isinstance(lowercase_ , lowercase_ ) return Vector([0] * dimension ) def UpperCamelCase ( lowercase_ , lowercase_ ) -> Vector: '''simple docstring''' assert isinstance(lowercase_ , lowercase_ ) and (isinstance(lowercase_ , lowercase_ )) lowercase__ : 
Union[str, Any] = [0] * dimension lowercase__ : Any = 1 return Vector(lowercase_ ) def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ ) -> Vector: '''simple docstring''' assert ( isinstance(lowercase_ , lowercase_ ) and isinstance(lowercase_ , lowercase_ ) and (isinstance(lowercase_ , (int, float) )) ) return x * scalar + y def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ ) -> Vector: '''simple docstring''' random.seed(lowercase_ ) lowercase__ : int = [random.randint(lowercase_ , lowercase_ ) for _ in range(lowercase_ )] return Vector(lowercase_ ) class _snake_case : def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): '''simple docstring''' lowercase__ : List[Any] = matrix lowercase__ : Any = w lowercase__ : Any = h def __str__( self): '''simple docstring''' lowercase__ : str = """""" for i in range(self.__height): ans += "|" for j in range(self.__width): if j < self.__width - 1: ans += str(self.__matrix[i][j]) + "," else: ans += str(self.__matrix[i][j]) + "|\n" return ans def __add__( self , SCREAMING_SNAKE_CASE_): '''simple docstring''' if self.__width == other.width() and self.__height == other.height(): lowercase__ : Tuple = [] for i in range(self.__height): lowercase__ : Tuple = [ self.__matrix[i][j] + other.component(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) for j in range(self.__width) ] matrix.append(SCREAMING_SNAKE_CASE_) return Matrix(SCREAMING_SNAKE_CASE_ , self.__width , self.__height) else: raise Exception("""matrix must have the same dimension!""") def __sub__( self , SCREAMING_SNAKE_CASE_): '''simple docstring''' if self.__width == other.width() and self.__height == other.height(): lowercase__ : Optional[int] = [] for i in range(self.__height): lowercase__ : List[str] = [ self.__matrix[i][j] - other.component(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) for j in range(self.__width) ] matrix.append(SCREAMING_SNAKE_CASE_) return Matrix(SCREAMING_SNAKE_CASE_ , self.__width , self.__height) 
else: raise Exception("""matrices must have the same dimension!""") @overload def __mul__( self , SCREAMING_SNAKE_CASE_): '''simple docstring''' ... @overload def __mul__( self , SCREAMING_SNAKE_CASE_): '''simple docstring''' ... def __mul__( self , SCREAMING_SNAKE_CASE_): '''simple docstring''' if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): # matrix-vector if len(SCREAMING_SNAKE_CASE_) == self.__width: lowercase__ : List[Any] = zero_vector(self.__height) for i in range(self.__height): lowercase__ : Union[str, Any] = [ self.__matrix[i][j] * other.component(SCREAMING_SNAKE_CASE_) for j in range(self.__width) ] ans.change_component(SCREAMING_SNAKE_CASE_ , sum(SCREAMING_SNAKE_CASE_)) return ans else: raise Exception( """vector must have the same size as the """ """number of columns of the matrix!""") elif isinstance(SCREAMING_SNAKE_CASE_ , (int, float)): # matrix-scalar lowercase__ : Tuple = [ [self.__matrix[i][j] * other for j in range(self.__width)] for i in range(self.__height) ] return Matrix(SCREAMING_SNAKE_CASE_ , self.__width , self.__height) return None def lowercase__ ( self): '''simple docstring''' return self.__height def lowercase__ ( self): '''simple docstring''' return self.__width def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): '''simple docstring''' if 0 <= x < self.__height and 0 <= y < self.__width: return self.__matrix[x][y] else: raise Exception("""change_component: indices out of bounds""") def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): '''simple docstring''' if 0 <= x < self.__height and 0 <= y < self.__width: lowercase__ : Tuple = value else: raise Exception("""change_component: indices out of bounds""") def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): '''simple docstring''' if self.__height != self.__width: raise Exception("""Matrix is not square""") lowercase__ : List[Any] = self.__matrix[:x] + self.__matrix[x + 1 :] for i in 
range(len(SCREAMING_SNAKE_CASE_)): lowercase__ : List[str] = minor[i][:y] + minor[i][y + 1 :] return Matrix(SCREAMING_SNAKE_CASE_ , self.__width - 1 , self.__height - 1).determinant() def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_): '''simple docstring''' if self.__height != self.__width: raise Exception("""Matrix is not square""") if 0 <= x < self.__height and 0 <= y < self.__width: return (-1) ** (x + y) * self.minor(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) else: raise Exception("""Indices out of bounds""") def lowercase__ ( self): '''simple docstring''' if self.__height != self.__width: raise Exception("""Matrix is not square""") if self.__height < 1: raise Exception("""Matrix has no element""") elif self.__height == 1: return self.__matrix[0][0] elif self.__height == 2: return ( self.__matrix[0][0] * self.__matrix[1][1] - self.__matrix[0][1] * self.__matrix[1][0] ) else: lowercase__ : Optional[int] = [ self.__matrix[0][y] * self.cofactor(0 , SCREAMING_SNAKE_CASE_) for y in range(self.__width) ] return sum(SCREAMING_SNAKE_CASE_) def UpperCamelCase ( lowercase_ ) -> Matrix: '''simple docstring''' lowercase__ : list[list[float]] = [[0] * n for _ in range(lowercase_ )] return Matrix(lowercase_ , lowercase_ , lowercase_ ) def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> Matrix: '''simple docstring''' random.seed(lowercase_ ) lowercase__ : list[list[float]] = [ [random.randint(lowercase_ , lowercase_ ) for _ in range(lowercase_ )] for _ in range(lowercase_ ) ] return Matrix(lowercase_ , lowercase_ , lowercase_ )
495
0
'''simple docstring''' from typing import List import numpy as np def __UpperCAmelCase ( a_: dict ): _UpperCAmelCase : Any = {key: len(A__ ) for key, value in gen_kwargs.items() if isinstance(A__, A__ )} if len(set(lists_lengths.values() ) ) > 1: raise RuntimeError( ( "Sharding is ambiguous for this dataset: " + "we found several data sources lists of different lengths, and we don\'t know over which list we should parallelize:\n" + "\n".join(f"""\t- key {key} has length {length}""" for key, length in lists_lengths.items() ) + "\nTo fix this, check the \'gen_kwargs\' and make sure to use lists only for data sources, " + "and use tuples otherwise. In the end there should only be one single list, or several lists with the same length." ) ) _UpperCAmelCase : List[Any] = max(lists_lengths.values(), default=0 ) return max(1, A__ ) def __UpperCAmelCase ( a_: int, a_: int ): _UpperCAmelCase : Tuple = [] for group_idx in range(A__ ): _UpperCAmelCase : Any = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs)) if num_shards_to_add == 0: break _UpperCAmelCase : Any = shards_indices_per_group[-1].stop if shards_indices_per_group else 0 _UpperCAmelCase : int = range(A__, start + num_shards_to_add ) shards_indices_per_group.append(A__ ) return shards_indices_per_group def __UpperCAmelCase ( a_: dict, a_: int ): _UpperCAmelCase : Optional[Any] = _number_of_shards_in_gen_kwargs(A__ ) if num_shards == 1: return [dict(A__ )] else: _UpperCAmelCase : Any = _distribute_shards(num_shards=A__, max_num_jobs=A__ ) return [ { key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]] if isinstance(A__, A__ ) else value for key, value in gen_kwargs.items() } for group_idx in range(len(A__ ) ) ] def __UpperCAmelCase ( a_: List[dict] ): return { key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]] if isinstance(gen_kwargs_list[0][key], A__ ) else gen_kwargs_list[0][key] for key in gen_kwargs_list[0] } def __UpperCAmelCase ( a_: 
np.random.Generator, a_: dict ): _UpperCAmelCase : Any = {len(A__ ) for value in gen_kwargs.values() if isinstance(A__, A__ )} _UpperCAmelCase : Tuple = {} for size in list_sizes: _UpperCAmelCase : Union[str, Any] = list(range(A__ ) ) rng.shuffle(indices_per_size[size] ) # Now let's copy the gen_kwargs and shuffle the lists based on their sizes _UpperCAmelCase : Optional[int] = dict(A__ ) for key, value in shuffled_kwargs.items(): if isinstance(A__, A__ ): _UpperCAmelCase : int = [value[i] for i in indices_per_size[len(A__ )]] return shuffled_kwargs
494
def _lowerCAmelCase ( A__: int , A__: int ): '''simple docstring''' if not isinstance(A__ , A__ ): raise ValueError('''iterations must be defined as integers''' ) if not isinstance(A__ , A__ ) or not number >= 1: raise ValueError( '''starting number must be and integer and be more than 0''' ) if not iterations >= 1: raise ValueError('''Iterations must be done more than 0 times to play FizzBuzz''' ) UpperCAmelCase = '''''' while number <= iterations: if number % 3 == 0: out += "Fizz" if number % 5 == 0: out += "Buzz" if 0 not in (number % 3, number % 5): out += str(A__ ) # print(out) number += 1 out += " " return out if __name__ == "__main__": import doctest doctest.testmod()
254
0
'''simple docstring''' from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _lowerCamelCase = logging.get_logger(__name__) _lowerCamelCase = { """andreasmadsen/efficient_mlm_m0.40""": ( """https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json""" ), } class _snake_case (__SCREAMING_SNAKE_CASE): __A : Tuple ="roberta-prelayernorm" def __init__( self ,_snake_case=5_02_65 ,_snake_case=7_68 ,_snake_case=12 ,_snake_case=12 ,_snake_case=30_72 ,_snake_case="gelu" ,_snake_case=0.1 ,_snake_case=0.1 ,_snake_case=5_12 ,_snake_case=2 ,_snake_case=0.02 ,_snake_case=1E-12 ,_snake_case=1 ,_snake_case=0 ,_snake_case=2 ,_snake_case="absolute" ,_snake_case=True ,_snake_case=None ,**_snake_case ,): super().__init__(pad_token_id=_snake_case ,bos_token_id=_snake_case ,eos_token_id=_snake_case ,**_snake_case ) UpperCAmelCase_ : Optional[int] = vocab_size UpperCAmelCase_ : Dict = hidden_size UpperCAmelCase_ : int = num_hidden_layers UpperCAmelCase_ : Optional[Any] = num_attention_heads UpperCAmelCase_ : Optional[Any] = hidden_act UpperCAmelCase_ : str = intermediate_size UpperCAmelCase_ : Optional[Any] = hidden_dropout_prob UpperCAmelCase_ : str = attention_probs_dropout_prob UpperCAmelCase_ : Dict = max_position_embeddings UpperCAmelCase_ : Union[str, Any] = type_vocab_size UpperCAmelCase_ : Optional[int] = initializer_range UpperCAmelCase_ : str = layer_norm_eps UpperCAmelCase_ : Optional[int] = position_embedding_type UpperCAmelCase_ : Any = use_cache UpperCAmelCase_ : List[Any] = classifier_dropout class _snake_case (__SCREAMING_SNAKE_CASE): @property def UpperCamelCase__ ( self ): if self.task == "multiple-choice": UpperCAmelCase_ : Any = {0: "batch", 1: "choice", 2: "sequence"} else: UpperCAmelCase_ : List[Any] = {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ] )
710
'''simple docstring''' from __future__ import annotations from typing import Any def a__ ( _SCREAMING_SNAKE_CASE : list ) -> int: """simple docstring""" if not postfix_notation: return 0 UpperCAmelCase_ : Tuple = {"+", "-", "*", "/"} UpperCAmelCase_ : list[Any] = [] for token in postfix_notation: if token in operations: UpperCAmelCase_ , UpperCAmelCase_ : Tuple = stack.pop(), stack.pop() if token == "+": stack.append(a + b ) elif token == "-": stack.append(a - b ) elif token == "*": stack.append(a * b ) else: if a * b < 0 and a % b != 0: stack.append(a // b + 1 ) else: stack.append(a // b ) else: stack.append(int(_SCREAMING_SNAKE_CASE ) ) return stack.pop() if __name__ == "__main__": import doctest doctest.testmod()
323
0
import math class a : def lowerCAmelCase_ ( self , __UpperCamelCase , __UpperCamelCase )-> int: '''simple docstring''' A__ : str =0.0 A__ : Optional[Any] =0.0 for i in range(len(__UpperCamelCase ) ): da += math.pow((sample[i] - weights[0][i]) , 2 ) da += math.pow((sample[i] - weights[1][i]) , 2 ) return 0 if da > da else 1 return 0 def lowerCAmelCase_ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> list[list[int | float]]: '''simple docstring''' for i in range(len(__UpperCamelCase ) ): weights[j][i] += alpha * (sample[i] - weights[j][i]) return weights def SCREAMING_SNAKE_CASE__ ( ) -> None: # Training Examples ( m, n ) A__ : List[Any] =[[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]] # weight initialization ( n, C ) A__ : Any =[[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]] # training A__ : List[str] =SelfOrganizingMap() A__ : Dict =3 A__ : Optional[int] =0.5 for _ in range(snake_case_ ): for j in range(len(snake_case_ ) ): # training sample A__ : str =training_samples[j] # Compute the winning vector A__ : Tuple =self_organizing_map.get_winner(snake_case_, snake_case_ ) # Update the winning vector A__ : Optional[int] =self_organizing_map.update(snake_case_, snake_case_, snake_case_, snake_case_ ) # classify test sample A__ : Optional[int] =[0, 0, 0, 1] A__ : Any =self_organizing_map.get_winner(snake_case_, snake_case_ ) # results print(f'Clusters that the test sample belongs to : {winner}' ) print(f'Weights that have been trained : {weights}' ) # running the main() function if __name__ == "__main__": main()
416
# NOTE(review): this module appears to be machine-mangled source for the
# `diffusers` VideoToVideoSDPipeline unit tests: every method is named
# ``lowerCAmelCase_`` (so later defs shadow earlier ones), results are
# assigned to ``A__`` while later statements read the original local names
# (``unet``, ``video``, ``generator``, ...), and ``__UpperCamelCase`` is used
# where distinct arguments/locals once existed.  Documented as-is; the file
# is NOT runnable in this form.
import random
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    UNetaDConditionModel,
    VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device

from ..pipeline_params import (
    TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin

# Make every torch op deterministic so pixel-value assertions are stable.
enable_full_determinism()


@skip_mps
class a ( UpperCamelCase_ ,unittest.TestCase ):
    # Fast (tiny-model, CPU-sized) tests for VideoToVideoSDPipeline.
    # NOTE(review): base class ``UpperCamelCase_`` is undefined here —
    # presumably mangled from ``PipelineTesterMixin``; confirm upstream.
    # NOTE(review): all six class attributes below were mangled to the same
    # name ``__lowercase``; originally they were distinct mixin config fields
    # (pipeline_class, params, batch_params, required_optional_params, ...).
    __lowercase = VideoToVideoSDPipeline
    __lowercase = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({"""video"""} ) - {"""image""", """width""", """height"""}
    __lowercase = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"""video"""} ) - {"""image"""}
    __lowercase = PipelineTesterMixin.required_optional_params - {"""latents"""}
    __lowercase = False  # No `output_type`.
    __lowercase = frozenset(
        [
            """num_inference_steps""",
            """generator""",
            """latents""",
            """return_dict""",
            """callback""",
            """callback_steps""",
        ] )

    def lowerCAmelCase_ ( self )-> Optional[int]:
        """Build tiny UNet3D / DDIM / VAE / CLIP components for fast tests.

        NOTE(review): each component is assigned to ``A__`` while the final
        dict reads the original names (``unet``, ``scheduler``, ...), and the
        return reads ``components`` — all undefined after mangling.
        """
        torch.manual_seed(0 )
        A__ : Optional[Any] =UNetaDConditionModel(
            block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''CrossAttnDownBlock3D''', '''CrossAttnDownBlock3D''', '''CrossAttnDownBlock3D''', '''DownBlock3D''') , up_block_types=('''UpBlock3D''', '''CrossAttnUpBlock3D''', '''CrossAttnUpBlock3D''', '''CrossAttnUpBlock3D''') , cross_attention_dim=32 , attention_head_dim=4 , )
        # NOTE(review): ``clip_sample=__UpperCamelCase`` — undefined in this
        # scope; presumably mangled from ``False``.
        A__ : str =DDIMScheduler(
            beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=__UpperCamelCase , set_alpha_to_one=__UpperCamelCase , )
        torch.manual_seed(0 )
        A__ : Any =AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=1_28 , )
        torch.manual_seed(0 )
        A__ : Dict =CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act='''gelu''' , projection_dim=5_12 , )
        A__ : List[Any] =CLIPTextModel(__UpperCamelCase )
        A__ : int =CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
        A__ : List[str] ={
            '''unet''': unet,
            '''scheduler''': scheduler,
            '''vae''': vae,
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
        }
        return components

    # NOTE(review): the two parameters were mangled to the same name — this
    # is a SyntaxError; originally (device, seed=0).
    def lowerCAmelCase_ ( self , __UpperCamelCase , __UpperCamelCase=0 )-> Tuple:
        """Build the dummy call kwargs (prompt, 3-frame video, generator)."""
        A__ : Any =floats_tensor((1, 3, 3, 32, 32) , rng=random.Random(__UpperCamelCase ) ).to(__UpperCamelCase )
        if str(__UpperCamelCase ).startswith('''mps''' ):
            # mps does not support device-bound generators.
            A__ : Dict =torch.manual_seed(__UpperCamelCase )
        else:
            A__ : List[Any] =torch.Generator(device=__UpperCamelCase ).manual_seed(__UpperCamelCase )
        A__ : Optional[int] ={
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''video''': video,
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''guidance_scale''': 6.0,
            '''output_type''': '''pt''',
        }
        return inputs

    def lowerCAmelCase_ ( self )-> Tuple:
        """Run the tiny pipeline on CPU and pin a 3x3 slice of frame pixels."""
        A__ : Any ='''cpu'''  # ensure determinism for the device-dependent torch.Generator
        A__ : Dict =self.get_dummy_components()
        A__ : str =VideoToVideoSDPipeline(**__UpperCamelCase )
        A__ : Tuple =sd_pipe.to(__UpperCamelCase )
        sd_pipe.set_progress_bar_config(disable=__UpperCamelCase )
        A__ : List[str] =self.get_dummy_inputs(__UpperCamelCase )
        A__ : Any ='''np'''
        A__ : Optional[Any] =sd_pipe(**__UpperCamelCase ).frames
        A__ : Optional[int] =frames[0][-3:, -3:, -1]
        assert frames[0].shape == (32, 32, 3)
        A__ : List[Any] =np.array([1_06, 1_17, 1_13, 1_74, 1_37, 1_12, 1_48, 1_51, 1_31] )
        # Reference pixel values pinned under full determinism.
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2

    @unittest.skipIf(
        torch_device != '''cuda''' or not is_xformers_available() ,
        reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
    def lowerCAmelCase_ ( self )-> Union[str, Any]:
        """xformers attention must match vanilla attention within 5e-3."""
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=__UpperCamelCase , expected_max_diff=5E-3 )

    @unittest.skip(reason='''Batching needs to be properly figured out first for this pipeline.''' )
    def lowerCAmelCase_ ( self )-> Optional[int]:
        """Skipped mixin test (batched inputs)."""
        pass

    @unittest.skip(reason='''Batching needs to be properly figured out first for this pipeline.''' )
    def lowerCAmelCase_ ( self )-> List[Any]:
        """Skipped mixin test (batched single-identical inputs)."""
        pass

    @unittest.skip(reason='''`num_images_per_prompt` argument is not supported for this pipeline.''' )
    def lowerCAmelCase_ ( self )-> Tuple:
        """Skipped mixin test (num_images_per_prompt)."""
        pass

    def lowerCAmelCase_ ( self )-> Any:
        """Delegate to the mixin's progress-bar test unchanged."""
        return super().test_progress_bar()


@slow
@skip_mps
class a ( unittest.TestCase ):
    # Slow integration test against the real zeroscope_v2_XL checkpoint
    # (requires CUDA and network access to the Hub).
    def lowerCAmelCase_ ( self )-> Optional[int]:
        """Run 3 denoising steps on a random 10-frame video and pin outputs.

        NOTE(review): same mangling as above — ``pipe``/``video``/
        ``video_frames`` are read but only ``A__`` is ever assigned; also
        ``torch.floataa`` is presumably mangled ``torch.float16``.
        """
        A__ : Dict =VideoToVideoSDPipeline.from_pretrained('''cerspense/zeroscope_v2_XL''' , torch_dtype=torch.floataa )
        pipe.enable_model_cpu_offload()
        # 10 frames
        A__ : List[Any] =torch.Generator(device='''cpu''' ).manual_seed(0 )
        A__ : Any =torch.randn((1, 10, 3, 10_24, 5_76) , generator=__UpperCamelCase )
        A__ : Tuple =video.to('''cuda''' )
        A__ : List[Any] ='''Spiderman is surfing'''
        A__ : int =pipe(__UpperCamelCase , video=__UpperCamelCase , generator=__UpperCamelCase , num_inference_steps=3 , output_type='''pt''' ).frames
        A__ : List[Any] =np.array([-1.045_8984, -1.127_9297, -0.966_3086, -0.9150_3906, -0.7509_7656] )
        assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array ).sum() < 1E-2
416
1
"""Lazy import structure for the MT5 model package.

Mirrors the standard Transformers model ``__init__``: tokenizers are resolved
eagerly (with dummy fallbacks for missing optional backends), everything else
is exposed lazily through ``_LazyModule``.
"""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)

# MT5 reuses the T5 tokenizers; fall back to dummy objects that raise a
# helpful error when the optional sentencepiece/tokenizers backends are absent.
if is_sentencepiece_available():
    from ..ta.tokenization_ta import TaTokenizer
else:
    from ...utils.dummy_sentencepiece_objects import TaTokenizer

MTaTokenizer = TaTokenizer

if is_tokenizers_available():
    from ..ta.tokenization_ta_fast import TaTokenizerFast
else:
    from ...utils.dummy_tokenizers_objects import TaTokenizerFast

MTaTokenizerFast = TaTokenizerFast

# Modules importable without any optional backend.
_import_structure = {'configuration_mt5': ['MT5Config', 'MT5OnnxConfig']}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_mt5'] = [
        'MT5EncoderModel',
        'MT5ForConditionalGeneration',
        'MT5ForQuestionAnswering',
        'MT5Model',
        'MT5PreTrainedModel',
        'MT5Stack',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_tf_mt5'] = ['TFMT5EncoderModel', 'TFMT5ForConditionalGeneration', 'TFMT5Model']

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_flax_mt5'] = ['FlaxMT5EncoderModel', 'FlaxMT5ForConditionalGeneration', 'FlaxMT5Model']

if TYPE_CHECKING:
    from .configuration_mta import MTaConfig, MTaOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mta import (
            MTaEncoderModel,
            MTaForConditionalGeneration,
            MTaForQuestionAnswering,
            MTaModel,
            MTaPreTrainedModel,
            MTaStack,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel
else:
    import sys

    # Replace this module with a lazy proxy; the eagerly-built tokenizers are
    # attached via ``extra_objects`` so they stay importable by name.
    sys.modules[__name__] = _LazyModule(
        __name__,
        globals()['__file__'],
        _import_structure,
        extra_objects={'MT5Tokenizer': MTaTokenizer, 'MT5TokenizerFast': MTaTokenizerFast},
        module_spec=__spec__,
    )
707
"""Top-level entry point for the ``accelerate`` command-line interface."""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser

from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser


def main():
    """Parse ``accelerate <command> [<args>]`` and dispatch to the sub-command.

    Each sub-command registers itself on the sub-parser and attaches its
    handler as ``args.func``; with no recognized command we print help and
    exit with status 1.
    """
    # allow_abbrev=False: sub-commands must be spelled out in full so
    # abbreviations never collide with future command names.
    parser = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
    subparsers = parser.add_subparsers(help="accelerate command helpers")

    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        # No (valid) sub-command given: show usage and fail.
        parser.print_help()
        exit(1)

    # Run the selected sub-command's handler.
    args.func(args)


# Backward-compatible alias for the previous (generated) name.
__UpperCAmelCase = main

if __name__ == "__main__":
    main()
257
0