code
stringlengths
82
53.2k
code_codestyle
int64
0
721
style_context
stringlengths
91
41.9k
style_context_codestyle
int64
0
699
label
int64
0
1
"""simple docstring""" import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from transformers.activations import gelu_new, gelu_python, get_activation @require_torch class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): """simple docstring""" def snake_case_ ( self): __SCREAMING_SNAKE_CASE = torch.tensor([-1_0_0, -1, -0.1, 0, 0.1, 1.0, 1_0_0]) __SCREAMING_SNAKE_CASE = get_activation("""gelu""") self.assertTrue(torch.allclose(gelu_python(lowerCAmelCase__) , torch_builtin(lowerCAmelCase__))) self.assertFalse(torch.allclose(gelu_python(lowerCAmelCase__) , gelu_new(lowerCAmelCase__))) def snake_case_ ( self): __SCREAMING_SNAKE_CASE = torch.tensor([-1_0_0, -1, -0.1, 0, 0.1, 1.0, 1_0_0]) __SCREAMING_SNAKE_CASE = get_activation("""gelu""") __SCREAMING_SNAKE_CASE = get_activation("""gelu_10""") __SCREAMING_SNAKE_CASE = torch_builtin(lowerCAmelCase__) __SCREAMING_SNAKE_CASE = geluaa(lowerCAmelCase__) __SCREAMING_SNAKE_CASE = torch.where(y_gelu_aa < 10.0 , 1 , 0) self.assertTrue(torch.max(lowerCAmelCase__).item() == 10.0) self.assertTrue(torch.allclose(y_gelu * clipped_mask , y_gelu_aa * clipped_mask)) def snake_case_ ( self): get_activation("""gelu""") get_activation("""gelu_10""") get_activation("""gelu_fast""") get_activation("""gelu_new""") get_activation("""gelu_python""") get_activation("""gelu_pytorch_tanh""") get_activation("""linear""") get_activation("""mish""") get_activation("""quick_gelu""") get_activation("""relu""") get_activation("""sigmoid""") get_activation("""silu""") get_activation("""swish""") get_activation("""tanh""") with self.assertRaises(lowerCAmelCase__): get_activation("""bogus""") with self.assertRaises(lowerCAmelCase__): get_activation(lowerCAmelCase__) def snake_case_ ( self): __SCREAMING_SNAKE_CASE = get_activation("""gelu""") __SCREAMING_SNAKE_CASE = 1 __SCREAMING_SNAKE_CASE = get_activation("""gelu""") self.assertEqual(acta.a , 1) with 
self.assertRaises(lowerCAmelCase__): __SCREAMING_SNAKE_CASE = acta.a
155
"""simple docstring""" import argparse import json from collections import OrderedDict from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( ConditionalDetrConfig, ConditionalDetrForObjectDetection, ConditionalDetrForSegmentation, ConditionalDetrImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() __magic_name__ = logging.get_logger(__name__) # here we list all keys to be renamed (original name on the left, our name on the right) __magic_name__ = [] for i in range(6): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( (F"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""", F"""encoder.layers.{i}.self_attn.out_proj.weight""") ) rename_keys.append( (F"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", F"""encoder.layers.{i}.self_attn.out_proj.bias""") ) rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.weight""", F"""encoder.layers.{i}.fc1.weight""")) rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.bias""", F"""encoder.layers.{i}.fc1.bias""")) rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.weight""", F"""encoder.layers.{i}.fc2.weight""")) rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.bias""", F"""encoder.layers.{i}.fc2.bias""")) rename_keys.append( (F"""transformer.encoder.layers.{i}.norm1.weight""", F"""encoder.layers.{i}.self_attn_layer_norm.weight""") ) rename_keys.append((F"""transformer.encoder.layers.{i}.norm1.bias""", F"""encoder.layers.{i}.self_attn_layer_norm.bias""")) rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.weight""", F"""encoder.layers.{i}.final_layer_norm.weight""")) rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.bias""", F"""encoder.layers.{i}.final_layer_norm.bias""")) # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms 
rename_keys.append( (F"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", F"""decoder.layers.{i}.self_attn.out_proj.weight""") ) rename_keys.append( (F"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", F"""decoder.layers.{i}.self_attn.out_proj.bias""") ) rename_keys.append( ( F"""transformer.decoder.layers.{i}.cross_attn.out_proj.weight""", F"""decoder.layers.{i}.encoder_attn.out_proj.weight""", ) ) rename_keys.append( ( F"""transformer.decoder.layers.{i}.cross_attn.out_proj.bias""", F"""decoder.layers.{i}.encoder_attn.out_proj.bias""", ) ) rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.weight""", F"""decoder.layers.{i}.fc1.weight""")) rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.bias""", F"""decoder.layers.{i}.fc1.bias""")) rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.weight""", F"""decoder.layers.{i}.fc2.weight""")) rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.bias""", F"""decoder.layers.{i}.fc2.bias""")) rename_keys.append( (F"""transformer.decoder.layers.{i}.norm1.weight""", F"""decoder.layers.{i}.self_attn_layer_norm.weight""") ) rename_keys.append((F"""transformer.decoder.layers.{i}.norm1.bias""", F"""decoder.layers.{i}.self_attn_layer_norm.bias""")) rename_keys.append( (F"""transformer.decoder.layers.{i}.norm2.weight""", F"""decoder.layers.{i}.encoder_attn_layer_norm.weight""") ) rename_keys.append( (F"""transformer.decoder.layers.{i}.norm2.bias""", F"""decoder.layers.{i}.encoder_attn_layer_norm.bias""") ) rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.weight""", F"""decoder.layers.{i}.final_layer_norm.weight""")) rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.bias""", F"""decoder.layers.{i}.final_layer_norm.bias""")) # q, k, v projections in self/cross-attention in decoder for conditional DETR rename_keys.append( (F"""transformer.decoder.layers.{i}.sa_qcontent_proj.weight""", F"""decoder.layers.{i}.sa_qcontent_proj.weight""") ) 
rename_keys.append( (F"""transformer.decoder.layers.{i}.sa_kcontent_proj.weight""", F"""decoder.layers.{i}.sa_kcontent_proj.weight""") ) rename_keys.append( (F"""transformer.decoder.layers.{i}.sa_qpos_proj.weight""", F"""decoder.layers.{i}.sa_qpos_proj.weight""") ) rename_keys.append( (F"""transformer.decoder.layers.{i}.sa_kpos_proj.weight""", F"""decoder.layers.{i}.sa_kpos_proj.weight""") ) rename_keys.append((F"""transformer.decoder.layers.{i}.sa_v_proj.weight""", F"""decoder.layers.{i}.sa_v_proj.weight""")) rename_keys.append( (F"""transformer.decoder.layers.{i}.ca_qcontent_proj.weight""", F"""decoder.layers.{i}.ca_qcontent_proj.weight""") ) # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight")) rename_keys.append( (F"""transformer.decoder.layers.{i}.ca_kcontent_proj.weight""", F"""decoder.layers.{i}.ca_kcontent_proj.weight""") ) rename_keys.append( (F"""transformer.decoder.layers.{i}.ca_kpos_proj.weight""", F"""decoder.layers.{i}.ca_kpos_proj.weight""") ) rename_keys.append((F"""transformer.decoder.layers.{i}.ca_v_proj.weight""", F"""decoder.layers.{i}.ca_v_proj.weight""")) rename_keys.append( (F"""transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight""", F"""decoder.layers.{i}.ca_qpos_sine_proj.weight""") ) rename_keys.append( (F"""transformer.decoder.layers.{i}.sa_qcontent_proj.bias""", F"""decoder.layers.{i}.sa_qcontent_proj.bias""") ) rename_keys.append( (F"""transformer.decoder.layers.{i}.sa_kcontent_proj.bias""", F"""decoder.layers.{i}.sa_kcontent_proj.bias""") ) rename_keys.append((F"""transformer.decoder.layers.{i}.sa_qpos_proj.bias""", F"""decoder.layers.{i}.sa_qpos_proj.bias""")) rename_keys.append((F"""transformer.decoder.layers.{i}.sa_kpos_proj.bias""", F"""decoder.layers.{i}.sa_kpos_proj.bias""")) rename_keys.append((F"""transformer.decoder.layers.{i}.sa_v_proj.bias""", F"""decoder.layers.{i}.sa_v_proj.bias""")) rename_keys.append( 
(F"""transformer.decoder.layers.{i}.ca_qcontent_proj.bias""", F"""decoder.layers.{i}.ca_qcontent_proj.bias""") ) # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias")) rename_keys.append( (F"""transformer.decoder.layers.{i}.ca_kcontent_proj.bias""", F"""decoder.layers.{i}.ca_kcontent_proj.bias""") ) rename_keys.append((F"""transformer.decoder.layers.{i}.ca_kpos_proj.bias""", F"""decoder.layers.{i}.ca_kpos_proj.bias""")) rename_keys.append((F"""transformer.decoder.layers.{i}.ca_v_proj.bias""", F"""decoder.layers.{i}.ca_v_proj.bias""")) rename_keys.append( (F"""transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias""", F"""decoder.layers.{i}.ca_qpos_sine_proj.bias""") ) # convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads # for conditional DETR, also convert reference point head and query scale MLP rename_keys.extend( [ ("input_proj.weight", "input_projection.weight"), ("input_proj.bias", "input_projection.bias"), ("query_embed.weight", "query_position_embeddings.weight"), ("transformer.decoder.norm.weight", "decoder.layernorm.weight"), ("transformer.decoder.norm.bias", "decoder.layernorm.bias"), ("class_embed.weight", "class_labels_classifier.weight"), ("class_embed.bias", "class_labels_classifier.bias"), ("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"), ("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"), ("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"), ("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"), ("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"), ("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"), ("transformer.decoder.ref_point_head.layers.0.weight", "decoder.ref_point_head.layers.0.weight"), ("transformer.decoder.ref_point_head.layers.0.bias", "decoder.ref_point_head.layers.0.bias"), ("transformer.decoder.ref_point_head.layers.1.weight", 
"decoder.ref_point_head.layers.1.weight"), ("transformer.decoder.ref_point_head.layers.1.bias", "decoder.ref_point_head.layers.1.bias"), ("transformer.decoder.query_scale.layers.0.weight", "decoder.query_scale.layers.0.weight"), ("transformer.decoder.query_scale.layers.0.bias", "decoder.query_scale.layers.0.bias"), ("transformer.decoder.query_scale.layers.1.weight", "decoder.query_scale.layers.1.weight"), ("transformer.decoder.query_scale.layers.1.bias", "decoder.query_scale.layers.1.bias"), ("transformer.decoder.layers.0.ca_qpos_proj.weight", "decoder.layers.0.ca_qpos_proj.weight"), ("transformer.decoder.layers.0.ca_qpos_proj.bias", "decoder.layers.0.ca_qpos_proj.bias"), ] ) def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ): __SCREAMING_SNAKE_CASE = state_dict.pop(UpperCamelCase_ ) __SCREAMING_SNAKE_CASE = val def _lowerCAmelCase ( UpperCamelCase_ ): __SCREAMING_SNAKE_CASE = OrderedDict() for key, value in state_dict.items(): if "backbone.0.body" in key: __SCREAMING_SNAKE_CASE = key.replace("""backbone.0.body""" , """backbone.conv_encoder.model""" ) __SCREAMING_SNAKE_CASE = value else: __SCREAMING_SNAKE_CASE = value return new_state_dict def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_=False ): __SCREAMING_SNAKE_CASE = """""" if is_panoptic: __SCREAMING_SNAKE_CASE = """conditional_detr.""" # first: transformer encoder for i in range(6 ): # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias) __SCREAMING_SNAKE_CASE = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight" ) __SCREAMING_SNAKE_CASE = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias" ) # next, add query, keys and values (in that order) to the state dict __SCREAMING_SNAKE_CASE = in_proj_weight[:256, :] __SCREAMING_SNAKE_CASE = in_proj_bias[:256] __SCREAMING_SNAKE_CASE = in_proj_weight[256:512, :] __SCREAMING_SNAKE_CASE = in_proj_bias[256:512] 
__SCREAMING_SNAKE_CASE = in_proj_weight[-256:, :] __SCREAMING_SNAKE_CASE = in_proj_bias[-256:] def _lowerCAmelCase ( ): __SCREAMING_SNAKE_CASE = """http://images.cocodataset.org/val2017/000000039769.jpg""" __SCREAMING_SNAKE_CASE = Image.open(requests.get(UpperCamelCase_ , stream=UpperCamelCase_ ).raw ) return im @torch.no_grad() def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_ ): __SCREAMING_SNAKE_CASE = ConditionalDetrConfig() # set backbone and dilation attributes if "resnet101" in model_name: __SCREAMING_SNAKE_CASE = """resnet101""" if "dc5" in model_name: __SCREAMING_SNAKE_CASE = True __SCREAMING_SNAKE_CASE = """panoptic""" in model_name if is_panoptic: __SCREAMING_SNAKE_CASE = 250 else: __SCREAMING_SNAKE_CASE = 91 __SCREAMING_SNAKE_CASE = """huggingface/label-files""" __SCREAMING_SNAKE_CASE = """coco-detection-id2label.json""" __SCREAMING_SNAKE_CASE = json.load(open(hf_hub_download(UpperCamelCase_ , UpperCamelCase_ , repo_type="""dataset""" ) , """r""" ) ) __SCREAMING_SNAKE_CASE = {int(UpperCamelCase_ ): v for k, v in idalabel.items()} __SCREAMING_SNAKE_CASE = idalabel __SCREAMING_SNAKE_CASE = {v: k for k, v in idalabel.items()} # load image processor __SCREAMING_SNAKE_CASE = """coco_panoptic""" if is_panoptic else """coco_detection""" __SCREAMING_SNAKE_CASE = ConditionalDetrImageProcessor(format=UpperCamelCase_ ) # prepare image __SCREAMING_SNAKE_CASE = prepare_img() __SCREAMING_SNAKE_CASE = image_processor(images=UpperCamelCase_ , return_tensors="""pt""" ) __SCREAMING_SNAKE_CASE = encoding["""pixel_values"""] logger.info(f"Converting model {model_name}..." 
) # load original model from torch hub __SCREAMING_SNAKE_CASE = torch.hub.load("""DeppMeng/ConditionalDETR""" , UpperCamelCase_ , pretrained=UpperCamelCase_ ).eval() __SCREAMING_SNAKE_CASE = conditional_detr.state_dict() # rename keys for src, dest in rename_keys: if is_panoptic: __SCREAMING_SNAKE_CASE = """conditional_detr.""" + src rename_key(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) __SCREAMING_SNAKE_CASE = rename_backbone_keys(UpperCamelCase_ ) # query, key and value matrices need special treatment read_in_q_k_v(UpperCamelCase_ , is_panoptic=UpperCamelCase_ ) # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them __SCREAMING_SNAKE_CASE = """conditional_detr.model.""" if is_panoptic else """model.""" for key in state_dict.copy().keys(): if is_panoptic: if ( key.startswith("""conditional_detr""" ) and not key.startswith("""class_labels_classifier""" ) and not key.startswith("""bbox_predictor""" ) ): __SCREAMING_SNAKE_CASE = state_dict.pop(UpperCamelCase_ ) __SCREAMING_SNAKE_CASE = val elif "class_labels_classifier" in key or "bbox_predictor" in key: __SCREAMING_SNAKE_CASE = state_dict.pop(UpperCamelCase_ ) __SCREAMING_SNAKE_CASE = val elif key.startswith("""bbox_attention""" ) or key.startswith("""mask_head""" ): continue else: __SCREAMING_SNAKE_CASE = state_dict.pop(UpperCamelCase_ ) __SCREAMING_SNAKE_CASE = val else: if not key.startswith("""class_labels_classifier""" ) and not key.startswith("""bbox_predictor""" ): __SCREAMING_SNAKE_CASE = state_dict.pop(UpperCamelCase_ ) __SCREAMING_SNAKE_CASE = val # finally, create HuggingFace model and load state dict __SCREAMING_SNAKE_CASE = ConditionalDetrForSegmentation(UpperCamelCase_ ) if is_panoptic else ConditionalDetrForObjectDetection(UpperCamelCase_ ) model.load_state_dict(UpperCamelCase_ ) model.eval() model.push_to_hub(repo_id=UpperCamelCase_ , organization="""DepuMeng""" , commit_message="""Add model""" ) # verify our conversion 
__SCREAMING_SNAKE_CASE = conditional_detr(UpperCamelCase_ ) __SCREAMING_SNAKE_CASE = model(UpperCamelCase_ ) assert torch.allclose(outputs.logits , original_outputs["""pred_logits"""] , atol=1e-4 ) assert torch.allclose(outputs.pred_boxes , original_outputs["""pred_boxes"""] , atol=1e-4 ) if is_panoptic: assert torch.allclose(outputs.pred_masks , original_outputs["""pred_masks"""] , atol=1e-4 ) # Save model and image processor logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}..." ) Path(UpperCamelCase_ ).mkdir(exist_ok=UpperCamelCase_ ) model.save_pretrained(UpperCamelCase_ ) image_processor.save_pretrained(UpperCamelCase_ ) if __name__ == "__main__": __magic_name__ = argparse.ArgumentParser() parser.add_argument( "--model_name", default="conditional_detr_resnet50", type=str, help="Name of the CONDITIONAL_DETR model you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model." ) __magic_name__ = parser.parse_args() convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
155
1
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, is_vision_available, ) lowerCAmelCase_ = { 'configuration_owlvit': [ 'OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'OwlViTConfig', 'OwlViTOnnxConfig', 'OwlViTTextConfig', 'OwlViTVisionConfig', ], 'processing_owlvit': ['OwlViTProcessor'], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = ['OwlViTFeatureExtractor'] lowerCAmelCase_ = ['OwlViTImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ 'OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST', 'OwlViTModel', 'OwlViTPreTrainedModel', 'OwlViTTextModel', 'OwlViTVisionModel', 'OwlViTForObjectDetection', ] if TYPE_CHECKING: from .configuration_owlvit import ( OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, OwlViTConfig, OwlViTOnnxConfig, OwlViTTextConfig, OwlViTVisionConfig, ) from .processing_owlvit import OwlViTProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_owlvit import OwlViTFeatureExtractor from .image_processing_owlvit import OwlViTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_owlvit import ( OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST, OwlViTForObjectDetection, OwlViTModel, OwlViTPreTrainedModel, OwlViTTextModel, OwlViTVisionModel, ) else: import sys lowerCAmelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
718
import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from .tokenization_lxmert import LxmertTokenizer lowerCAmelCase_ = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""} lowerCAmelCase_ = { """vocab_file""": { """unc-nlp/lxmert-base-uncased""": """https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt""", }, """tokenizer_file""": { """unc-nlp/lxmert-base-uncased""": ( """https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json""" ), }, } lowerCAmelCase_ = { """unc-nlp/lxmert-base-uncased""": 512, } lowerCAmelCase_ = { """unc-nlp/lxmert-base-uncased""": {"""do_lower_case""": True}, } class _lowerCAmelCase ( UpperCAmelCase_ ): '''simple docstring''' a_ : List[Any] =VOCAB_FILES_NAMES a_ : Tuple =PRETRAINED_VOCAB_FILES_MAP a_ : Optional[Any] =PRETRAINED_INIT_CONFIGURATION a_ : Any =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a_ : Any =LxmertTokenizer def __init__( self : Any , UpperCamelCase : Union[str, Any]=None , UpperCamelCase : Dict=None , UpperCamelCase : List[str]=True , UpperCamelCase : List[str]="[UNK]" , UpperCamelCase : List[Any]="[SEP]" , UpperCamelCase : List[Any]="[PAD]" , UpperCamelCase : Optional[Any]="[CLS]" , UpperCamelCase : Optional[int]="[MASK]" , UpperCamelCase : Optional[int]=True , UpperCamelCase : str=None , **UpperCamelCase : List[str] , ): '''simple docstring''' super().__init__( UpperCamelCase , tokenizer_file=UpperCamelCase , do_lower_case=UpperCamelCase , unk_token=UpperCamelCase , sep_token=UpperCamelCase , pad_token=UpperCamelCase , cls_token=UpperCamelCase , mask_token=UpperCamelCase , tokenize_chinese_chars=UpperCamelCase , strip_accents=UpperCamelCase , **UpperCamelCase , ) _snake_case : Tuple = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('lowercase' , UpperCamelCase ) != do_lower_case or normalizer_state.get('strip_accents' , 
UpperCamelCase ) != strip_accents or normalizer_state.get('handle_chinese_chars' , UpperCamelCase ) != tokenize_chinese_chars ): _snake_case : List[Any] = getattr(UpperCamelCase , normalizer_state.pop('type' ) ) _snake_case : Optional[int] = do_lower_case _snake_case : Dict = strip_accents _snake_case : Optional[int] = tokenize_chinese_chars _snake_case : Optional[Any] = normalizer_class(**UpperCamelCase ) _snake_case : int = do_lower_case def UpperCamelCase_ ( self : int , UpperCamelCase : List[str] , UpperCamelCase : str=None ): '''simple docstring''' _snake_case : List[Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def UpperCamelCase_ ( self : List[str] , UpperCamelCase : List[int] , UpperCamelCase : Optional[List[int]] = None ): '''simple docstring''' _snake_case : Tuple = [self.sep_token_id] _snake_case : List[str] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def UpperCamelCase_ ( self : int , UpperCamelCase : str , UpperCamelCase : Optional[str] = None ): '''simple docstring''' _snake_case : int = self._tokenizer.model.save(UpperCamelCase , name=UpperCamelCase ) return tuple(UpperCamelCase )
669
0
"""simple docstring""" def __UpperCAmelCase ( __UpperCamelCase ): if not isinstance(__UpperCamelCase , __UpperCamelCase ) or number < 0: raise ValueError('''Input must be a non-negative integer''' ) __lowercase : Optional[int] = 0 while number: # This way we arrive at next set bit (next 1) instead of looping # through each bit and checking for 1s hence the # loop won't run 32 times it will only run the number of `1` times number &= number - 1 count += 1 return count if __name__ == "__main__": import doctest doctest.testmod()
76
SCREAMING_SNAKE_CASE__ : dict[str, float] = { "joule": 1.0, "kilojoule": 1_000, "megajoule": 1_000_000, "gigajoule": 1_000_000_000, "wattsecond": 1.0, "watthour": 3_600, "kilowatthour": 3_600_000, "newtonmeter": 1.0, "calorie_nutr": 4_186.8, "kilocalorie_nutr": 4_186_800.00, "electronvolt": 1.602176634e-19, "britishthermalunit_it": 1_055.05_585, "footpound": 1.355_818, } def a__ ( snake_case__ : str , snake_case__ : str , snake_case__ : float ): if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION: _UpperCAmelCase : Optional[Any] = ( f'''Incorrect \'from_type\' or \'to_type\' value: {from_type!r}, {to_type!r}\n''' f'''Valid values are: {", ".join(snake_case__ )}''' ) raise ValueError(snake_case__ ) return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type] if __name__ == "__main__": import doctest doctest.testmod()
643
0
'''simple docstring''' def SCREAMING_SNAKE_CASE__ ( __A ) -> list[int]: _snake_case = len(__A ) for i in range(__A ): for j in range(i + 1 , __A ): if numbers[j] < numbers[i]: _snake_case , _snake_case = numbers[j], numbers[i] return numbers if __name__ == "__main__": lowercase : Union[str, Any] = input("Enter numbers separated by a comma:\n").strip() lowercase : str = [int(item) for item in user_input.split(",")] print(exchange_sort(unsorted))
542
'''simple docstring''' import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST, OpenAIGPTConfig, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification, OpenAIGPTLMHeadModel, OpenAIGPTModel, ) class __UpperCAmelCase : def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=13 , lowerCAmelCase_=7 , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=99 , lowerCAmelCase_=32 , lowerCAmelCase_=5 , lowerCAmelCase_=4 , lowerCAmelCase_=37 , lowerCAmelCase_="gelu" , lowerCAmelCase_=0.1 , lowerCAmelCase_=0.1 , lowerCAmelCase_=5_12 , lowerCAmelCase_=16 , lowerCAmelCase_=2 , lowerCAmelCase_=0.02 , lowerCAmelCase_=3 , lowerCAmelCase_=4 , lowerCAmelCase_=None , ): """simple docstring""" _snake_case = parent _snake_case = batch_size _snake_case = seq_length _snake_case = is_training _snake_case = use_token_type_ids _snake_case = use_labels _snake_case = vocab_size _snake_case = hidden_size _snake_case = num_hidden_layers _snake_case = num_attention_heads _snake_case = intermediate_size _snake_case = hidden_act _snake_case = hidden_dropout_prob _snake_case = attention_probs_dropout_prob _snake_case = max_position_embeddings _snake_case = type_vocab_size _snake_case = type_sequence_label_size _snake_case = initializer_range _snake_case = num_labels _snake_case = num_choices _snake_case = scope _snake_case = self.vocab_size - 1 def lowerCamelCase ( self ): """simple docstring""" _snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _snake_case = None if self.use_token_type_ids: _snake_case = 
ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _snake_case = None _snake_case = None _snake_case = None if self.use_labels: _snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _snake_case = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _snake_case = ids_tensor([self.batch_size] , self.num_choices ) _snake_case = OpenAIGPTConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , ) _snake_case = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 ) return ( config, input_ids, head_mask, token_type_ids, sequence_labels, token_labels, choice_labels, ) def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , *lowerCAmelCase_ ): """simple docstring""" _snake_case = OpenAIGPTModel(config=lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() _snake_case = model(lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , head_mask=lowerCAmelCase_ ) _snake_case = model(lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ ) _snake_case = model(lowerCAmelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , *lowerCAmelCase_ ): """simple docstring""" _snake_case = OpenAIGPTLMHeadModel(lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() _snake_case = model(lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , *lowerCAmelCase_ ): """simple docstring""" _snake_case = 
OpenAIGPTDoubleHeadsModel(lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() _snake_case = model(lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , *lowerCAmelCase_ ): """simple docstring""" _snake_case = self.num_labels _snake_case = OpenAIGPTForSequenceClassification(lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() _snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _snake_case = model(lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase ( self ): """simple docstring""" _snake_case = self.prepare_config_and_inputs() ( ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ) = config_and_inputs _snake_case = { 'input_ids': input_ids, 'token_type_ids': token_type_ids, 'head_mask': head_mask, } return config, inputs_dict @require_torch class __UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , unittest.TestCase ): __lowercase = ( (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification) if is_torch_available() else () ) __lowercase = ( (OpenAIGPTLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly __lowercase = ( { """feature-extraction""": OpenAIGPTModel, """text-classification""": OpenAIGPTForSequenceClassification, """text-generation""": OpenAIGPTLMHeadModel, """zero-shot""": OpenAIGPTForSequenceClassification, } if is_torch_available() else {} ) def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , 
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests": # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers. # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a # tiny config could not be created. return True return False def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=False ): """simple docstring""" _snake_case = super()._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ , return_labels=lowerCAmelCase_ ) if return_labels: if model_class.__name__ == "OpenAIGPTDoubleHeadsModel": _snake_case = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=lowerCAmelCase_ , ) _snake_case = inputs_dict['labels'] _snake_case = inputs_dict['labels'] _snake_case = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=lowerCAmelCase_ , ) _snake_case = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase_ ) return inputs_dict def lowerCamelCase ( self ): """simple docstring""" _snake_case = OpenAIGPTModelTester(self ) _snake_case = ConfigTester(self , config_class=lowerCAmelCase_ , n_embd=37 ) def lowerCamelCase ( self ): """simple docstring""" self.config_tester.run_common_tests() def lowerCamelCase ( self ): """simple docstring""" _snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_model(*lowerCAmelCase_ ) def lowerCamelCase ( self ): """simple docstring""" _snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head_model(*lowerCAmelCase_ ) def lowerCamelCase ( self ): """simple docstring""" _snake_case = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_double_lm_head_model(*lowerCAmelCase_ ) def lowerCamelCase ( self ): """simple docstring""" _snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*lowerCAmelCase_ ) @slow def lowerCamelCase ( self ): """simple docstring""" for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _snake_case = OpenAIGPTModel.from_pretrained(lowerCAmelCase_ ) self.assertIsNotNone(lowerCAmelCase_ ) @require_torch class __UpperCAmelCase ( unittest.TestCase ): @slow def lowerCamelCase ( self ): """simple docstring""" _snake_case = OpenAIGPTLMHeadModel.from_pretrained('openai-gpt' ) model.to(lowerCAmelCase_ ) _snake_case = torch.tensor([[4_81, 47_35, 5_44]] , dtype=torch.long , device=lowerCAmelCase_ ) # the president is _snake_case = [ 4_81, 47_35, 5_44, 2_46, 9_63, 8_70, 7_62, 2_39, 2_44, 4_04_77, 2_44, 2_49, 7_19, 8_81, 4_87, 5_44, 2_40, 2_44, 6_03, 4_81, ] # the president is a very good man. " \n " i\'m sure he is, " said the _snake_case = model.generate(lowerCAmelCase_ , do_sample=lowerCAmelCase_ ) self.assertListEqual(output_ids[0].tolist() , lowerCAmelCase_ )
542
1
"""simple docstring""" import warnings from ...utils import logging from .image_processing_perceiver import PerceiverImageProcessor lowercase_ : Optional[int] = logging.get_logger(__name__) class UpperCamelCase ( __SCREAMING_SNAKE_CASE ): def __init__( self , *snake_case__ , **snake_case__ ): """simple docstring""" warnings.warn( "The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers." " Please use PerceiverImageProcessor instead." , snake_case__ , ) super().__init__(*snake_case__ , **snake_case__ )
572
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging lowercase_ : Union[str, Any] = logging.get_logger(__name__) lowercase_ : Any = { '''facebook/timesformer''': '''https://huggingface.co/facebook/timesformer/resolve/main/config.json''', } class UpperCamelCase ( __SCREAMING_SNAKE_CASE ): A__ = """timesformer""" def __init__( self , snake_case__=224 , snake_case__=16 , snake_case__=3 , snake_case__=8 , snake_case__=768 , snake_case__=12 , snake_case__=12 , snake_case__=3072 , snake_case__="gelu" , snake_case__=0.0 , snake_case__=0.0 , snake_case__=0.02 , snake_case__=1E-6 , snake_case__=True , snake_case__="divided_space_time" , snake_case__=0 , **snake_case__ , ): """simple docstring""" super().__init__(**snake_case__ ) _SCREAMING_SNAKE_CASE : int = image_size _SCREAMING_SNAKE_CASE : Optional[int] = patch_size _SCREAMING_SNAKE_CASE : List[Any] = num_channels _SCREAMING_SNAKE_CASE : Any = num_frames _SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_size _SCREAMING_SNAKE_CASE : Union[str, Any] = num_hidden_layers _SCREAMING_SNAKE_CASE : Any = num_attention_heads _SCREAMING_SNAKE_CASE : Tuple = intermediate_size _SCREAMING_SNAKE_CASE : Dict = hidden_act _SCREAMING_SNAKE_CASE : List[str] = hidden_dropout_prob _SCREAMING_SNAKE_CASE : Optional[Any] = attention_probs_dropout_prob _SCREAMING_SNAKE_CASE : List[str] = initializer_range _SCREAMING_SNAKE_CASE : Union[str, Any] = layer_norm_eps _SCREAMING_SNAKE_CASE : int = qkv_bias _SCREAMING_SNAKE_CASE : Optional[Any] = attention_type _SCREAMING_SNAKE_CASE : str = drop_path_rate
572
1
# NOTE(review): this file is machine-obfuscated. Every test method shares the
# name `__UpperCAmelCase` (each later def overwrites the previous one), locals
# are assigned to `lowerCamelCase__` but read back under their pre-obfuscation
# names (`tokenizer`, `vocab_keys`, `tokenizer_r`, ...), and the class inherits
# from an undefined `A__` (presumably TokenizerTesterMixin — TODO confirm).
# Code is preserved byte-for-byte; only comments/docstrings were added/changed.
import unittest

from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property

from ...test_tokenization_common import TokenizerTesterMixin


# Path to the sentencepiece fixture model used by the tokenizer under test.
lowerCAmelCase_ = get_tests_dir("fixtures/test_sentencepiece.model")


@require_sentencepiece
@require_tokenizers
class snake_case_(A__, unittest.TestCase):
    """Tokenization tests for the Reformer slow/fast tokenizers."""

    # NOTE(review): all five class attributes share one name; only the last
    # assignment survives at runtime.
    __lowerCAmelCase: List[str] = ReformerTokenizer
    __lowerCAmelCase: List[Any] = ReformerTokenizerFast
    __lowerCAmelCase: List[str] = True
    __lowerCAmelCase: Union[str, Any] = False
    __lowerCAmelCase: List[Any] = True

    def __UpperCAmelCase(self):
        """Save a fixture-backed slow tokenizer into the temp dir."""
        super().setUp()
        lowerCamelCase__ = ReformerTokenizer(UpperCamelCase, keep_accents=UpperCamelCase)
        tokenizer.save_pretrained(self.tmpdirname)

    def __UpperCAmelCase(self):
        """`<s>` should round-trip through token<->id conversion as id 1."""
        lowerCamelCase__ = "<s>"
        lowerCamelCase__ = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase), UpperCamelCase)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase), UpperCamelCase)

    def __UpperCAmelCase(self):
        """Spot-check the vocabulary contents and its size (1000 entries)."""
        lowerCamelCase__ = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(UpperCamelCase), 10_00)

    def __UpperCAmelCase(self):
        """vocab_size property matches the fixture model."""
        self.assertEqual(self.get_tokenizer().vocab_size, 10_00)

    def __UpperCAmelCase(self):
        """Slow and fast tokenizers must agree on tokenize/encode output."""
        if not self.test_rust_tokenizer:
            return

        lowerCamelCase__ = self.get_tokenizer()
        lowerCamelCase__ = self.get_rust_tokenizer()

        lowerCamelCase__ = "I was born in 92000, and this is falsé."

        lowerCamelCase__ = tokenizer.tokenize(UpperCamelCase)
        lowerCamelCase__ = rust_tokenizer.tokenize(UpperCamelCase)
        self.assertListEqual(UpperCamelCase, UpperCamelCase)

        lowerCamelCase__ = tokenizer.encode(UpperCamelCase, add_special_tokens=UpperCamelCase)
        lowerCamelCase__ = rust_tokenizer.encode(UpperCamelCase, add_special_tokens=UpperCamelCase)
        self.assertListEqual(UpperCamelCase, UpperCamelCase)

        lowerCamelCase__ = self.get_rust_tokenizer()
        lowerCamelCase__ = tokenizer.encode(UpperCamelCase)
        lowerCamelCase__ = rust_tokenizer.encode(UpperCamelCase)
        self.assertListEqual(UpperCamelCase, UpperCamelCase)

    def __UpperCAmelCase(self, UpperCamelCase=15):
        """padding='max_length' must raise for a tokenizer without a pad token."""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})"""):
                lowerCamelCase__ = self.rust_tokenizer_class.from_pretrained(UpperCamelCase, **UpperCamelCase)

                # Simple input
                lowerCamelCase__ = "This is a simple input"
                lowerCamelCase__ = ["This is a simple input 1", "This is a simple input 2"]
                lowerCamelCase__ = ("This is a simple input", "This is a pair")
                lowerCamelCase__ = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(UpperCamelCase, tokenizer_r.encode, UpperCamelCase, max_length=UpperCamelCase, padding="max_length")

                # Simple input
                self.assertRaises(UpperCamelCase, tokenizer_r.encode_plus, UpperCamelCase, max_length=UpperCamelCase, padding="max_length")

                # Simple input
                self.assertRaises(
                    UpperCamelCase,
                    tokenizer_r.batch_encode_plus,
                    UpperCamelCase,
                    max_length=UpperCamelCase,
                    padding="max_length",
                )

                # Pair input
                self.assertRaises(UpperCamelCase, tokenizer_r.encode, UpperCamelCase, max_length=UpperCamelCase, padding="max_length")

                # Pair input
                self.assertRaises(UpperCamelCase, tokenizer_r.encode_plus, UpperCamelCase, max_length=UpperCamelCase, padding="max_length")

                # Pair input
                self.assertRaises(
                    UpperCamelCase,
                    tokenizer_r.batch_encode_plus,
                    UpperCamelCase,
                    max_length=UpperCamelCase,
                    padding="max_length",
                )

    def __UpperCAmelCase(self):
        """Intentionally skipped in the original source."""
        pass

    def __UpperCAmelCase(self):
        """Full tokenization round-trip against known sentencepiece output."""
        lowerCamelCase__ = ReformerTokenizer(UpperCamelCase, keep_accents=UpperCamelCase)

        lowerCamelCase__ = tokenizer.tokenize("This is a test")
        self.assertListEqual(UpperCamelCase, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(UpperCamelCase),
            [2_85, 46, 10, 1_70, 3_82],
        )

        lowerCamelCase__ = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            UpperCamelCase,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        lowerCamelCase__ = tokenizer.convert_tokens_to_ids(UpperCamelCase)
        self.assertListEqual(
            UpperCamelCase,
            [8, 21, 84, 55, 24, 19, 7, 0, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 0, 4],
        )

        lowerCamelCase__ = tokenizer.convert_ids_to_tokens(UpperCamelCase)
        self.assertListEqual(
            UpperCamelCase,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )

    @cached_property
    def __UpperCAmelCase(self):
        """Pretrained tokenizer for the slow integration tests below."""
        return ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")

    @slow
    def __UpperCAmelCase(self):
        """Known encoding of a short string with the pretrained tokenizer."""
        lowerCamelCase__ = "Hello World!"
        lowerCamelCase__ = [1_26, 32, 2_62, 1_52, 38, 72, 2_87]

        self.assertListEqual(UpperCamelCase, self.big_tokenizer.encode(UpperCamelCase))

    @slow
    def __UpperCAmelCase(self):
        """Known encoding of a long string with unknown characters/words."""
        lowerCamelCase__ = (
            "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        lowerCamelCase__ = [
            1_08, 2_65, 24, 1_11, 4, 2_58, 1_56, 35, 28, 2_75, 3, 2_59, 2_97, 2_60, 84, 4, 35, 1_10, 44, 8, 2_59, 91,
            2_68, 21, 11, 2_09, 2_74, 1_09, 2_66, 2_77, 1_17, 86, 93, 3_15, 2_58, 2_78, 2_58, 2_77, 2_58, 0, 2_58,
            2_88, 2_58, 3_19, 2_58, 0, 2_58, 0, 2_58, 0, 2_58, 0, 2_58, 2_87, 2_58, 3_15, 2_58, 2_89, 2_58, 2_78, 99,
            2_69, 2_66, 2_62, 8, 2_59, 2_41, 4, 2_17, 2_30, 2_68, 2_66, 55, 1_68, 1_06, 75, 1_93, 2_66, 2_23, 27, 49,
            26, 2_82, 25, 2_64, 2_99, 19, 26, 0, 2_58, 2_77, 1_17, 86, 93, 1_76, 1_83, 2_70, 11, 2_62, 42, 61, 2_65,
        ]
        self.assertListEqual(UpperCamelCase, self.big_tokenizer.encode(UpperCamelCase))

    @require_torch
    @slow
    def __UpperCAmelCase(self):
        """Encoded sequences must be consumable by a ReformerModel forward pass."""
        import torch

        from transformers import ReformerConfig, ReformerModel

        # Build sequence
        lowerCamelCase__ = list(self.big_tokenizer.get_vocab().keys())[:10]
        lowerCamelCase__ = " ".join(UpperCamelCase)
        lowerCamelCase__ = self.big_tokenizer.encode_plus(UpperCamelCase, return_tensors="pt")
        lowerCamelCase__ = self.big_tokenizer.batch_encode_plus([sequence, sequence], return_tensors="pt")

        lowerCamelCase__ = ReformerConfig()
        # The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024)
        lowerCamelCase__ = encoded_sequence["input_ids"].shape
        lowerCamelCase__ = ReformerModel(UpperCamelCase)

        # Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size

        with torch.no_grad():
            model(**UpperCamelCase)
            model(**UpperCamelCase)

    @slow
    def __UpperCAmelCase(self):
        """Pinned encoding (ids + attention mask) for two simple sentences."""
        # fmt: off
        lowerCamelCase__ = {"input_ids": [[1_08, 2_65, 24, 1_11, 4, 2_58, 1_56, 7, 51, 2_79, 58, 7, 76, 25, 69, 2_78], [1_40, 2_43, 2_64, 1_34, 17, 2_67, 77, 2_63, 22, 2_62, 2_97, 2_58, 3_04, 1_77, 2_79, 2_66, 14, 89, 13, 35, 2_61, 2_99, 2_72, 1_37, 2_75, 2_78]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on

        # This tokenizer does not know some characters like ")".
        # That is the reason why we use very simple texts here.
        # Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
        lowerCamelCase__ = [
            "This is a very simple sentence.",
            "The quick brown fox jumps over the lazy dog.",
        ]
        self.tokenizer_integration_test_util(
            expected_encoding=UpperCamelCase,
            model_name="google/reformer-crime-and-punishment",
            revision="0e6c3decb8211d49bf881013425dc8b0448b3f5a",
            padding=UpperCamelCase,
            sequences=UpperCamelCase,
        )
701
'''simple docstring'''
lowerCAmelCase_ = "Alexander Joslin"

import operator as op

from .stack import Stack


def dijkstras_two_stack_algorithm(equation: str) -> int:
    """Evaluate a fully parenthesized infix arithmetic expression.

    Implements Dijkstra's two-stack algorithm: one stack holds operands, the
    other operators; each closing parenthesis pops one operator and two
    operands, applies the operator, and pushes the result back.

    Only single-digit operands and the binary operators + - * / are
    supported; every operation must be wrapped in parentheses.

    :param equation: expression such as "(5 + ((4 * 2) * (2 + 3)))"
    :return: the value of the expression (45 for the example above)
    """
    operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}

    operand_stack = Stack()
    operator_stack = Stack()

    for i in equation:
        if i.isdigit():
            # RULE 1: push operands
            operand_stack.push(int(i))
        elif i in operators:
            # RULE 2: push operators
            operator_stack.push(i)
        elif i == ")":
            # RULE 4: apply the most recent operator to the top two operands;
            # the first pop is the RIGHT operand (pushed last).
            opr = operator_stack.peek()
            operator_stack.pop()
            num_right = operand_stack.peek()
            operand_stack.pop()
            num_left = operand_stack.peek()
            operand_stack.pop()

            total = operators[opr](num_left, num_right)
            operand_stack.push(total)
        # other characters (digits' parentheses, spaces) are ignored

    # RULE 5: the single remaining operand is the expression's value
    return operand_stack.peek()


# Backward-compatible alias for the original (obfuscated) public name.
lowerCAmelCase = dijkstras_two_stack_algorithm

if __name__ == "__main__":
    # NOTE(review): the original __main__ assigned the equation to a garbled
    # name and then referenced the undefined `equation` — fixed here.
    equation = "(5 + ((4 * 2) * (2 + 3)))"
    # answer = 45
    print(f"{equation} = {dijkstras_two_stack_algorithm(equation)}")
426
0
def _print_dist(dist, v):
    """Pretty-print the v x v shortest-path matrix; unreachable pairs print INF."""
    print("\nThe shortest path matrix using Floyd Warshall algorithm\n")
    for i in range(v):
        for j in range(v):
            if dist[i][j] != float("inf"):
                print(int(dist[i][j]), end="\t")
            else:
                print("INF", end="\t")
        print()


def floyd_warshall(graph, v):
    """All-pairs shortest paths via the Floyd-Warshall algorithm.

    :param graph: v x v adjacency matrix of edge weights, with float("inf")
        where there is no edge (the input matrix is not modified).
    :param v: number of vertices.
    :return: tuple ``(dist, v)`` where ``dist[i][j]`` is the shortest
        distance from vertex i to vertex j.

    NOTE(review): the obfuscated original declared both functions under one
    name and with duplicate parameter names (a SyntaxError); the canonical
    names/parameters were restored from the call sites in ``__main__``.
    """
    dist = [[float("inf") for _ in range(v)] for _ in range(v)]

    # Initialize distances with the direct edge weights.
    for i in range(v):
        for j in range(v):
            dist[i][j] = graph[i][j]

    # check vertex k against all other vertices (i, j)
    for k in range(v):
        # looping through rows of graph array
        for i in range(v):
            # looping through columns of graph array
            for j in range(v):
                if (
                    dist[i][k] != float("inf")
                    and dist[k][j] != float("inf")
                    and dist[i][k] + dist[k][j] < dist[i][j]
                ):
                    dist[i][j] = dist[i][k] + dist[k][j]

    _print_dist(dist, v)
    return dist, v


if __name__ == "__main__":
    v = int(input("Enter number of vertices: "))
    e = int(input("Enter number of edges: "))

    graph = [[float("inf") for i in range(v)] for j in range(v)]

    # Distance from a vertex to itself is zero (this assignment was garbled
    # away in the obfuscated original, leaving the diagonal at infinity).
    for i in range(v):
        graph[i][i] = 0.0

    # src and dst are indices that must be within the array size graph[e][v]
    # failure to follow this will result in an error
    for i in range(e):
        print("\nEdge ", i + 1)
        src = int(input("Enter source:"))
        dst = int(input("Enter destination:"))
        weight = float(input("Enter weight:"))
        graph[src][dst] = weight

    floyd_warshall(graph, v)

    # Example Input
    # Enter number of vertices: 3
    # Enter number of edges: 2
    # generated graph from vertex and edge inputs
    # [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
    # [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
    # specify source, destination and weight for edge #1
    # Edge 1: Enter source:1  Enter destination:2  Enter weight:2
    # specify source, destination and weight for edge #2
    # Edge 2: Enter source:2  Enter destination:1  Enter weight:1
    # Expected Output from the vertice, edge and src, dst, weight inputs!!
    # 0 INF INF
    # INF 0 2
    # INF 1 0
63
import argparse
import os

import numpy as np
import tensorflow as tf
import torch

from transformers import BertModel


def convert_pytorch_checkpoint_to_tf(model, ckpt_dir, model_name):
    """Convert a PyTorch ``BertModel`` state dict into a TF 1.x checkpoint.

    :param model: BertModel instance whose weights are converted
    :param ckpt_dir: directory in which the TensorFlow checkpoint is written
    :param model_name: used (with '-' replaced by '_') as the checkpoint name

    NOTE(review): the obfuscated original declared all three parameters with
    the same name (a SyntaxError); names restored from usage.
    """
    # PyTorch stores these as (out, in); TF expects the transpose.
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")

    # Ordered PyTorch-name -> TF-name rewrite rules.
    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )

    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)

    state_dict = model.state_dict()

    def to_tf_var_name(name):
        # Apply every rewrite rule in order, then prefix with the bert scope.
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return f"bert/{name}"

    def create_tf_var(tensor, name, session):
        # Create a zero-initialized TF variable matching the torch tensor.
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            print(f"Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}")

        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace("-", "_") + ".ckpt"))


def main(raw_args=None):
    """CLI entry point: load a pytorch BERT checkpoint and convert it to TF.

    NOTE(review): the obfuscated original lost the argparse flag values
    (`type=_A, required=_A`); they were restored to the conventional ones
    implied by the help strings — confirm against the upstream script.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, required=True, help="model name e.g. bert-base-uncased")
    parser.add_argument(
        "--cache_dir", type=str, default=None, required=False, help="Directory containing pytorch model"
    )
    parser.add_argument("--pytorch_model_path", type=str, required=True, help="/path/to/<pytorch-model-name>.bin")
    parser.add_argument("--tf_cache_dir", type=str, required=True, help="Directory in which to save tensorflow model")
    args = parser.parse_args(raw_args)

    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name,
        state_dict=torch.load(args.pytorch_model_path),
        cache_dir=args.cache_dir,
    )

    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)


if __name__ == "__main__":
    main()
324
0
import unittest

from transformers.testing_utils import require_bsa
from transformers.utils import is_bsa_available

from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin


if is_bsa_available():
    from transformers import MarkupLMFeatureExtractor


class MarkupLMFeatureExtractionTester(unittest.TestCase):
    """Holds the (empty) feature-extractor config used by the test case below.

    NOTE(review): in the obfuscated original both classes in this file shared
    one name and this tester assigned an undefined `parent`; canonical names
    were restored from the surviving call sites.
    """

    def __init__(self, parent):
        self.parent = parent

    def prepare_feat_extract_dict(self):
        # MarkupLMFeatureExtractor takes no configuration.
        return {}


def get_html_strings():
    """Return the two HTML fixtures used by the extraction test."""
    html_string_1 = """<HTML>\n\n <HEAD>\n <TITLE>sample document</TITLE>\n </HEAD>\n\n <BODY BGCOLOR="FFFFFF">\n <HR>\n <a href="http://google.com">Goog</a>\n <H1>This is one header</H1>\n <H2>This is a another Header</H2>\n <P>Travel from\n <P>\n <B>SFO to JFK</B>\n <BR>\n <B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>\n <HR>\n <div style="color:#0000FF">\n <h3>Traveler <b> name </b> is\n <p> John Doe </p>\n </div>"""

    html_string_2 = """\n <!DOCTYPE html>\n <html>\n <body>\n\n <h1>My First Heading</h1>\n <p>My first paragraph.</p>\n\n </body>\n </html>\n """

    return [html_string_1, html_string_2]


@require_bsa
class MarkupLMFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase):
    """Tests node/xpath extraction of MarkupLMFeatureExtractor from raw HTML."""

    feature_extraction_class = MarkupLMFeatureExtractor if is_bsa_available() else None

    def setUp(self):
        self.feature_extract_tester = MarkupLMFeatureExtractionTester(self)

    @property
    def feat_extract_dict(self):
        return self.feature_extract_tester.prepare_feat_extract_dict()

    def test_call(self):
        # Initialize feature extractor
        feature_extractor = self.feature_extraction_class()

        # Test not batched input
        html_string = get_html_strings()[0]
        encoding = feature_extractor(html_string)

        # fmt: off
        expected_nodes = [["sample document", "Goog", "This is one header", "This is a another Header", "Travel from", "SFO to JFK", "on May 2, 2015 at 2:00 pm. For details go to confirm.com", "Traveler", "name", "is", "John Doe"]]
        expected_xpaths = [["/html/head/title", "/html/body/a", "/html/body/h1", "/html/body/h2", "/html/body/p", "/html/body/p/p/b[1]", "/html/body/p/p/b[2]/i", "/html/body/p/p/div/h3", "/html/body/p/p/div/h3/b", "/html/body/p/p/div/h3", "/html/body/p/p/div/h3/p"]]
        # fmt: on

        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths)

        # Test batched
        html_strings = get_html_strings()
        encoding = feature_extractor(html_strings)

        # fmt: off
        expected_nodes = expected_nodes + [["My First Heading", "My first paragraph."]]
        expected_xpaths = expected_xpaths + [["/html/body/h1", "/html/body/p"]]
        # fmt: on

        self.assertEqual(len(encoding.nodes), 2)
        self.assertEqual(len(encoding.xpaths), 2)

        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths)
701
def decimal_isolate(number: float, digit_amount: int) -> float:
    """Isolate the decimal (fractional) part of *number*.

    If ``digit_amount`` > 0 the fractional part is rounded to that many
    digits; otherwise it is returned unrounded (subject to the usual binary
    floating-point representation error).

    NOTE(review): the obfuscated original declared both parameters with the
    same name (a SyntaxError) and its ``__main__`` called the undefined
    ``decimal_isolate``; names restored accordingly.

    >>> decimal_isolate(35.345, 2)
    0.34
    """
    if digit_amount > 0:
        return round(number - int(number), digit_amount)
    return number - int(number)


if __name__ == "__main__":
    print(decimal_isolate(1.53, 0))
    print(decimal_isolate(35.345, 1))
    print(decimal_isolate(35.345, 2))
    print(decimal_isolate(35.345, 3))
    print(decimal_isolate(-14.789, 3))
    print(decimal_isolate(0, 2))
    print(decimal_isolate(-14.123, 1))
    print(decimal_isolate(-14.123, 2))
    print(decimal_isolate(-14.123, 3))
526
0
'''simple docstring'''
# NOTE(review): this file is machine-obfuscated. Every method shares the name
# `__SCREAMING_SNAKE_CASE`, locals are written to `lowerCAmelCase__` but read
# back under their pre-obfuscation names (`model`, `sample`, `image`, ...),
# most call arguments were replaced by the undefined `__magic_name__`, and
# several signatures repeat a parameter name (a SyntaxError). Code is kept
# byte-for-byte; only comments/docstrings were added or improved.
import gc
import unittest

import torch
from parameterized import parameterized

from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism

from .test_modeling_common import ModelTesterMixin, UNetTesterMixin


enable_full_determinism()


# NOTE(review): the two base names below are garbled/undefined — presumably
# ModelTesterMixin and UNetTesterMixin (imported, otherwise unused); confirm.
class A(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, unittest.TestCase):
    """Fast unit tests for the AutoencoderKL model."""

    snake_case__: Optional[int] = AutoencoderKL
    snake_case__: int = 'sample'
    snake_case__: str = 1e-2

    @property
    def __SCREAMING_SNAKE_CASE(self: List[Any]):
        """Random (4, 3, 32, 32) image batch wrapped as the model input dict."""
        lowerCAmelCase__ = 4
        lowerCAmelCase__ = 3
        lowerCAmelCase__ = (32, 32)

        lowerCAmelCase__ = floats_tensor((batch_size, num_channels) + sizes).to(__magic_name__)

        return {"sample": image}

    @property
    def __SCREAMING_SNAKE_CASE(self: Union[str, Any]):
        """Input shape (C, H, W)."""
        return (3, 32, 32)

    @property
    def __SCREAMING_SNAKE_CASE(self: Tuple):
        """Output shape (C, H, W)."""
        return (3, 32, 32)

    def __SCREAMING_SNAKE_CASE(self: Union[str, Any]):
        """Minimal AutoencoderKL init kwargs plus a dummy input batch."""
        lowerCAmelCase__ = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        lowerCAmelCase__ = self.dummy_input
        return init_dict, inputs_dict

    def __SCREAMING_SNAKE_CASE(self: int):
        """Intentionally skipped in the original source."""
        pass

    def __SCREAMING_SNAKE_CASE(self: Union[str, Any]):
        """Intentionally skipped in the original source."""
        pass

    @unittest.skipIf(torch_device == "mps", "Gradient checkpointing skipped on MPS")
    def __SCREAMING_SNAKE_CASE(self: Optional[Any]):
        """Gradient checkpointing must not change the loss or the gradients."""
        # enable deterministic behavior for gradient checkpointing
        lowerCAmelCase__, lowerCAmelCase__ = self.prepare_init_args_and_inputs_for_common()
        lowerCAmelCase__ = self.model_class(**__magic_name__)
        model.to(__magic_name__)

        assert not model.is_gradient_checkpointing and model.training

        lowerCAmelCase__ = model(**__magic_name__).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model.zero_grad()

        lowerCAmelCase__ = torch.randn_like(__magic_name__)
        lowerCAmelCase__ = (out - labels).mean()
        loss.backward()

        # re-instantiate the model now enabling gradient checkpointing
        lowerCAmelCase__ = self.model_class(**__magic_name__)
        # clone model
        model_a.load_state_dict(model.state_dict())
        model_a.to(__magic_name__)
        model_a.enable_gradient_checkpointing()

        assert model_a.is_gradient_checkpointing and model_a.training

        lowerCAmelCase__ = model_a(**__magic_name__).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model_a.zero_grad()
        lowerCAmelCase__ = (out_a - labels).mean()
        loss_a.backward()

        # compare the output and parameters gradients
        self.assertTrue((loss - loss_a).abs() < 1E-5)
        lowerCAmelCase__ = dict(model.named_parameters())
        lowerCAmelCase__ = dict(model_a.named_parameters())
        for name, param in named_params.items():
            self.assertTrue(torch_all_close(param.grad.data, named_params_a[name].grad.data, atol=5E-5))

    def __SCREAMING_SNAKE_CASE(self: Optional[Any]):
        """from_pretrained with output_loading_info reports no missing keys."""
        lowerCAmelCase__, lowerCAmelCase__ = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy", output_loading_info=__magic_name__)
        self.assertIsNotNone(__magic_name__)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(__magic_name__)
        lowerCAmelCase__ = model(**self.dummy_input)

        assert image is not None, "Make sure output is not None"

    def __SCREAMING_SNAKE_CASE(self: Tuple):
        """Seeded forward pass of the dummy checkpoint matches pinned slices."""
        lowerCAmelCase__ = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy")
        lowerCAmelCase__ = model.to(__magic_name__)
        model.eval()

        # MPS cannot host a device-bound Generator; fall back to the global seed.
        if torch_device == "mps":
            lowerCAmelCase__ = torch.manual_seed(0)
        else:
            lowerCAmelCase__ = torch.Generator(device=__magic_name__).manual_seed(0)

        lowerCAmelCase__ = torch.randn(
            1,
            model.config.in_channels,
            model.config.sample_size,
            model.config.sample_size,
            generator=torch.manual_seed(0),
        )
        lowerCAmelCase__ = image.to(__magic_name__)
        with torch.no_grad():
            lowerCAmelCase__ = model(__magic_name__, sample_posterior=__magic_name__, generator=__magic_name__).sample

        lowerCAmelCase__ = output[0, -1, -3:, -3:].flatten().cpu()

        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        if torch_device == "mps":
            lowerCAmelCase__ = torch.tensor(
                [
                    -4.0078E-01,
                    -3.8323E-04,
                    -1.2681E-01,
                    -1.1462E-01,
                    2.0095E-01,
                    1.0893E-01,
                    -8.8247E-02,
                    -3.0361E-01,
                    -9.8644E-03,
                ]
            )
        elif torch_device == "cpu":
            lowerCAmelCase__ = torch.tensor(
                [-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026]
            )
        else:
            lowerCAmelCase__ = torch.tensor(
                [-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485]
            )

        self.assertTrue(torch_all_close(__magic_name__, __magic_name__, rtol=1E-2))


@slow
class A(unittest.TestCase):
    """Slow integration tests against the Stable Diffusion VAE checkpoint."""

    # NOTE(review): duplicate `__magic_name__` parameters below are a
    # SyntaxError inherited from the obfuscation — preserved byte-for-byte.
    def __SCREAMING_SNAKE_CASE(self: Optional[Any], __magic_name__: List[Any], __magic_name__: Any):
        """File name of the cached gaussian-noise fixture for (seed, shape)."""
        return f"""gaussian_noise_s={seed}_shape={"_".join([str(__magic_name__) for s in shape])}.npy"""

    def __SCREAMING_SNAKE_CASE(self: Tuple):
        """Release GPU memory between tests."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def __SCREAMING_SNAKE_CASE(self: Tuple, __magic_name__: List[str]=0, __magic_name__: str=(4, 3, 512, 512), __magic_name__: str=False):
        """Load a deterministic noise image fixture onto the test device."""
        lowerCAmelCase__ = torch.floataa if fpaa else torch.floataa
        lowerCAmelCase__ = torch.from_numpy(load_hf_numpy(self.get_file_format(__magic_name__, __magic_name__))).to(__magic_name__).to(__magic_name__)
        return image

    def __SCREAMING_SNAKE_CASE(self: int, __magic_name__: List[str]="CompVis/stable-diffusion-v1-4", __magic_name__: Optional[Any]=False):
        """Load the SD VAE (optionally the fp16 revision) in eval mode."""
        lowerCAmelCase__ = "fp16" if fpaa else None
        lowerCAmelCase__ = torch.floataa if fpaa else torch.floataa

        lowerCAmelCase__ = AutoencoderKL.from_pretrained(
            __magic_name__,
            subfolder="vae",
            torch_dtype=__magic_name__,
            revision=__magic_name__,
        )
        model.to(__magic_name__).eval()
        return model

    def __SCREAMING_SNAKE_CASE(self: List[str], __magic_name__: Union[str, Any]=0):
        """Seeded torch Generator (global-seed fallback on MPS)."""
        if torch_device == "mps":
            return torch.manual_seed(__magic_name__)
        return torch.Generator(device=__magic_name__).manual_seed(__magic_name__)

    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
            [47, [-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, -0.0791, -0.0493, -0.4089], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
            # fmt: on
        ]
    )
    def __SCREAMING_SNAKE_CASE(self: Optional[int], __magic_name__: Tuple, __magic_name__: Optional[int], __magic_name__: Dict):
        """Full encode/decode with sampled posterior matches pinned slices."""
        lowerCAmelCase__ = self.get_sd_vae_model()
        lowerCAmelCase__ = self.get_sd_image(__magic_name__)
        lowerCAmelCase__ = self.get_generator(__magic_name__)

        with torch.no_grad():
            lowerCAmelCase__ = model(__magic_name__, generator=__magic_name__, sample_posterior=__magic_name__).sample

        assert sample.shape == image.shape

        lowerCAmelCase__ = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        lowerCAmelCase__ = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice)

        assert torch_all_close(__magic_name__, __magic_name__, atol=3E-3)

    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.0513, 0.0289, 1.3799, 0.2166, -0.2573, -0.0871, 0.5103, -0.0999]],
            [47, [-0.4128, -0.1320, -0.3704, 0.1965, -0.4116, -0.2332, -0.3340, 0.2247]],
            # fmt: on
        ]
    )
    @require_torch_gpu
    def __SCREAMING_SNAKE_CASE(self: List[str], __magic_name__: Optional[int], __magic_name__: Any):
        """fp16 variant of the sampled-posterior round trip (looser atol)."""
        lowerCAmelCase__ = self.get_sd_vae_model(fpaa=__magic_name__)
        lowerCAmelCase__ = self.get_sd_image(__magic_name__, fpaa=__magic_name__)
        lowerCAmelCase__ = self.get_generator(__magic_name__)

        with torch.no_grad():
            lowerCAmelCase__ = model(__magic_name__, generator=__magic_name__, sample_posterior=__magic_name__).sample

        assert sample.shape == image.shape

        lowerCAmelCase__ = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        lowerCAmelCase__ = torch.tensor(__magic_name__)

        assert torch_all_close(__magic_name__, __magic_name__, atol=1E-2)

    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.1609, 0.9866, -0.0487, -0.0777, -0.2716, 0.8368, -0.2055, -0.0814], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
            [47, [-0.2377, 0.1147, 0.1333, -0.4841, -0.2506, -0.0805, -0.0491, -0.4085], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
            # fmt: on
        ]
    )
    def __SCREAMING_SNAKE_CASE(self: str, __magic_name__: List[Any], __magic_name__: Union[str, Any], __magic_name__: Dict):
        """Round trip using the posterior mode (no generator) matches slices."""
        lowerCAmelCase__ = self.get_sd_vae_model()
        lowerCAmelCase__ = self.get_sd_image(__magic_name__)

        with torch.no_grad():
            lowerCAmelCase__ = model(__magic_name__).sample

        assert sample.shape == image.shape

        lowerCAmelCase__ = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        lowerCAmelCase__ = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice)

        assert torch_all_close(__magic_name__, __magic_name__, atol=3E-3)

    @parameterized.expand(
        [
            # fmt: off
            [13, [-0.2051, -0.1803, -0.2311, -0.2114, -0.3292, -0.3574, -0.2953, -0.3323]],
            [37, [-0.2632, -0.2625, -0.2199, -0.2741, -0.4539, -0.4990, -0.3720, -0.4925]],
            # fmt: on
        ]
    )
    @require_torch_gpu
    def __SCREAMING_SNAKE_CASE(self: Dict, __magic_name__: List[str], __magic_name__: List[str]):
        """Decoding a (3, 4, 64, 64) latent yields pinned 512x512 slices."""
        lowerCAmelCase__ = self.get_sd_vae_model()
        lowerCAmelCase__ = self.get_sd_image(__magic_name__, shape=(3, 4, 64, 64))

        with torch.no_grad():
            lowerCAmelCase__ = model.decode(__magic_name__).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        lowerCAmelCase__ = sample[-1, -2:, :2, -2:].flatten().cpu()
        lowerCAmelCase__ = torch.tensor(__magic_name__)

        assert torch_all_close(__magic_name__, __magic_name__, atol=1E-3)

    @parameterized.expand(
        [
            # fmt: off
            [27, [-0.0369, 0.0207, -0.0776, -0.0682, -0.1747, -0.1930, -0.1465, -0.2039]],
            [16, [-0.1628, -0.2134, -0.2747, -0.2642, -0.3774, -0.4404, -0.3687, -0.4277]],
            # fmt: on
        ]
    )
    @require_torch_gpu
    def __SCREAMING_SNAKE_CASE(self: str, __magic_name__: Optional[int], __magic_name__: Tuple):
        """fp16 decoding variant (looser atol)."""
        lowerCAmelCase__ = self.get_sd_vae_model(fpaa=__magic_name__)
        lowerCAmelCase__ = self.get_sd_image(__magic_name__, shape=(3, 4, 64, 64), fpaa=__magic_name__)

        with torch.no_grad():
            lowerCAmelCase__ = model.decode(__magic_name__).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        lowerCAmelCase__ = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        lowerCAmelCase__ = torch.tensor(__magic_name__)

        assert torch_all_close(__magic_name__, __magic_name__, atol=5E-3)

    @parameterized.expand([(13,), (16,), (27,)])
    @require_torch_gpu
    @unittest.skipIf(not is_xformers_available(), reason="xformers is not required when using PyTorch 2.0.")
    def __SCREAMING_SNAKE_CASE(self: Any, __magic_name__: Union[str, Any]):
        """fp16 decode with and without xformers attention must agree."""
        lowerCAmelCase__ = self.get_sd_vae_model(fpaa=__magic_name__)
        lowerCAmelCase__ = self.get_sd_image(__magic_name__, shape=(3, 4, 64, 64), fpaa=__magic_name__)

        with torch.no_grad():
            lowerCAmelCase__ = model.decode(__magic_name__).sample

        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            lowerCAmelCase__ = model.decode(__magic_name__).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        assert torch_all_close(__magic_name__, __magic_name__, atol=1E-1)

    @parameterized.expand([(13,), (16,), (37,)])
    @require_torch_gpu
    @unittest.skipIf(not is_xformers_available(), reason="xformers is not required when using PyTorch 2.0.")
    def __SCREAMING_SNAKE_CASE(self: Tuple, __magic_name__: List[Any]):
        """fp32 decode with and without xformers attention must agree."""
        lowerCAmelCase__ = self.get_sd_vae_model()
        lowerCAmelCase__ = self.get_sd_image(__magic_name__, shape=(3, 4, 64, 64))

        with torch.no_grad():
            lowerCAmelCase__ = model.decode(__magic_name__).sample

        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            lowerCAmelCase__ = model.decode(__magic_name__).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        assert torch_all_close(__magic_name__, __magic_name__, atol=1E-2)

    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]],
            [47, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]],
            # fmt: on
        ]
    )
    def __SCREAMING_SNAKE_CASE(self: Dict, __magic_name__: Dict, __magic_name__: Tuple):
        """Encoding a 512x512 image yields an 8x-downscaled pinned latent."""
        lowerCAmelCase__ = self.get_sd_vae_model()
        lowerCAmelCase__ = self.get_sd_image(__magic_name__)
        lowerCAmelCase__ = self.get_generator(__magic_name__)

        with torch.no_grad():
            lowerCAmelCase__ = model.encode(__magic_name__).latent_dist
            lowerCAmelCase__ = dist.sample(generator=__magic_name__)

        assert list(sample.shape) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]

        lowerCAmelCase__ = sample[0, -1, -3:, -3:].flatten().cpu()
        lowerCAmelCase__ = torch.tensor(__magic_name__)

        lowerCAmelCase__ = 3E-3 if torch_device != "mps" else 1E-2
        assert torch_all_close(__magic_name__, __magic_name__, atol=__magic_name__)
48
# Package init for the text-to-video pipelines: defines the pipeline output
# dataclass and lazily exposes the concrete pipelines only when both `torch`
# and `transformers` are installed (otherwise dummy placeholders are exported
# that raise a helpful error on use).
from dataclasses import dataclass
from typing import List, Optional, Union

import numpy as np
import torch

from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available


@dataclass
class _lowercase ( A__ ):
    """Output of a text-to-video pipeline.

    NOTE(review): class and base names are machine-mangled; the base "A__"
    presumably refers to BaseOutput (imported above) and the field below
    presumably held the generated frames -- confirm against upstream.
    """

    # Generated frames: a list of numpy arrays or a single torch tensor.
    SCREAMING_SNAKE_CASE__ : Union[List[np.ndarray], torch.FloatTensor]


try:
    # Raise so the except branch exports error-raising dummies when either
    # optional dependency is missing.
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .pipeline_text_to_video_synth import TextToVideoSDPipeline
    from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline  # noqa: F401
    from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
696
0
def reverse_long_words(sentence: str) -> str:
    """Reverse every word of *sentence* longer than four characters.

    >>> reverse_long_words("Hey wollef sroirraw")
    'Hey fellow warriors'

    Bug fixed: the previous version measured ``len()`` of the whole sentence
    instead of each word, read the undefined name ``sentence`` (the parameter
    had been mangled to ``UpperCAmelCase__``), and ``__main__`` called
    ``reverse_long_words``, which no longer existed -- NameError at runtime.
    The redundant ``"".join(word[::-1])`` is also simplified: a string slice
    is already a string.
    """
    return " ".join(word[::-1] if len(word) > 4 else word for word in sentence.split())


# Backward-compatible alias for the old (mangled) function name.
__lowerCAmelCase = reverse_long_words


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(reverse_long_words('''Hey wollef sroirraw'''))
103
# Auto-generated "dummy object" stub (see transformers' dummy_* machinery):
# a stand-in exported when `torch` and/or `scipy` are missing, so importing
# the class name succeeds but any use raises a helpful ImportError via
# `requires_backends`.
from ..utils import DummyObject, requires_backends


class __A( metaclass=UpperCAmelCase ):
    # NOTE(review): names here are machine-mangled. The metaclass is
    # presumably DummyObject (imported above); the list below is presumably
    # the `_backends` attribute; the two identically-named classmethods were
    # presumably `from_config` / `from_pretrained` -- as written the second
    # shadows the first, and the duplicated *args/**kwargs parameter name in
    # each signature is a SyntaxError. Confirm against the generated
    # upstream file before running.
    SCREAMING_SNAKE_CASE = ['''torch''', '''scipy''']

    def __init__( self : Any , *__UpperCamelCase : str , **__UpperCamelCase : Any ):
        # Raise immediately: this class only exists when the backends are absent.
        requires_backends(self , ["""torch""", """scipy"""] )

    @classmethod
    def lowercase__ ( cls : Dict , *__UpperCamelCase : str , **__UpperCamelCase : Dict ):
        requires_backends(cls , ["""torch""", """scipy"""] )

    @classmethod
    def lowercase__ ( cls : str , *__UpperCamelCase : Optional[int] , **__UpperCamelCase : Optional[int] ):
        requires_backends(cls , ["""torch""", """scipy"""] )
103
1
from __future__ import annotations


def rec_insertion_sort(collection: list, n: int) -> None:
    """Sort ``collection[:n]`` in place, recursively.

    Each level pushes the element at index ``n - 1`` rightward into place
    (via ``insert_next``), then recurses on the shorter prefix.

    Bugs fixed: both functions had been renamed to the same mangled
    identifier, so the calls to ``insert_next`` / ``rec_insertion_sort`` and
    the ``__main__`` driver all raised NameError; the parameters shared one
    name; and the neighbor swap assigned to throwaway locals instead of the
    list elements.
    """
    # Base case: nothing to sort for empty/singleton lists or a trivial prefix.
    if len(collection) <= 1 or n <= 1:
        return
    insert_next(collection, n - 1)
    rec_insertion_sort(collection, n - 1)


def insert_next(collection: list, index: int) -> None:
    """Bubble ``collection[index]`` rightward until adjacent pairs are ordered."""
    # Stop at the end of the list or once the adjacent pair is in order.
    if index >= len(collection) or collection[index - 1] <= collection[index]:
        return
    # Swap the out-of-order neighbors, then continue with the next pair.
    collection[index - 1], collection[index] = (
        collection[index],
        collection[index - 1],
    )
    insert_next(collection, index + 1)


# Backward-compat alias: the old mangled module-level name resolved to the
# second definition (insert_next) after shadowing.
_SCREAMING_SNAKE_CASE = insert_next


if __name__ == "__main__":
    numbers = input('Enter integers separated by spaces: ')
    number_list = [int(num) for num in numbers.split()]
    rec_insertion_sort(number_list, len(number_list))
    print(number_list)
504
# Download a web page's Open Graph preview image (og:image) and save it to
# disk with a timestamped filename.
from datetime import datetime

import requests
from bs4 import BeautifulSoup  # bug fix: was `from bsa import BeautifulSoup` (no such module)

if __name__ == "__main__":
    # Bug fix: every assignment had been mangled to the same throwaway name
    # (`_UpperCAmelCase`) while later statements read `url`, `image_url`,
    # `image_data` and `file_name` -- restored the real bindings.
    url = input('Enter image url: ').strip()
    print(f'''Downloading image from {url} ...''')
    soup = BeautifulSoup(requests.get(url).content, 'html.parser')
    # The image URL is in the content field of the first meta tag with property og:image
    image_url = soup.find('meta', {'property': 'og:image'})['content']
    image_data = requests.get(image_url).content
    # NOTE(review): ":" in the filename is invalid on Windows -- confirm the
    # target platform before changing the format string.
    file_name = f'''{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg'''
    with open(file_name, 'wb') as fp:
        fp.write(image_data)
    print(f'''Done. Image saved to disk as {file_name}.''')
504
1
"""simple docstring""" import unittest from transformers import BigBirdTokenizer, BigBirdTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin __A : Optional[Any] = '▁' __A : Union[str, Any] = get_tests_dir('fixtures/test_sentencepiece.model') @require_sentencepiece @require_tokenizers class lowerCAmelCase__ ( lowercase__ , unittest.TestCase ): """simple docstring""" __UpperCAmelCase : Union[str, Any] = BigBirdTokenizer __UpperCAmelCase : str = BigBirdTokenizerFast __UpperCAmelCase : Optional[Any] = True __UpperCAmelCase : Optional[int] = True def snake_case ( self : Any ): super().setUp() __lowercase : Optional[int] = self.tokenizer_class(UpperCAmelCase__ , keep_accents=UpperCAmelCase__ ) tokenizer.save_pretrained(self.tmpdirname ) def snake_case ( self : int ): __lowercase : Optional[int] = '''<s>''' __lowercase : int = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase__ ) , UpperCAmelCase__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase__ ) , UpperCAmelCase__ ) def snake_case ( self : Optional[int] ): __lowercase : Dict = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "<unk>" ) self.assertEqual(vocab_keys[1] , "<s>" ) self.assertEqual(vocab_keys[-1] , "[MASK]" ) self.assertEqual(len(UpperCAmelCase__ ) , 1_0_0_4 ) def snake_case ( self : Optional[int] ): self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_0 ) def snake_case ( self : Optional[Any] ): if not self.test_rust_tokenizer: return __lowercase : Optional[int] = self.get_tokenizer() __lowercase : Any = self.get_rust_tokenizer() __lowercase : int = '''I was born in 92000, and this is falsé.''' __lowercase : List[str] = tokenizer.tokenize(UpperCAmelCase__ ) __lowercase : Dict = rust_tokenizer.tokenize(UpperCAmelCase__ ) 
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ ) __lowercase : str = tokenizer.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ ) __lowercase : Union[str, Any] = rust_tokenizer.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ ) self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ ) __lowercase : Optional[Any] = self.get_rust_tokenizer() __lowercase : Optional[Any] = tokenizer.encode(UpperCAmelCase__ ) __lowercase : Union[str, Any] = rust_tokenizer.encode(UpperCAmelCase__ ) self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ ) def snake_case ( self : Any ): __lowercase : Tuple = BigBirdTokenizer(UpperCAmelCase__ , keep_accents=UpperCAmelCase__ ) __lowercase : Tuple = tokenizer.tokenize("This is a test" ) self.assertListEqual(UpperCAmelCase__ , ["▁This", "▁is", "▁a", "▁t", "est"] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(UpperCAmelCase__ ) , [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2] , ) __lowercase : Tuple = tokenizer.tokenize("I was born in 92000, and this is falsé." 
) self.assertListEqual( UpperCAmelCase__ , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", ".", ] , ) __lowercase : Any = tokenizer.convert_tokens_to_ids(UpperCAmelCase__ ) self.assertListEqual( UpperCAmelCase__ , [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 0, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 0, 4] , ) __lowercase : List[Any] = tokenizer.convert_ids_to_tokens(UpperCAmelCase__ ) self.assertListEqual( UpperCAmelCase__ , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", ".", ] , ) @cached_property def snake_case ( self : str ): return BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base" ) @slow def snake_case ( self : Union[str, Any] ): __lowercase : str = '''Hello World!''' __lowercase : Union[str, Any] = [6_5, 1_8_5_3_6, 2_2_6_0, 1_0_1, 6_6] self.assertListEqual(UpperCAmelCase__ , self.big_tokenizer.encode(UpperCAmelCase__ ) ) @slow def snake_case ( self : List[str] ): __lowercase : int = ( '''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . 
Also we will''' ''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth''' ) # fmt: off __lowercase : Tuple = [6_5, 8_7_1, 4_1_9, 3_5_8, 9_4_6, 9_9_1, 2_5_2_1, 4_5_2, 3_5_8, 1_3_5_7, 3_8_7, 7_7_5_1, 3_5_3_6, 1_1_2, 9_8_5, 4_5_6, 1_2_6, 8_6_5, 9_3_8, 5_4_0_0, 5_7_3_4, 4_5_8, 1_3_6_8, 4_6_7, 7_8_6, 2_4_6_2, 5_2_4_6, 1_1_5_9, 6_3_3, 8_6_5, 4_5_1_9, 4_5_7, 5_8_2, 8_5_2, 2_5_5_7, 4_2_7, 9_1_6, 5_0_8, 4_0_5, 3_4_3_2_4, 4_9_7, 3_9_1, 4_0_8, 1_1_3_4_2, 1_2_4_4, 3_8_5, 1_0_0, 9_3_8, 9_8_5, 4_5_6, 5_7_4, 3_6_2, 1_2_5_9_7, 3_2_0_0, 3_1_2_9, 1_1_7_2, 6_6] # noqa: E231 # fmt: on self.assertListEqual(UpperCAmelCase__ , self.big_tokenizer.encode(UpperCAmelCase__ ) ) @require_torch @slow def snake_case ( self : Optional[int] ): import torch from transformers import BigBirdConfig, BigBirdModel # Build sequence __lowercase : List[str] = list(self.big_tokenizer.get_vocab().keys() )[:1_0] __lowercase : Dict = ''' '''.join(UpperCAmelCase__ ) __lowercase : Union[str, Any] = self.big_tokenizer.encode_plus(UpperCAmelCase__ , return_tensors="pt" , return_token_type_ids=UpperCAmelCase__ ) __lowercase : Dict = self.big_tokenizer.batch_encode_plus( [sequence + " " + sequence] , return_tensors="pt" , return_token_type_ids=UpperCAmelCase__ ) __lowercase : Optional[int] = BigBirdConfig(attention_type="original_full" ) __lowercase : Dict = BigBirdModel(UpperCAmelCase__ ) assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size with torch.no_grad(): model(**UpperCAmelCase__ ) model(**UpperCAmelCase__ ) @slow def snake_case ( self : Tuple ): __lowercase : Union[str, Any] = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base" ) __lowercase : Dict = tokenizer.decode(tokenizer("Paris is the [MASK]." 
).input_ids ) self.assertTrue(decoded_text == "[CLS] Paris is the[MASK].[SEP]" ) @slow def snake_case ( self : int ): __lowercase : str = {'''input_ids''': [[6_5, 3_9_2_8_6, 4_5_8, 3_6_3_3_5, 2_0_0_1, 4_5_6, 1_3_0_7_3, 1_3_2_6_6, 4_5_5, 1_1_3, 7_7_4_6, 1_7_4_1, 1_1_1_5_7, 3_9_1, 1_3_0_7_3, 1_3_2_6_6, 4_5_5, 1_1_3, 3_9_6_7, 3_5_4_1_2, 1_1_3, 4_9_3_6, 1_0_9, 3_8_7_0, 2_3_7_7, 1_1_3, 3_0_0_8_4, 4_5_7_2_0, 4_5_8, 1_3_4, 1_7_4_9_6, 1_1_2, 5_0_3, 1_1_6_7_2, 1_1_3, 1_1_8, 1_1_2, 5_6_6_5, 1_3_3_4_7, 3_8_6_8_7, 1_1_2, 1_4_9_6, 3_1_3_8_9, 1_1_2, 3_2_6_8, 4_7_2_6_4, 1_3_4, 9_6_2, 1_1_2, 1_6_3_7_7, 8_0_3_5, 2_3_1_3_0, 4_3_0, 1_2_1_6_9, 1_5_5_1_8, 2_8_5_9_2, 4_5_8, 1_4_6, 4_1_6_9_7, 1_0_9, 3_9_1, 1_2_1_6_9, 1_5_5_1_8, 1_6_6_8_9, 4_5_8, 1_4_6, 4_1_3_5_8, 1_0_9, 4_5_2, 7_2_6, 4_0_3_4, 1_1_1, 7_6_3, 3_5_4_1_2, 5_0_8_2, 3_8_8, 1_9_0_3, 1_1_1, 9_0_5_1, 3_9_1, 2_8_7_0, 4_8_9_1_8, 1_9_0_0, 1_1_2_3, 5_5_0, 9_9_8, 1_1_2, 9_5_8_6, 1_5_9_8_5, 4_5_5, 3_9_1, 4_1_0, 2_2_9_5_5, 3_7_6_3_6, 1_1_4, 6_6], [6_5, 4_4_8, 1_7_4_9_6, 4_1_9, 3_6_6_3, 3_8_5, 7_6_3, 1_1_3, 2_7_5_3_3, 2_8_7_0, 3_2_8_3, 1_3_0_4_3, 1_6_3_9, 2_4_7_1_3, 5_2_3, 6_5_6, 2_4_0_1_3, 1_8_5_5_0, 2_5_2_1, 5_1_7, 2_7_0_1_4, 2_1_2_4_4, 4_2_0, 1_2_1_2, 1_4_6_5, 3_9_1, 9_2_7, 4_8_3_3, 3_8_8, 5_7_8, 1_1_7_8_6, 1_1_4, 6_6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [6_5, 4_8_4, 2_1_6_9, 7_6_8_7, 2_1_9_3_2, 1_8_1_4_6, 7_2_6, 3_6_3, 1_7_0_3_2, 3_3_9_1, 1_1_4, 6_6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=UpperCAmelCase__ , model_name="google/bigbird-roberta-base" , revision="215c99f1600e06f83acce68422f2035b2b5c3510" , )
700
"""Tree sort: build a binary search tree, then read it back in order.

Bugs fixed: the class had been mangled to ``lowerCAmelCase__`` while the code
constructs ``Node(...)``; both functions were mangled to ``snake_case__``
(the second shadowing the first) while calling ``inorder`` / ``tree_sort``;
and ``Node.__init__``/``insert`` assigned annotated locals instead of
``self`` attributes. All of those raised NameError (or silently did nothing)
at runtime; the canonical names are restored below with aliases.
"""


class Node:
    """A binary-search-tree node holding one value and two child links."""

    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None

    def insert(self, val):
        """Insert *val* into the subtree rooted here (duplicates are dropped)."""
        if self.val:
            if val < self.val:
                # Smaller values go left.
                if self.left is None:
                    self.left = Node(val)
                else:
                    self.left.insert(val)
            elif val > self.val:
                # Larger values go right; equal values fall through (no-op).
                if self.right is None:
                    self.right = Node(val)
                else:
                    self.right.insert(val)
        else:
            # Falsy root value (e.g. a placeholder 0/None): adopt the value.
            self.val = val


def inorder(root, res):
    """Append the subtree's values to *res* in ascending (in-order) order."""
    if root:
        inorder(root.left, res)
        res.append(root.val)
        inorder(root.right, res)


def tree_sort(arr):
    """Return a sorted list of *arr*'s values via BST insertion + traversal.

    Returns *arr* itself when it is empty (preserving the original contract);
    otherwise returns a new list. Duplicates collapse to one occurrence.
    """
    if len(arr) == 0:
        return arr
    root = Node(arr[0])
    for i in range(1, len(arr)):
        root.insert(arr[i])
    # Traverse BST in order.
    res = []
    inorder(root, res)
    return res


# Backward-compat aliases for the previous machine-mangled names.
lowerCAmelCase__ = Node
snake_case__ = tree_sort


if __name__ == "__main__":
    print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
281
0
'''BiT (Big Transfer / ResNet-v2) model configuration.'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

# Bug fix: the logger and this archive map had both been mangled to the same
# module-level name (`A_`), so the logger was immediately clobbered.
BIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/bit-50": "https://huggingface.co/google/bit-50/resolve/main/config.json",
}
A_ = BIT_PRETRAINED_CONFIG_ARCHIVE_MAP  # backward-compat alias for the mangled name


class BitConfig(BackboneConfigMixin, PretrainedConfig):
    """Configuration class for a BiT model.

    Restored from a machine-mangled original that was not runnable: the three
    class attributes were all bound to the same name (`A_`) while `__init__`
    reads `self.layer_types` / `self.supported_padding`; every `__init__`
    parameter shared one name (a SyntaxError); and the attribute assignments
    went to an annotated throwaway local instead of `self`. Names follow the
    upstream Hugging Face `BitConfig`.
    """

    model_type = "bit"
    layer_types = ["preactivation", "bottleneck"]
    supported_padding = ["SAME", "VALID"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],  # list defaults kept to match upstream; never mutated here
        depths=[3, 4, 6, 3],
        layer_type="preactivation",
        hidden_act="relu",
        global_padding=None,
        num_groups=32,
        drop_path_rate=0.0,
        embedding_dynamic_padding=False,
        output_stride=32,
        width_factor=1,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        """Build the config; raises ValueError for unknown layer/padding types."""
        super().__init__(**kwargs)
        # Validate the residual-block type against the class-level whitelist.
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        if global_padding is not None:
            # Normalize the padding strategy to upper case, rejecting unknowns.
            if global_padding.upper() in self.supported_padding:
                global_padding = global_padding.upper()
            else:
                raise ValueError(f"Padding strategy {global_padding} not supported")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.global_padding = global_padding
        self.num_groups = num_groups
        self.drop_path_rate = drop_path_rate
        self.embedding_dynamic_padding = embedding_dynamic_padding
        self.output_stride = output_stride
        self.width_factor = width_factor
        # "stem" plus one named stage per entry in `depths`; used to align the
        # backbone's out_features/out_indices.
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )


# Backward-compat alias for the previous machine-mangled class name.
lowercase_ = BitConfig
270
'''simple docstring''' import inspect import unittest from datasets import load_dataset from packaging import version from transformers import BeitConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_MAPPING, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation, BeitModel, ) from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): import PIL from PIL import Image from transformers import BeitImageProcessor class lowercase_ : def __init__( self : Union[str, Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Any=100 , __lowerCamelCase : str=13 , __lowerCamelCase : str=30 , __lowerCamelCase : int=2 , __lowerCamelCase : List[Any]=3 , __lowerCamelCase : str=True , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : List[Any]=32 , __lowerCamelCase : Union[str, Any]=4 , __lowerCamelCase : str=4 , __lowerCamelCase : Tuple=37 , __lowerCamelCase : List[Any]="gelu" , __lowerCamelCase : Optional[Any]=0.1 , __lowerCamelCase : List[str]=0.1 , __lowerCamelCase : Union[str, Any]=10 , __lowerCamelCase : int=0.0_2 , __lowerCamelCase : Tuple=3 , __lowerCamelCase : int=None , __lowerCamelCase : Tuple=[0, 1, 2, 3] , ): snake_case__ : Optional[int] = parent snake_case__ : Union[str, Any] = 100 snake_case__ : Union[str, Any] = batch_size snake_case__ : Dict = image_size snake_case__ : Tuple = patch_size snake_case__ : Dict = num_channels snake_case__ : List[Any] = is_training snake_case__ : 
Optional[Any] = use_labels snake_case__ : Optional[int] = hidden_size snake_case__ : Optional[int] = num_hidden_layers snake_case__ : Optional[Any] = num_attention_heads snake_case__ : Union[str, Any] = intermediate_size snake_case__ : str = hidden_act snake_case__ : Tuple = hidden_dropout_prob snake_case__ : Dict = attention_probs_dropout_prob snake_case__ : List[Any] = type_sequence_label_size snake_case__ : Dict = initializer_range snake_case__ : str = scope snake_case__ : Optional[Any] = out_indices snake_case__ : List[Any] = num_labels # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) snake_case__ : Optional[Any] = (image_size // patch_size) ** 2 snake_case__ : Dict = num_patches + 1 def _lowerCAmelCase ( self : Optional[Any] ): snake_case__ : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) snake_case__ : Tuple = None snake_case__ : str = None if self.use_labels: snake_case__ : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) snake_case__ : Optional[int] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) snake_case__ : str = self.get_config() return config, pixel_values, labels, pixel_labels def _lowerCAmelCase ( self : Optional[int] ): return BeitConfig( vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__lowerCamelCase , initializer_range=self.initializer_range , out_indices=self.out_indices , ) def _lowerCAmelCase ( self : int , __lowerCamelCase : Dict , __lowerCamelCase : str , __lowerCamelCase : Tuple , __lowerCamelCase : int 
): snake_case__ : Tuple = BeitModel(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() snake_case__ : int = model(__lowerCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _lowerCAmelCase ( self : Any , __lowerCamelCase : List[Any] , __lowerCamelCase : str , __lowerCamelCase : Tuple , __lowerCamelCase : Optional[int] ): snake_case__ : Optional[int] = BeitForMaskedImageModeling(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() snake_case__ : Dict = model(__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) ) def _lowerCAmelCase ( self : Dict , __lowerCamelCase : List[Any] , __lowerCamelCase : Tuple , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Any ): snake_case__ : List[Any] = self.type_sequence_label_size snake_case__ : int = BeitForImageClassification(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() snake_case__ : List[Any] = model(__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images snake_case__ : Union[str, Any] = 1 snake_case__ : Any = BeitForImageClassification(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() snake_case__ : Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) snake_case__ : str = model(__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def _lowerCAmelCase ( self : List[str] , __lowerCamelCase : Dict , __lowerCamelCase : Tuple , __lowerCamelCase : Any , __lowerCamelCase : List[Any] ): snake_case__ : str = self.num_labels snake_case__ : int = BeitForSemanticSegmentation(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() snake_case__ : List[Any] = model(__lowerCamelCase ) self.parent.assertEqual( 
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) ) snake_case__ : Dict = model(__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) ) def _lowerCAmelCase ( self : List[Any] ): snake_case__ : List[str] = self.prepare_config_and_inputs() snake_case__ , snake_case__ , snake_case__ , snake_case__ : int = config_and_inputs snake_case__ : Any = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class lowercase_ ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): A_ = ( (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation) if is_torch_available() else () ) A_ = ( { "feature-extraction": BeitModel, "image-classification": BeitForImageClassification, "image-segmentation": BeitForSemanticSegmentation, } if is_torch_available() else {} ) A_ = False A_ = False A_ = False def _lowerCAmelCase ( self : Any ): snake_case__ : Any = BeitModelTester(self ) snake_case__ : List[str] = ConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase , hidden_size=37 ) def _lowerCAmelCase ( self : Dict ): self.config_tester.run_common_tests() @unittest.skip(reason='BEiT does not use inputs_embeds' ) def _lowerCAmelCase ( self : Tuple ): pass @require_torch_multi_gpu @unittest.skip(reason='BEiT has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' ) def _lowerCAmelCase ( self : Any ): pass def _lowerCAmelCase ( self : int ): snake_case__ , snake_case__ : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case__ : Optional[Any] = model_class(__lowerCamelCase ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) snake_case__ : Dict = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__lowerCamelCase , nn.Linear 
) ) def _lowerCAmelCase ( self : int ): snake_case__ , snake_case__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case__ : str = model_class(__lowerCamelCase ) snake_case__ : int = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic snake_case__ : Union[str, Any] = [*signature.parameters.keys()] snake_case__ : List[str] = ['pixel_values'] self.assertListEqual(arg_names[:1] , __lowerCamelCase ) def _lowerCAmelCase ( self : Optional[Any] ): snake_case__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCamelCase ) def _lowerCAmelCase ( self : List[str] ): snake_case__ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__lowerCamelCase ) def _lowerCAmelCase ( self : List[Any] ): snake_case__ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase ) def _lowerCAmelCase ( self : List[str] ): snake_case__ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*__lowerCamelCase ) def _lowerCAmelCase ( self : Dict ): if not self.model_tester.is_training: return snake_case__ , snake_case__ : Any = self.model_tester.prepare_config_and_inputs_for_common() snake_case__ : str = True for model_class in self.all_model_classes: # we don't test BeitForMaskedImageModeling if model_class in [*get_values(__lowerCamelCase ), BeitForMaskedImageModeling]: continue snake_case__ : int = model_class(__lowerCamelCase ) model.to(__lowerCamelCase ) model.train() snake_case__ : Dict = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) snake_case__ : Optional[Any] = model(**__lowerCamelCase ).loss loss.backward() def _lowerCAmelCase ( self : Any ): 
snake_case__ , snake_case__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return snake_case__ : List[str] = False snake_case__ : List[Any] = True for model_class in self.all_model_classes: # we don't test BeitForMaskedImageModeling if ( model_class in [*get_values(__lowerCamelCase ), BeitForMaskedImageModeling] or not model_class.supports_gradient_checkpointing ): continue snake_case__ : Any = model_class(__lowerCamelCase ) model.gradient_checkpointing_enable() model.to(__lowerCamelCase ) model.train() snake_case__ : Optional[int] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase ) snake_case__ : int = model(**__lowerCamelCase ).loss loss.backward() def _lowerCAmelCase ( self : List[Any] ): snake_case__ , snake_case__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() snake_case__ : Tuple = _config_zero_init(__lowerCamelCase ) for model_class in self.all_model_classes: snake_case__ : List[str] = model_class(config=__lowerCamelCase ) for name, param in model.named_parameters(): # we skip lambda parameters as these require special initial values # determined by config.layer_scale_init_value if "lambda" in name: continue if param.requires_grad: self.assertIn( ((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F"Parameter {name} of model {model_class} seems not properly initialized" , ) @slow def _lowerCAmelCase ( self : Union[str, Any] ): for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: snake_case__ : str = BeitModel.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) def UpperCamelCase__ ( ) -> int: snake_case__ : Any = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class lowercase_ ( unittest.TestCase ): @cached_property def _lowerCAmelCase ( self : List[str] ): return 
BeitImageProcessor.from_pretrained('microsoft/beit-base-patch16-224' ) if is_vision_available() else None @slow def _lowerCAmelCase ( self : Dict ): snake_case__ : Tuple = BeitForMaskedImageModeling.from_pretrained('microsoft/beit-base-patch16-224-pt22k' ).to(__lowerCamelCase ) snake_case__ : Any = self.default_image_processor snake_case__ : List[str] = prepare_img() snake_case__ : Any = image_processor(images=__lowerCamelCase , return_tensors='pt' ).pixel_values.to(__lowerCamelCase ) # prepare bool_masked_pos snake_case__ : Optional[Any] = torch.ones((1, 196) , dtype=torch.bool ).to(__lowerCamelCase ) # forward pass with torch.no_grad(): snake_case__ : Union[str, Any] = model(pixel_values=__lowerCamelCase , bool_masked_pos=__lowerCamelCase ) snake_case__ : str = outputs.logits # verify the logits snake_case__ : Dict = torch.Size((1, 196, 8192) ) self.assertEqual(logits.shape , __lowerCamelCase ) snake_case__ : int = torch.tensor( [[-3.2_4_3_7, 0.5_0_7_2, -1_3.9_1_7_4], [-3.2_4_5_6, 0.4_9_4_8, -1_3.9_4_0_1], [-3.2_0_3_3, 0.5_1_2_1, -1_3.8_5_5_0]] ).to(__lowerCamelCase ) self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , __lowerCamelCase , atol=1E-2 ) ) @slow def _lowerCAmelCase ( self : Union[str, Any] ): snake_case__ : Optional[int] = BeitForImageClassification.from_pretrained('microsoft/beit-base-patch16-224' ).to(__lowerCamelCase ) snake_case__ : List[str] = self.default_image_processor snake_case__ : int = prepare_img() snake_case__ : Optional[Any] = image_processor(images=__lowerCamelCase , return_tensors='pt' ).to(__lowerCamelCase ) # forward pass with torch.no_grad(): snake_case__ : Union[str, Any] = model(**__lowerCamelCase ) snake_case__ : Dict = outputs.logits # verify the logits snake_case__ : List[Any] = torch.Size((1, 1000) ) self.assertEqual(logits.shape , __lowerCamelCase ) snake_case__ : Union[str, Any] = torch.tensor([-1.2_3_8_5, -1.0_9_8_7, -1.0_1_0_8] ).to(__lowerCamelCase ) self.assertTrue(torch.allclose(logits[0, :3] , 
__lowerCamelCase , atol=1E-4 ) ) snake_case__ : Any = 281 self.assertEqual(logits.argmax(-1 ).item() , __lowerCamelCase ) @slow def _lowerCAmelCase ( self : List[Any] ): snake_case__ : Any = BeitForImageClassification.from_pretrained('microsoft/beit-large-patch16-224-pt22k-ft22k' ).to( __lowerCamelCase ) snake_case__ : int = self.default_image_processor snake_case__ : Tuple = prepare_img() snake_case__ : List[Any] = image_processor(images=__lowerCamelCase , return_tensors='pt' ).to(__lowerCamelCase ) # forward pass with torch.no_grad(): snake_case__ : Union[str, Any] = model(**__lowerCamelCase ) snake_case__ : Dict = outputs.logits # verify the logits snake_case__ : Optional[int] = torch.Size((1, 21841) ) self.assertEqual(logits.shape , __lowerCamelCase ) snake_case__ : Dict = torch.tensor([1.6_8_8_1, -0.2_7_8_7, 0.5_9_0_1] ).to(__lowerCamelCase ) self.assertTrue(torch.allclose(logits[0, :3] , __lowerCamelCase , atol=1E-4 ) ) snake_case__ : Optional[Any] = 2396 self.assertEqual(logits.argmax(-1 ).item() , __lowerCamelCase ) @slow def _lowerCAmelCase ( self : Dict ): snake_case__ : Optional[Any] = BeitForSemanticSegmentation.from_pretrained('microsoft/beit-base-finetuned-ade-640-640' ) snake_case__ : str = model.to(__lowerCamelCase ) snake_case__ : List[str] = BeitImageProcessor(do_resize=__lowerCamelCase , size=640 , do_center_crop=__lowerCamelCase ) snake_case__ : Tuple = load_dataset('hf-internal-testing/fixtures_ade20k' , split='test' ) snake_case__ : Union[str, Any] = Image.open(ds[0]['file'] ) snake_case__ : Tuple = image_processor(images=__lowerCamelCase , return_tensors='pt' ).to(__lowerCamelCase ) # forward pass with torch.no_grad(): snake_case__ : List[str] = model(**__lowerCamelCase ) snake_case__ : Any = outputs.logits # verify the logits snake_case__ : Optional[int] = torch.Size((1, 150, 160, 160) ) self.assertEqual(logits.shape , __lowerCamelCase ) snake_case__ : Tuple = version.parse(PIL.__version__ ) < version.parse('9.0.0' ) if 
is_pillow_less_than_a: snake_case__ : Optional[Any] = torch.tensor( [ [[-4.9_2_2_5, -2.3_9_5_4, -3.0_5_2_2], [-2.8_8_2_2, -1.0_0_4_6, -1.7_5_6_1], [-2.9_5_4_9, -1.3_2_2_8, -2.1_3_4_7]], [[-5.8_1_6_8, -3.4_1_2_9, -4.0_7_7_8], [-3.8_6_5_1, -2.2_2_1_4, -3.0_2_7_7], [-3.8_3_5_6, -2.4_6_4_3, -3.3_5_3_5]], [[-0.0_0_7_8, 3.9_9_5_2, 4.0_7_5_4], [2.9_8_5_6, 4.6_9_4_4, 5.0_0_3_5], [3.2_4_1_3, 4.7_8_1_3, 4.9_9_6_9]], ] , device=__lowerCamelCase , ) else: snake_case__ : Union[str, Any] = torch.tensor( [ [[-4.8_9_6_0, -2.3_6_8_8, -3.0_3_5_5], [-2.8_4_7_8, -0.9_8_3_6, -1.7_4_1_8], [-2.9_4_4_9, -1.3_3_3_2, -2.1_4_5_6]], [[-5.8_0_8_1, -3.4_1_2_4, -4.1_0_0_6], [-3.8_5_6_1, -2.2_0_8_1, -3.0_3_2_3], [-3.8_3_6_5, -2.4_6_0_1, -3.3_6_6_9]], [[-0.0_3_0_9, 3.9_8_6_8, 4.0_5_4_0], [2.9_6_4_0, 4.6_8_7_7, 4.9_9_7_6], [3.2_0_8_1, 4.7_6_9_0, 4.9_9_4_2]], ] , device=__lowerCamelCase , ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , __lowerCamelCase , atol=1E-4 ) ) @slow def _lowerCAmelCase ( self : str ): snake_case__ : Tuple = BeitForSemanticSegmentation.from_pretrained('microsoft/beit-base-finetuned-ade-640-640' ) snake_case__ : int = model.to(__lowerCamelCase ) snake_case__ : Optional[Any] = BeitImageProcessor(do_resize=__lowerCamelCase , size=640 , do_center_crop=__lowerCamelCase ) snake_case__ : Tuple = load_dataset('hf-internal-testing/fixtures_ade20k' , split='test' ) snake_case__ : Optional[int] = Image.open(ds[0]['file'] ) snake_case__ : Optional[int] = image_processor(images=__lowerCamelCase , return_tensors='pt' ).to(__lowerCamelCase ) # forward pass with torch.no_grad(): snake_case__ : Optional[int] = model(**__lowerCamelCase ) snake_case__ : Tuple = outputs.logits.detach().cpu() snake_case__ : Any = image_processor.post_process_semantic_segmentation(outputs=__lowerCamelCase , target_sizes=[(500, 300)] ) snake_case__ : List[Any] = torch.Size((500, 300) ) self.assertEqual(segmentation[0].shape , __lowerCamelCase ) snake_case__ : Tuple = 
image_processor.post_process_semantic_segmentation(outputs=__lowerCamelCase ) snake_case__ : Optional[Any] = torch.Size((160, 160) ) self.assertEqual(segmentation[0].shape , __lowerCamelCase )
270
1
import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, slow, ) from ...test_tokenization_common import TokenizerTesterMixin _lowercase : int =get_tests_dir('''fixtures/test_sentencepiece.model''') if is_torch_available(): from transformers.models.mbart.modeling_mbart import shift_tokens_right _lowercase : Union[str, Any] =2_5_0_0_0_4 _lowercase : Any =2_5_0_0_2_0 @require_sentencepiece @require_tokenizers class SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_ , unittest.TestCase ): '''simple docstring''' lowercase : Any = MBartaaTokenizer lowercase : Union[str, Any] = MBartaaTokenizerFast lowercase : Dict = True lowercase : str = True def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> str: super().setUp() # We have a SentencePiece fixture for testing A : str =MBartaaTokenizer(SCREAMING_SNAKE_CASE__ , src_lang='en_XX' , tgt_lang='ro_RO' , keep_accents=SCREAMING_SNAKE_CASE__ ) tokenizer.save_pretrained(self.tmpdirname ) def SCREAMING_SNAKE_CASE_ ( self : Dict ) -> int: A : Optional[int] ='<s>' A : str =0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> int: A : Optional[Any] =list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '<s>' ) self.assertEqual(vocab_keys[1] , '<pad>' ) self.assertEqual(vocab_keys[-1] , '<mask>' ) self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , 10_54 ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> Tuple: self.assertEqual(self.get_tokenizer().vocab_size , 10_54 ) def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> Tuple: A : List[Any] 
=MBartaaTokenizer(SCREAMING_SNAKE_CASE__ , src_lang='en_XX' , tgt_lang='ro_RO' , keep_accents=SCREAMING_SNAKE_CASE__ ) A : str =tokenizer.tokenize('This is a test' ) self.assertListEqual(SCREAMING_SNAKE_CASE__ , ['▁This', '▁is', '▁a', '▁t', 'est'] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , ) A : List[str] =tokenizer.tokenize('I was born in 92000, and this is falsé.' ) self.assertListEqual( SCREAMING_SNAKE_CASE__ , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.'] , ) A : Dict =tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) self.assertListEqual( SCREAMING_SNAKE_CASE__ , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4] ] , ) A : Optional[int] =tokenizer.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ ) self.assertListEqual( SCREAMING_SNAKE_CASE__ , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.'] , ) @slow def SCREAMING_SNAKE_CASE_ ( self : Any ) -> str: # fmt: off A : Union[str, Any] ={'input_ids': [[25_00_04, 1_10_62, 8_27_72, 7, 15, 8_27_72, 5_38, 5_15_29, 2_37, 1_71_98, 12_90, 2_06, 9, 21_51_75, 13_14, 1_36, 1_71_98, 12_90, 2_06, 9, 5_63_59, 42, 12_20_09, 9, 1_64_66, 16, 8_73_44, 45_37, 9, 47_17, 7_83_81, 6, 15_99_58, 7, 15, 2_44_80, 6_18, 4, 5_27, 2_26_93, 54_28, 4, 27_77, 2_44_80, 98_74, 4, 4_35_23, 5_94, 4, 8_03, 1_83_92, 3_31_89, 18, 4, 4_35_23, 2_44_47, 1_23_99, 1_00, 2_49_55, 
8_36_58, 96_26, 14_40_57, 15, 8_39, 2_23_35, 16, 1_36, 2_49_55, 8_36_58, 8_34_79, 15, 3_91_02, 7_24, 16, 6_78, 6_45, 27_89, 13_28, 45_89, 42, 12_20_09, 11_57_74, 23, 8_05, 13_28, 4_68_76, 7, 1_36, 5_38_94, 19_40, 4_22_27, 4_11_59, 1_77_21, 8_23, 4_25, 4, 2_75_12, 9_87_22, 2_06, 1_36, 55_31, 49_70, 9_19, 1_73_36, 5, 2], [25_00_04, 2_00_80, 6_18, 83, 8_27_75, 47, 4_79, 9, 15_17, 73, 5_38_94, 3_33, 8_05_81, 11_01_17, 1_88_11, 52_56, 12_95, 51, 15_25_26, 2_97, 79_86, 3_90, 12_44_16, 5_38, 3_54_31, 2_14, 98, 1_50_44, 2_57_37, 1_36, 71_08, 4_37_01, 23, 7_56, 13_53_55, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_00_04, 5_81, 6_37_73, 11_94_55, 6, 14_77_97, 8_82_03, 7, 6_45, 70, 21, 32_85, 1_02_69, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=SCREAMING_SNAKE_CASE__ , model_name='facebook/mbart-large-50' , revision='d3913889c59cd5c9e456b269c376325eabad57e2' , ) def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> Dict: if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return A : Any =(self.rust_tokenizer_class, 'hf-internal-testing/tiny-random-mbart50', {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ): A : Dict =self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) A : Tuple =self.tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) A : str =tempfile.mkdtemp() A : Optional[Any] =tokenizer_r.save_pretrained(SCREAMING_SNAKE_CASE__ ) A : Optional[int] =tokenizer_p.save_pretrained(SCREAMING_SNAKE_CASE__ ) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) ) A : Dict =tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f ) self.assertSequenceEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) # Checks everything loads correctly in the same way A : Tuple =tokenizer_r.from_pretrained(SCREAMING_SNAKE_CASE__ ) A : Optional[Any] =tokenizer_p.from_pretrained(SCREAMING_SNAKE_CASE__ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) ) # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key)) # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id")) 
shutil.rmtree(SCREAMING_SNAKE_CASE__ ) # Save tokenizer rust, legacy_format=True A : Union[str, Any] =tempfile.mkdtemp() A : Any =tokenizer_r.save_pretrained(SCREAMING_SNAKE_CASE__ , legacy_format=SCREAMING_SNAKE_CASE__ ) A : Dict =tokenizer_p.save_pretrained(SCREAMING_SNAKE_CASE__ ) # Checks it save with the same files self.assertSequenceEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) # Checks everything loads correctly in the same way A : List[Any] =tokenizer_r.from_pretrained(SCREAMING_SNAKE_CASE__ ) A : Tuple =tokenizer_p.from_pretrained(SCREAMING_SNAKE_CASE__ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) ) shutil.rmtree(SCREAMING_SNAKE_CASE__ ) # Save tokenizer rust, legacy_format=False A : Optional[Any] =tempfile.mkdtemp() A : List[Any] =tokenizer_r.save_pretrained(SCREAMING_SNAKE_CASE__ , legacy_format=SCREAMING_SNAKE_CASE__ ) A : List[Any] =tokenizer_p.save_pretrained(SCREAMING_SNAKE_CASE__ ) # Checks it saved the tokenizer.json file self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) ) # Checks everything loads correctly in the same way A : List[str] =tokenizer_r.from_pretrained(SCREAMING_SNAKE_CASE__ ) A : Optional[Any] =tokenizer_p.from_pretrained(SCREAMING_SNAKE_CASE__ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) ) shutil.rmtree(SCREAMING_SNAKE_CASE__ ) @require_torch @require_sentencepiece @require_tokenizers class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): '''simple docstring''' lowercase : List[str] = "facebook/mbart-large-50-one-to-many-mmt" lowercase : int = [ " UN Chief Says There Is No Military Solution in Syria", " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military 
solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.", ] lowercase : str = [ "Şeful ONU declară că nu există o soluţie militară în Siria", "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei" " pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor" " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.", ] lowercase : str = [EN_CODE, 8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2] @classmethod def SCREAMING_SNAKE_CASE_ ( cls : int ) -> Dict: A : MBartaaTokenizer =MBartaaTokenizer.from_pretrained( cls.checkpoint_name , src_lang='en_XX' , tgt_lang='ro_RO' ) A : Any =1 return cls def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> Dict: self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ar_AR'] , 25_00_01 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['en_EN'] , 25_00_04 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ro_RO'] , 25_00_20 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['mr_IN'] , 25_00_38 ) def SCREAMING_SNAKE_CASE_ ( self : str ) -> str: A : Optional[Any] =self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens , SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> Any: self.assertIn(SCREAMING_SNAKE_CASE__ , self.tokenizer.all_special_ids ) A : List[str] =[RO_CODE, 8_84, 90_19, 96, 9, 9_16, 8_67_92, 36, 1_87_43, 1_55_96, 5, 2] A : Any =self.tokenizer.decode(SCREAMING_SNAKE_CASE__ , skip_special_tokens=SCREAMING_SNAKE_CASE__ ) A : str =self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=SCREAMING_SNAKE_CASE__ ) self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) self.assertNotIn(self.tokenizer.eos_token , SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE_ ( self : Tuple ) -> Any: A : Optional[int] 
=['this is gunna be a long sentence ' * 20] assert isinstance(src_text[0] , SCREAMING_SNAKE_CASE__ ) A : Tuple =10 A : List[Any] =self.tokenizer(SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , truncation=SCREAMING_SNAKE_CASE__ ).input_ids[0] self.assertEqual(ids[0] , SCREAMING_SNAKE_CASE__ ) self.assertEqual(ids[-1] , 2 ) self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> Optional[int]: self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['<mask>', 'ar_AR'] ) , [25_00_53, 25_00_01] ) def SCREAMING_SNAKE_CASE_ ( self : int ) -> Any: A : Dict =tempfile.mkdtemp() A : str =self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(SCREAMING_SNAKE_CASE__ ) A : Union[str, Any] =MBartaaTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ ) self.assertDictEqual(new_tok.fairseq_tokens_to_ids , SCREAMING_SNAKE_CASE__ ) @require_torch def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ) -> Dict: A : Optional[int] =self.tokenizer(self.src_text , text_target=self.tgt_text , padding=SCREAMING_SNAKE_CASE__ , return_tensors='pt' ) A : str =shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id ) # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 assert batch.input_ids[1][0] == EN_CODE assert batch.input_ids[1][-1] == 2 assert batch.labels[1][0] == RO_CODE assert batch.labels[1][-1] == 2 assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE] @require_torch def SCREAMING_SNAKE_CASE_ ( self : str ) -> Any: A : Union[str, Any] =self.tokenizer( self.src_text , text_target=self.tgt_text , padding=SCREAMING_SNAKE_CASE__ , truncation=SCREAMING_SNAKE_CASE__ , max_length=len(self.expected_src_tokens ) , return_tensors='pt' , ) A : List[str] =shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id ) self.assertIsInstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) self.assertEqual((2, 14) , batch.input_ids.shape ) 
self.assertEqual((2, 14) , batch.attention_mask.shape ) A : Dict =batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens , SCREAMING_SNAKE_CASE__ ) self.assertEqual(2 , batch.decoder_input_ids[0, 0] ) # decoder_start_token_id # Test that special tokens are reset self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] ) self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) def SCREAMING_SNAKE_CASE_ ( self : Dict ) -> str: A : int =self.tokenizer(self.src_text , padding=SCREAMING_SNAKE_CASE__ , truncation=SCREAMING_SNAKE_CASE__ , max_length=3 , return_tensors='pt' ) A : Tuple =self.tokenizer( text_target=self.tgt_text , padding=SCREAMING_SNAKE_CASE__ , truncation=SCREAMING_SNAKE_CASE__ , max_length=10 , return_tensors='pt' ) A : Tuple =targets['input_ids'] A : Any =shift_tokens_right(SCREAMING_SNAKE_CASE__ , self.tokenizer.pad_token_id ) self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.decoder_input_ids.shape[1] , 10 ) @require_torch def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> Any: A : Dict =self.tokenizer._build_translation_inputs( 'A test' , return_tensors='pt' , src_lang='en_XX' , tgt_lang='ar_AR' ) self.assertEqual( nested_simplify(SCREAMING_SNAKE_CASE__ ) , { # en_XX, A, test, EOS 'input_ids': [[25_00_04, 62, 30_34, 2]], 'attention_mask': [[1, 1, 1, 1]], # ar_AR 'forced_bos_token_id': 25_00_01, } , )
721
import collections import json import math import os import re import time from fnmatch import fnmatch from typing import Dict import requests from slack_sdk import WebClient _lowercase : Optional[Any] =WebClient(token=os.environ['''CI_SLACK_BOT_TOKEN''']) def A__ ( lowercase: Optional[int] ) -> Optional[int]: A : str =test_results.split(' ' ) A : List[str] =0 A : Tuple =0 # When the output is short enough, the output is surrounded by = signs: "== OUTPUT ==" # When it is too long, those signs are not present. A : List[str] =expressions[-2] if '=' in expressions[-1] else expressions[-1] for i, expression in enumerate(lowercase ): if "failed" in expression: failed += int(expressions[i - 1] ) if "passed" in expression: success += int(expressions[i - 1] ) return failed, success, time_spent def A__ ( lowercase: List[Any] ) -> str: A : Union[str, Any] ={} A : Optional[Any] =None A : Union[str, Any] =False for line in failures_short_lines.split('\n' ): if re.search(r'_ \[doctest\]', lowercase ): A : List[Any] =True A : Any =line.split(' ' )[2] elif in_error and not line.split(' ' )[0].isdigit(): A : Dict =line A : List[str] =False return failures class SCREAMING_SNAKE_CASE_ : '''simple docstring''' def __init__( self : str , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Dict ) -> List[str]: A : Tuple =title A : Dict =doc_test_results['time_spent'].split(',' )[0] A : Union[str, Any] =doc_test_results['success'] A : Any =doc_test_results['failures'] A : Optional[Any] =self.n_success + self.n_failures # Failures and success of the modeling tests A : Union[str, Any] =doc_test_results @property def SCREAMING_SNAKE_CASE_ ( self : Tuple ) -> str: A : Any =[self._time_spent] A : List[str] =0 for time in time_spent: A : List[Any] =time.split(':' ) # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute. 
if len(SCREAMING_SNAKE_CASE__ ) == 1: A : List[str] =[0, 0, time_parts[0]] A , A , A : Tuple =int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] ) total_secs += hours * 36_00 + minutes * 60 + seconds A , A , A : str =total_secs // 36_00, (total_secs % 36_00) // 60, total_secs % 60 return f'{int(SCREAMING_SNAKE_CASE__ )}h{int(SCREAMING_SNAKE_CASE__ )}m{int(SCREAMING_SNAKE_CASE__ )}s' @property def SCREAMING_SNAKE_CASE_ ( self : Tuple ) -> Dict: return {"type": "header", "text": {"type": "plain_text", "text": self.title}} @property def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> Dict: return { "type": "section", "text": { "type": "plain_text", "text": f'🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.', "emoji": True, }, "accessory": { "type": "button", "text": {"type": "plain_text", "text": "Check Action results", "emoji": True}, "url": f'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}', }, } @property def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> Dict: return { "type": "section", "text": { "type": "plain_text", "text": ( f'There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in' f' {self.time}.' 
), "emoji": True, }, "accessory": { "type": "button", "text": {"type": "plain_text", "text": "Check Action results", "emoji": True}, "url": f'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}', }, } @property def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> Dict: A : Tuple =40 A : Optional[Any] ={k: v['failed'] for k, v in doc_test_results.items() if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )} A : Any ='' for category, failures in category_failures.items(): if len(SCREAMING_SNAKE_CASE__ ) == 0: continue if report != "": report += "\n\n" report += f'*{category} failures*:'.ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n" report += "`" report += "`\n`".join(SCREAMING_SNAKE_CASE__ ) report += "`" return { "type": "section", "text": { "type": "mrkdwn", "text": f'The following examples had failures:\n\n\n{report}\n', }, } @property def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ) -> str: A : Optional[int] =[self.header] if self.n_failures > 0: blocks.append(self.failures ) if self.n_failures > 0: blocks.extend([self.category_failures] ) if self.n_failures == 0: blocks.append(self.no_failures ) return json.dumps(SCREAMING_SNAKE_CASE__ ) @staticmethod def SCREAMING_SNAKE_CASE_ ( ) -> Optional[Any]: A : Tuple =[ { 'type': 'section', 'text': { 'type': 'plain_text', 'text': 'There was an issue running the tests.', }, 'accessory': { 'type': 'button', 'text': {'type': 'plain_text', 'text': 'Check Action results', 'emoji': True}, 'url': f'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}', }, } ] print('Sending the following payload' ) print(json.dumps({'blocks': json.loads(SCREAMING_SNAKE_CASE__ )} ) ) client.chat_postMessage( channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'] , text='There was an issue running the tests.' 
, blocks=SCREAMING_SNAKE_CASE__ , ) def SCREAMING_SNAKE_CASE_ ( self : Dict ) -> Optional[int]: print('Sending the following payload' ) print(json.dumps({'blocks': json.loads(self.payload )} ) ) A : Any =f'{self.n_failures} failures out of {self.n_tests} tests,' if self.n_failures else 'All tests passed.' A : Dict =client.chat_postMessage( channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'] , blocks=self.payload , text=SCREAMING_SNAKE_CASE__ , ) def SCREAMING_SNAKE_CASE_ ( self : Dict , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Optional[int]: A : List[str] ='' for key, value in failures.items(): A : Any =value[:2_00] + ' [Truncated]' if len(SCREAMING_SNAKE_CASE__ ) > 2_50 else value failures_text += f'*{key}*\n_{value}_\n\n' A : Union[str, Any] =job_name A : Any ={'type': 'section', 'text': {'type': 'mrkdwn', 'text': text}} if job_link is not None: A : int ={ 'type': 'button', 'text': {'type': 'plain_text', 'text': 'GitHub Action job', 'emoji': True}, 'url': job_link, } return [ {"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}}, content, {"type": "section", "text": {"type": "mrkdwn", "text": failures_text}}, ] def SCREAMING_SNAKE_CASE_ ( self : Tuple ) -> List[Any]: if self.thread_ts is None: raise ValueError('Can only post reply if a post has been made.' 
) A : Union[str, Any] =self.doc_test_results.pop('job_link' ) self.doc_test_results.pop('failures' ) self.doc_test_results.pop('success' ) self.doc_test_results.pop('time_spent' ) A : Union[str, Any] =sorted(self.doc_test_results.items() , key=lambda SCREAMING_SNAKE_CASE__ : t[0] ) for job, job_result in sorted_dict: if len(job_result['failures'] ): A : Any =f'*Num failures* :{len(job_result["failed"] )} \n' A : List[Any] =job_result['failures'] A : Any =self.get_reply_blocks(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , text=SCREAMING_SNAKE_CASE__ ) print('Sending the following reply' ) print(json.dumps({'blocks': blocks} ) ) client.chat_postMessage( channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'] , text=f'Results for {job}' , blocks=SCREAMING_SNAKE_CASE__ , thread_ts=self.thread_ts['ts'] , ) time.sleep(1 ) def A__ ( ) -> Union[str, Any]: A : Any =os.environ['GITHUB_RUN_ID'] A : List[Any] =F'https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100' A : Union[str, Any] =requests.get(lowercase ).json() A : List[Any] ={} try: jobs.update({job['name']: job['html_url'] for job in result['jobs']} ) A : List[str] =math.ceil((result['total_count'] - 100) / 100 ) for i in range(lowercase ): A : List[str] =requests.get(url + F'&page={i + 2}' ).json() jobs.update({job['name']: job['html_url'] for job in result['jobs']} ) return jobs except Exception as e: print('Unknown error, could not fetch links.', lowercase ) return {} def A__ ( lowercase: str ) -> Optional[Any]: A : Any ={} if os.path.exists(lowercase ): A : List[Any] =os.listdir(lowercase ) for file in files: try: with open(os.path.join(lowercase, lowercase ), encoding='utf-8' ) as f: A : Optional[int] =f.read() except UnicodeDecodeError as e: raise ValueError(F'Could not open {os.path.join(lowercase, lowercase )}.' 
) from e return _artifact def A__ ( ) -> int: class SCREAMING_SNAKE_CASE_ : '''simple docstring''' def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE__ : str ) -> List[str]: A : Dict =name A : Dict =[] def __str__( self : Optional[Any] ) -> List[str]: return self.name def SCREAMING_SNAKE_CASE_ ( self : int , SCREAMING_SNAKE_CASE__ : str ) -> List[Any]: self.paths.append({'name': self.name, 'path': path} ) A : Dict[str, Artifact] ={} A : str =filter(os.path.isdir, os.listdir() ) for directory in directories: A : Tuple =directory if artifact_name not in _available_artifacts: A : int =Artifact(lowercase ) _available_artifacts[artifact_name].add_path(lowercase ) return _available_artifacts if __name__ == "__main__": _lowercase : Optional[int] =get_job_links() _lowercase : str =retrieve_available_artifacts() _lowercase : List[Any] =collections.OrderedDict( [ ('''*.py''', '''API Examples'''), ('''*.md''', '''MD Examples'''), ] ) # This dict will contain all the information relative to each doc test category: # - failed: list of failed tests # - failures: dict in the format 'test': 'error_message' _lowercase : Optional[Any] ={ v: { '''failed''': [], '''failures''': {}, } for v in docs.values() } # Link to the GitHub Action job _lowercase : List[Any] =github_actions_job_links.get('''run_doctests''') _lowercase : int =available_artifacts['''doc_tests_gpu_test_reports'''].paths[0] _lowercase : Dict =retrieve_artifact(artifact_path['''name''']) if "stats" in artifact: _lowercase , _lowercase , _lowercase : List[Any] =handle_test_results(artifact['''stats''']) _lowercase : Any =failed _lowercase : Union[str, Any] =success _lowercase : str =time_spent[1:-1] + ''', ''' _lowercase : Any =extract_first_line_failure(artifact['''failures_short''']) for line in artifact["summary_short"].split('''\n'''): if re.search('''FAILED''', line): _lowercase : Tuple =line.replace('''FAILED ''', '''''') _lowercase : int =line.split()[0].replace('''\n''', '''''') if "::" in line: 
_lowercase , _lowercase : str =line.split('''::''') else: _lowercase , _lowercase : Union[str, Any] =line, line for file_regex in docs.keys(): if fnmatch(file_path, file_regex): _lowercase : Any =docs[file_regex] doc_test_results[category]["failed"].append(test) _lowercase : Any =all_failures[test] if test in all_failures else '''N/A''' _lowercase : Tuple =failure break _lowercase : Optional[int] =Message('''🤗 Results of the doc tests.''', doc_test_results) message.post() message.post_reply()
661
0
'''simple docstring''' import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( UniSpeechConfig, UniSpeechForCTC, UniSpeechForPreTraining, WavaVecaFeatureExtractor, WavaVecaPhonemeCTCTokenizer, WavaVecaProcessor, logging, ) logging.set_verbosity_info() UpperCAmelCase_ : Optional[Any] = logging.get_logger(__name__) UpperCAmelCase_ : List[str] = { '''post_extract_proj''': '''feature_projection.projection''', '''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''', '''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''', '''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''', '''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''', '''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''', '''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''', '''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''', '''fc2''': '''encoder.layers.*.feed_forward.output_dense''', '''final_layer_norm''': '''encoder.layers.*.final_layer_norm''', '''encoder.layer_norm''': '''encoder.layer_norm''', '''w2v_model.layer_norm''': '''feature_projection.layer_norm''', '''quantizer.weight_proj''': '''quantizer.weight_proj''', '''quantizer.vars''': '''quantizer.codevectors''', '''project_q''': '''project_q''', '''final_proj''': '''project_hid''', '''w2v_encoder.proj''': '''ctc_proj''', '''mask_emb''': '''masked_spec_embed''', } UpperCAmelCase_ : Optional[Any] = [ '''ctc_proj''', '''quantizer.weight_proj''', '''quantizer.codevectors''', '''project_q''', '''project_hid''', ] def _UpperCamelCase (_lowerCamelCase : Optional[Any] , _lowerCamelCase : List[str] , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : str , _lowerCamelCase : List[str] , _lowerCamelCase : str )-> int: '''simple docstring''' for attribute in key.split('''.''' ): if is_finetuned: if attribute in ["quantizer", "project_q", "project_hid"]: # those layers are only relevant for pretraining and should be 
dropped return if attribute == "ctc_proj": # we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models __snake_case = '''lm_head''' __snake_case = getattr(_lowerCamelCase , _lowerCamelCase ) if weight_type is not None: __snake_case = getattr(_lowerCamelCase , _lowerCamelCase ).shape else: __snake_case = hf_pointer.shape assert hf_shape == value.shape, ( f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be''' f''' {value.shape} for {full_name}''' ) if weight_type == "weight": __snake_case = value elif weight_type == "weight_g": __snake_case = value elif weight_type == "weight_v": __snake_case = value elif weight_type == "bias": __snake_case = value else: __snake_case = value logger.info(f'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' ) def _UpperCamelCase (_lowerCamelCase : Any , _lowerCamelCase : Tuple , _lowerCamelCase : str )-> str: '''simple docstring''' __snake_case = [] __snake_case = fairseq_model.state_dict() __snake_case = hf_model.unispeech.feature_extractor for name, value in fairseq_dict.items(): __snake_case = False if "conv_layers" in name: load_conv_layer( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , hf_model.config.feat_extract_norm == '''group''' , ) __snake_case = True else: for key, mapped_key in MAPPING.items(): __snake_case = '''unispeech.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]: __snake_case = True if "*" in mapped_key: __snake_case = name.split(_lowerCamelCase )[0].split('''.''' )[-2] __snake_case = mapped_key.replace('''*''' , _lowerCamelCase ) if "weight_g" in name: __snake_case = '''weight_g''' elif "weight_v" in name: __snake_case = '''weight_v''' elif "bias" in name: __snake_case = '''bias''' elif "weight" in name: # TODO: don't match quantizer.weight_proj __snake_case = '''weight''' else: 
__snake_case = None set_recursively(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) continue if not is_used: unused_weights.append(_lowerCamelCase ) logger.warning(f'''Unused weights: {unused_weights}''' ) def _UpperCamelCase (_lowerCamelCase : Any , _lowerCamelCase : int , _lowerCamelCase : str , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : List[str] )-> List[str]: '''simple docstring''' __snake_case = full_name.split('''conv_layers.''' )[-1] __snake_case = name.split('''.''' ) __snake_case = int(items[0] ) __snake_case = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' ) __snake_case = value logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' ) __snake_case = value logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( f'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was''' " found." 
) __snake_case = value logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.''' ) __snake_case = value logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) else: unused_weights.append(_lowerCamelCase ) @torch.no_grad() def _UpperCamelCase (_lowerCamelCase : Optional[Any] , _lowerCamelCase : List[str] , _lowerCamelCase : int=None , _lowerCamelCase : Any=None , _lowerCamelCase : Tuple=True )-> Optional[int]: '''simple docstring''' if config_path is not None: __snake_case = UniSpeechConfig.from_pretrained(_lowerCamelCase ) else: __snake_case = UniSpeechConfig() if is_finetuned: if dict_path: __snake_case = Dictionary.load_from_json(_lowerCamelCase ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq __snake_case = target_dict.pad_index __snake_case = target_dict.bos_index __snake_case = target_dict.eos_index __snake_case = len(target_dict.symbols ) __snake_case = os.path.join(_lowerCamelCase , '''vocab.json''' ) if not os.path.isdir(_lowerCamelCase ): logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(_lowerCamelCase ) ) return os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase ) __snake_case = target_dict.indices # fairseq has the <pad> and <s> switched __snake_case = 42 __snake_case = 43 with open(_lowerCamelCase , '''w''' , encoding='''utf-8''' ) as vocab_handle: json.dump(_lowerCamelCase , _lowerCamelCase ) __snake_case = WavaVecaPhonemeCTCTokenizer( _lowerCamelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , 
do_lower_case=_lowerCamelCase , ) __snake_case = True if config.feat_extract_norm == '''layer''' else False __snake_case = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=_lowerCamelCase , return_attention_mask=_lowerCamelCase , ) __snake_case = WavaVecaProcessor(feature_extractor=_lowerCamelCase , tokenizer=_lowerCamelCase ) processor.save_pretrained(_lowerCamelCase ) __snake_case = UniSpeechForCTC(_lowerCamelCase ) else: __snake_case = UniSpeechForPreTraining(_lowerCamelCase ) if is_finetuned: __snake_case , __snake_case , __snake_case = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] ), '''w2v_path''': checkpoint_path} ) else: __snake_case , __snake_case , __snake_case = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] ) __snake_case = model[0].eval() recursively_load_weights(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) hf_unispeech.save_pretrained(_lowerCamelCase ) if __name__ == "__main__": UpperCAmelCase_ : str = argparse.ArgumentParser() parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''') parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') parser.add_argument( '''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not''' ) UpperCAmelCase_ : Union[str, Any] = parser.parse_args() convert_unispeech_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
24
"""Project Euler problem 75: https://projecteuler.net/problem=75

Count the perimeters up to a limit for which exactly one integer-sided
right triangle exists.
"""
from collections import defaultdict
from math import gcd


def lowercase_(__snake_case: int = 1_500_000) -> int:
    """Return how many perimeters <= ``__snake_case`` admit exactly one
    integer right triangle.

    Every primitive Pythagorean triple is generated exactly once by Euclid's
    formula with coprime m > n of opposite parity; its perimeter is
    2*m*(m + n). Multiples of each primitive perimeter account for the
    non-primitive triples.
    """
    # frequencies[p] == number of distinct right triangles with perimeter p.
    frequencies: defaultdict = defaultdict(int)
    euclid_m = 2
    # Smallest perimeter reachable for a given m is 2*m*(m + 1) (n = 1).
    while 2 * euclid_m * (euclid_m + 1) <= __snake_case:
        # n < m, opposite parity to m; skipping gcd > 1 keeps triples primitive.
        for euclid_n in range((euclid_m % 2) + 1, euclid_m, 2):
            if gcd(euclid_m, euclid_n) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            for perimeter in range(primitive_perimeter, __snake_case + 1, primitive_perimeter):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1)


if __name__ == "__main__":
    print(f"{lowercase_() = }")
241
0
"""simple docstring""" from __future__ import annotations import time A__ : str = list[tuple[int, int]] A__ : Optional[int] = [ [0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles [0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0], [1, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0], ] A__ : Tuple = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right class lowercase__ : '''simple docstring''' def __init__( self : Union[str, Any] , snake_case__ : int , snake_case__ : int , snake_case__ : int , snake_case__ : int , snake_case__ : Node | None ): lowerCamelCase_ : Optional[int] =pos_x lowerCamelCase_ : Dict =pos_y lowerCamelCase_ : int =(pos_y, pos_x) lowerCamelCase_ : Optional[int] =goal_x lowerCamelCase_ : Tuple =goal_y lowerCamelCase_ : str =parent class lowercase__ : '''simple docstring''' def __init__( self : List[Any] , snake_case__ : tuple[int, int] , snake_case__ : tuple[int, int] ): lowerCamelCase_ : Tuple =Node(start[1] , start[0] , goal[1] , goal[0] , __A ) lowerCamelCase_ : Tuple =Node(goal[1] , goal[0] , goal[1] , goal[0] , __A ) lowerCamelCase_ : int =[self.start] lowerCamelCase_ : Union[str, Any] =False def UpperCAmelCase__ ( self : Dict ): while self.node_queue: lowerCamelCase_ : Optional[Any] =self.node_queue.pop(0 ) if current_node.pos == self.target.pos: lowerCamelCase_ : Optional[Any] =True return self.retrace_path(__A ) lowerCamelCase_ : int =self.get_successors(__A ) for node in successors: self.node_queue.append(__A ) if not self.reached: return [self.start.pos] return None def UpperCAmelCase__ ( self : Union[str, Any] , snake_case__ : Node ): lowerCamelCase_ : str =[] for action in delta: lowerCamelCase_ : str =parent.pos_x + action[1] lowerCamelCase_ : Union[str, Any] =parent.pos_y + action[0] if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(__A ) - 1): continue if grid[pos_y][pos_x] != 0: continue successors.append( Node(__A , __A , self.target.pos_y , self.target.pos_x , __A 
) ) return successors def UpperCAmelCase__ ( self : Optional[Any] , snake_case__ : Node | None ): lowerCamelCase_ : Tuple =node lowerCamelCase_ : Any =[] while current_node is not None: path.append((current_node.pos_y, current_node.pos_x) ) lowerCamelCase_ : Tuple =current_node.parent path.reverse() return path class lowercase__ : '''simple docstring''' def __init__( self : Dict , snake_case__ : str , snake_case__ : int ): lowerCamelCase_ : str =BreadthFirstSearch(__A , __A ) lowerCamelCase_ : int =BreadthFirstSearch(__A , __A ) lowerCamelCase_ : Tuple =False def UpperCAmelCase__ ( self : Optional[Any] ): while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue: lowerCamelCase_ : Any =self.fwd_bfs.node_queue.pop(0 ) lowerCamelCase_ : List[str] =self.bwd_bfs.node_queue.pop(0 ) if current_bwd_node.pos == current_fwd_node.pos: lowerCamelCase_ : List[str] =True return self.retrace_bidirectional_path( __A , __A ) lowerCamelCase_ : Union[str, Any] =current_bwd_node lowerCamelCase_ : Dict =current_fwd_node lowerCamelCase_ : List[Any] ={ self.fwd_bfs: self.fwd_bfs.get_successors(__A ), self.bwd_bfs: self.bwd_bfs.get_successors(__A ), } for bfs in [self.fwd_bfs, self.bwd_bfs]: for node in successors[bfs]: bfs.node_queue.append(__A ) if not self.reached: return [self.fwd_bfs.start.pos] return None def UpperCAmelCase__ ( self : Any , snake_case__ : Node , snake_case__ : Node ): lowerCamelCase_ : List[str] =self.fwd_bfs.retrace_path(__A ) lowerCamelCase_ : Optional[Any] =self.bwd_bfs.retrace_path(__A ) bwd_path.pop() bwd_path.reverse() lowerCamelCase_ : List[Any] =fwd_path + bwd_path return path if __name__ == "__main__": # all coordinates are given in format [y,x] import doctest doctest.testmod() A__ : str = (0, 0) A__ : List[str] = (len(grid) - 1, len(grid[0]) - 1) for elem in grid: print(elem) A__ : Any = time.time() A__ : Optional[Any] = BreadthFirstSearch(init, goal) A__ : str = bfs.search() A__ : Optional[Any] = time.time() - start_bfs_time print('Unidirectional BFS 
computation time : ', bfs_time) A__ : Optional[Any] = time.time() A__ : Optional[int] = BidirectionalBreadthFirstSearch(init, goal) A__ : str = bd_bfs.search() A__ : Optional[Any] = time.time() - start_bd_bfs_time print('Bidirectional BFS computation time : ', bd_bfs_time)
703
"""simple docstring""" import os from glob import glob import imageio import torch import torchvision import wandb from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan from loaders import load_vqgan from PIL import Image from torch import nn from transformers import CLIPModel, CLIPTokenizerFast from utils import get_device, get_timestamp, show_pil class lowercase__ : def __init__( self : List[str] , snake_case__ : str = "cpu" , snake_case__ : str = "openai/clip-vit-large-patch14" ): lowerCamelCase_ : Union[str, Any] =device lowerCamelCase_ : Union[str, Any] =CLIPTokenizerFast.from_pretrained(snake_case__ ) lowerCamelCase_ : List[str] =[0.48_145_466, 0.4_578_275, 0.40_821_073] lowerCamelCase_ : str =[0.26_862_954, 0.26_130_258, 0.27_577_711] lowerCamelCase_ : Dict =torchvision.transforms.Normalize(self.image_mean , self.image_std ) lowerCamelCase_ : Tuple =torchvision.transforms.Resize(224 ) lowerCamelCase_ : str =torchvision.transforms.CenterCrop(224 ) def UpperCAmelCase__ ( self : List[Any] , snake_case__ : Any ): lowerCamelCase_ : List[Any] =self.resize(snake_case__ ) lowerCamelCase_ : Union[str, Any] =self.center_crop(snake_case__ ) lowerCamelCase_ : str =self.normalize(snake_case__ ) return images def __call__( self : Tuple , snake_case__ : Optional[Any]=None , snake_case__ : str=None , **snake_case__ : Dict ): lowerCamelCase_ : List[str] =self.tokenizer(text=snake_case__ , **snake_case__ ) lowerCamelCase_ : Optional[int] =self.preprocess_img(snake_case__ ) lowerCamelCase_ : List[Any] ={key: value.to(self.device ) for (key, value) in encoding.items()} return encoding class lowercase__ ( nn.Module ): def __init__( self : str , snake_case__ : Any=10 , snake_case__ : Optional[int]=0.01 , snake_case__ : Optional[Any]=None , snake_case__ : str=None , snake_case__ : Optional[Any]=None , snake_case__ : str=None , snake_case__ : List[Any]=None , snake_case__ : Tuple=None , snake_case__ : Any=False , snake_case__ : Dict=True , 
snake_case__ : List[str]="image" , snake_case__ : int=True , snake_case__ : List[Any]=False , snake_case__ : Optional[int]=False , snake_case__ : int=False , ): super().__init__() lowerCamelCase_ : List[Any] =None lowerCamelCase_ : Optional[Any] =device if device else get_device() if vqgan: lowerCamelCase_ : List[str] =vqgan else: lowerCamelCase_ : Optional[Any] =load_vqgan(self.device , conf_path=snake_case__ , ckpt_path=snake_case__ ) self.vqgan.eval() if clip: lowerCamelCase_ : Optional[int] =clip else: lowerCamelCase_ : Optional[Any] =CLIPModel.from_pretrained("openai/clip-vit-base-patch32" ) self.clip.to(self.device ) lowerCamelCase_ : Union[str, Any] =ProcessorGradientFlow(device=self.device ) lowerCamelCase_ : Optional[Any] =iterations lowerCamelCase_ : Union[str, Any] =lr lowerCamelCase_ : int =log lowerCamelCase_ : Dict =make_grid lowerCamelCase_ : Union[str, Any] =return_val lowerCamelCase_ : List[Any] =quantize lowerCamelCase_ : Optional[int] =self.vqgan.decoder.z_shape def UpperCAmelCase__ ( self : int , snake_case__ : Optional[int]=None , snake_case__ : Any=None , snake_case__ : Tuple=5 , snake_case__ : str=True ): lowerCamelCase_ : Tuple =[] if output_path is None: lowerCamelCase_ : str ="./animation.gif" if input_path is None: lowerCamelCase_ : Any =self.save_path lowerCamelCase_ : Union[str, Any] =sorted(glob(input_path + "/*" ) ) if not len(snake_case__ ): raise ValueError( "No images found in save path, aborting (did you pass save_intermediate=True to the generate" " function?)" ) if len(snake_case__ ) == 1: print("Only one image found in save path, (did you pass save_intermediate=True to the generate function?)" ) lowerCamelCase_ : str =total_duration / len(snake_case__ ) lowerCamelCase_ : List[str] =[frame_duration] * len(snake_case__ ) if extend_frames: lowerCamelCase_ : int =1.5 lowerCamelCase_ : str =3 for file_name in paths: if file_name.endswith(".png" ): images.append(imageio.imread(snake_case__ ) ) imageio.mimsave(snake_case__ , 
snake_case__ , duration=snake_case__ ) print(F"""gif saved to {output_path}""" ) def UpperCAmelCase__ ( self : List[str] , snake_case__ : Optional[int]=None , snake_case__ : List[str]=None ): if not (path or img): raise ValueError("Input either path or tensor" ) if img is not None: raise NotImplementedError lowerCamelCase_ : Union[str, Any] =preprocess(Image.open(snake_case__ ) , target_image_size=256 ).to(self.device ) lowerCamelCase_ : List[str] =preprocess_vqgan(snake_case__ ) lowerCamelCase_ , *lowerCamelCase_ : List[Any] =self.vqgan.encode(snake_case__ ) return z def UpperCAmelCase__ ( self : List[Any] , snake_case__ : Tuple ): lowerCamelCase_ : List[str] =self.latent.detach().requires_grad_() lowerCamelCase_ : Optional[int] =base_latent + transform_vector if self.quantize: lowerCamelCase_ , *lowerCamelCase_ : Optional[int] =self.vqgan.quantize(snake_case__ ) else: lowerCamelCase_ : Optional[Any] =trans_latent return self.vqgan.decode(snake_case__ ) def UpperCAmelCase__ ( self : Any , snake_case__ : Any , snake_case__ : Optional[Any] , snake_case__ : int=None ): lowerCamelCase_ : int =self.clip_preprocessor(text=snake_case__ , images=snake_case__ , return_tensors="pt" , padding=snake_case__ ) lowerCamelCase_ : int =self.clip(**snake_case__ ) lowerCamelCase_ : Optional[Any] =clip_outputs.logits_per_image if weights is not None: lowerCamelCase_ : str =similarity_logits * weights return similarity_logits.sum() def UpperCAmelCase__ ( self : Any , snake_case__ : Optional[Any] , snake_case__ : Union[str, Any] , snake_case__ : Union[str, Any] ): lowerCamelCase_ : int =self._get_clip_similarity(pos_prompts["prompts"] , snake_case__ , weights=(1 / pos_prompts["weights"]) ) if neg_prompts: lowerCamelCase_ : str =self._get_clip_similarity(neg_prompts["prompts"] , snake_case__ , weights=neg_prompts["weights"] ) else: lowerCamelCase_ : List[Any] =torch.tensor([1] , device=self.device ) lowerCamelCase_ : Optional[int] =-torch.log(snake_case__ ) + torch.log(snake_case__ ) 
return loss def UpperCAmelCase__ ( self : List[str] , snake_case__ : Optional[int] , snake_case__ : Any , snake_case__ : Optional[Any] ): lowerCamelCase_ : int =torch.randn_like(self.latent , requires_grad=snake_case__ , device=self.device ) lowerCamelCase_ : Optional[int] =torch.optim.Adam([vector] , lr=self.lr ) for i in range(self.iterations ): optim.zero_grad() lowerCamelCase_ : Any =self._add_vector(snake_case__ ) lowerCamelCase_ : Any =loop_post_process(snake_case__ ) lowerCamelCase_ : Optional[Any] =self._get_CLIP_loss(snake_case__ , snake_case__ , snake_case__ ) print("CLIP loss" , snake_case__ ) if self.log: wandb.log({"CLIP Loss": clip_loss} ) clip_loss.backward(retain_graph=snake_case__ ) optim.step() if self.return_val == "image": yield custom_to_pil(transformed_img[0] ) else: yield vector def UpperCAmelCase__ ( self : Optional[Any] , snake_case__ : Optional[Any] , snake_case__ : Union[str, Any] , snake_case__ : List[str] ): wandb.init(reinit=snake_case__ , project="face-editor" ) wandb.config.update({"Positive Prompts": positive_prompts} ) wandb.config.update({"Negative Prompts": negative_prompts} ) wandb.config.update({"lr": self.lr, "iterations": self.iterations} ) if image_path: lowerCamelCase_ : Any =Image.open(snake_case__ ) lowerCamelCase_ : List[str] =image.resize((256, 256) ) wandb.log("Original Image" , wandb.Image(snake_case__ ) ) def UpperCAmelCase__ ( self : Optional[Any] , snake_case__ : Dict ): if not prompts: return [] lowerCamelCase_ : Any =[] lowerCamelCase_ : int =[] if isinstance(snake_case__ , snake_case__ ): lowerCamelCase_ : List[Any] =[prompt.strip() for prompt in prompts.split("|" )] for prompt in prompts: if isinstance(snake_case__ , (tuple, list) ): lowerCamelCase_ : Optional[Any] =prompt[0] lowerCamelCase_ : List[str] =float(prompt[1] ) elif ":" in prompt: lowerCamelCase_ , lowerCamelCase_ : List[str] =prompt.split(":" ) lowerCamelCase_ : Optional[int] =float(snake_case__ ) else: lowerCamelCase_ : int =prompt lowerCamelCase_ 
: Tuple =1.0 processed_prompts.append(snake_case__ ) weights.append(snake_case__ ) return { "prompts": processed_prompts, "weights": torch.tensor(snake_case__ , device=self.device ), } def UpperCAmelCase__ ( self : Optional[int] , snake_case__ : Dict , snake_case__ : Tuple=None , snake_case__ : Union[str, Any]=None , snake_case__ : List[str]=True , snake_case__ : Tuple=False , snake_case__ : Tuple=True , snake_case__ : Optional[int]=True , snake_case__ : Union[str, Any]=None , ): if image_path: lowerCamelCase_ : Dict =self._get_latent(snake_case__ ) else: lowerCamelCase_ : List[str] =torch.randn(self.latent_dim , device=self.device ) if self.log: self._init_logging(snake_case__ , snake_case__ , snake_case__ ) assert pos_prompts, "You must provide at least one positive prompt." lowerCamelCase_ : Optional[Any] =self.process_prompts(snake_case__ ) lowerCamelCase_ : Optional[int] =self.process_prompts(snake_case__ ) if save_final and save_path is None: lowerCamelCase_ : str =os.path.join("./outputs/" , "_".join(pos_prompts["prompts"] ) ) if not os.path.exists(snake_case__ ): os.makedirs(snake_case__ ) else: lowerCamelCase_ : Optional[Any] =save_path + "_" + get_timestamp() os.makedirs(snake_case__ ) lowerCamelCase_ : Any =save_path lowerCamelCase_ : List[Any] =self.vqgan.decode(self.latent )[0] if show_intermediate: print("Original Image" ) show_pil(custom_to_pil(snake_case__ ) ) lowerCamelCase_ : Optional[Any] =loop_post_process(snake_case__ ) for iter, transformed_img in enumerate(self._optimize_CLIP(snake_case__ , snake_case__ , snake_case__ ) ): if show_intermediate: show_pil(snake_case__ ) if save_intermediate: transformed_img.save(os.path.join(self.save_path , F"""iter_{iter:03d}.png""" ) ) if self.log: wandb.log({"Image": wandb.Image(snake_case__ )} ) if show_final: show_pil(snake_case__ ) if save_final: transformed_img.save(os.path.join(self.save_path , F"""iter_{iter:03d}_final.png""" ) )
244
0
"""Add two integers using only bitwise operations."""


def add(first: int, second: int) -> int:
    """Return ``first + second`` computed with AND/XOR/shift.

    XOR sums the bits without carrying; AND shifted left by one is the carry
    to be re-added, repeated until no carry remains.

    NOTE(review): with Python's arbitrary-precision ints the carry loop is only
    guaranteed to terminate for non-negative operands -- confirm before using
    with negative numbers.
    """
    while second != 0:
        carry = first & second
        first ^= second
        second = carry << 1
    return first


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    first = int(input("Enter the first number: ").strip())
    second = int(input("Enter the second number: ").strip())
    print(f"{add(first, second) = }")
42
"""simple docstring""" import argparse import json import os from pathlib import Path import requests import torch from transformers import JukeboxConfig, JukeboxModel from transformers.utils import logging logging.set_verbosity_info() A_ : str =logging.get_logger(__name__) A_ : Any ="""https://openaipublic.azureedge.net/jukebox/models/""" A_ : Dict ={ """jukebox-1b-lyrics""": [ """5b/vqvae.pth.tar""", """5b/prior_level_0.pth.tar""", """5b/prior_level_1.pth.tar""", """1b_lyrics/prior_level_2.pth.tar""", ], """jukebox-5b-lyrics""": [ """5b/vqvae.pth.tar""", """5b/prior_level_0.pth.tar""", """5b/prior_level_1.pth.tar""", """5b_lyrics/prior_level_2.pth.tar""", ], } def SCREAMING_SNAKE_CASE_ ( snake_case : str )-> Optional[Any]: if key.endswith('.model.1.bias' ) and len(key.split('.' ) ) > 10: _lowerCamelCase = key.replace('.model.1.bias' , '.conv1d_1.bias' ) elif key.endswith('.model.1.weight' ) and len(key.split('.' ) ) > 10: _lowerCamelCase = key.replace('.model.1.weight' , '.conv1d_1.weight' ) elif key.endswith('.model.3.bias' ) and len(key.split('.' ) ) > 10: _lowerCamelCase = key.replace('.model.3.bias' , '.conv1d_2.bias' ) elif key.endswith('.model.3.weight' ) and len(key.split('.' ) ) > 10: _lowerCamelCase = key.replace('.model.3.weight' , '.conv1d_2.weight' ) if "conditioner_blocks.0." in key: _lowerCamelCase = key.replace('conditioner_blocks.0' , 'conditioner_blocks' ) if "prime_prior" in key: _lowerCamelCase = key.replace('prime_prior' , 'encoder' ) if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key: _lowerCamelCase = key.replace('.emb.' , '.' ) if key.endswith('k' ): # replace vqvae.X.k with vqvae.X.codebook return key.replace('.k' , '.codebook' ) if "y_emb." in key: return key.replace('y_emb.' , 'metadata_embedding.' ) if "x_emb.emb." 
in key: _lowerCamelCase = key.replace('0.x_emb.emb' , 'embed_tokens' ) if "prime_state_ln" in key: return key.replace('prime_state_ln' , 'encoder.final_layer_norm' ) if ".ln" in key: return key.replace('.ln' , '.layer_norm' ) if "_ln" in key: return key.replace('_ln' , '_layer_norm' ) if "prime_state_proj" in key: return key.replace('prime_state_proj' , 'encoder.proj_in' ) if "prime_x_out" in key: return key.replace('prime_x_out' , 'encoder.lm_head' ) if "prior.x_out" in key: return key.replace('x_out' , 'fc_proj_out' ) if "x_emb" in key: return key.replace('x_emb' , 'embed_tokens' ) return key def SCREAMING_SNAKE_CASE_ ( snake_case : Tuple , snake_case : Optional[int] , snake_case : int , snake_case : Union[str, Any] )-> Dict: _lowerCamelCase = {} import re _lowerCamelCase = re.compile(r'encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)' ) _lowerCamelCase = re.compile( r'encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' ) _lowerCamelCase = re.compile(r'encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)' ) _lowerCamelCase = re.compile(r'decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)' ) _lowerCamelCase = re.compile( r'decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' ) _lowerCamelCase = re.compile(r'decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)' ) _lowerCamelCase = re.compile(r'conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)' ) _lowerCamelCase = re.compile( r'conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' ) _lowerCamelCase = re.compile(r'conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)' ) for original_key, value in state_dict.items(): # rename vqvae.encoder keys if re_encoder_block_conv_in.fullmatch(snake_case ): _lowerCamelCase = re_encoder_block_conv_in.match(snake_case ) _lowerCamelCase = regex_match.groups() _lowerCamelCase = int(groups[2] ) * 2 + int(groups[3] ) 
_lowerCamelCase = f'encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}' _lowerCamelCase = re_encoder_block_conv_in.sub(snake_case , snake_case ) elif re_encoder_block_resnet.fullmatch(snake_case ): _lowerCamelCase = re_encoder_block_resnet.match(snake_case ) _lowerCamelCase = regex_match.groups() _lowerCamelCase = int(groups[2] ) * 2 + int(groups[3] ) _lowerCamelCase = {'1': 1, '3': 2}[groups[-2]] _lowerCamelCase = f'encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.' _lowerCamelCase = f'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}' _lowerCamelCase = prefix + resnet_block _lowerCamelCase = re_encoder_block_resnet.sub(snake_case , snake_case ) elif re_encoder_block_proj_out.fullmatch(snake_case ): _lowerCamelCase = re_encoder_block_proj_out.match(snake_case ) _lowerCamelCase = regex_match.groups() _lowerCamelCase = f'encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}' _lowerCamelCase = re_encoder_block_proj_out.sub(snake_case , snake_case ) # rename vqvae.decoder keys elif re_decoder_block_conv_out.fullmatch(snake_case ): _lowerCamelCase = re_decoder_block_conv_out.match(snake_case ) _lowerCamelCase = regex_match.groups() _lowerCamelCase = int(groups[2] ) * 2 + int(groups[3] ) - 2 _lowerCamelCase = f'decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}' _lowerCamelCase = re_decoder_block_conv_out.sub(snake_case , snake_case ) elif re_decoder_block_resnet.fullmatch(snake_case ): _lowerCamelCase = re_decoder_block_resnet.match(snake_case ) _lowerCamelCase = regex_match.groups() _lowerCamelCase = int(groups[2] ) * 2 + int(groups[3] ) - 2 _lowerCamelCase = {'1': 1, '3': 2}[groups[-2]] _lowerCamelCase = f'decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.' 
_lowerCamelCase = f'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}' _lowerCamelCase = prefix + resnet_block _lowerCamelCase = re_decoder_block_resnet.sub(snake_case , snake_case ) elif re_decoder_block_proj_in.fullmatch(snake_case ): _lowerCamelCase = re_decoder_block_proj_in.match(snake_case ) _lowerCamelCase = regex_match.groups() _lowerCamelCase = f'decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}' _lowerCamelCase = re_decoder_block_proj_in.sub(snake_case , snake_case ) # rename prior cond.model to upsampler.upsample_block and resnet elif re_prior_cond_conv_out.fullmatch(snake_case ): _lowerCamelCase = re_prior_cond_conv_out.match(snake_case ) _lowerCamelCase = regex_match.groups() _lowerCamelCase = int(groups[1] ) * 2 + int(groups[2] ) - 2 _lowerCamelCase = f'conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}' _lowerCamelCase = re_prior_cond_conv_out.sub(snake_case , snake_case ) elif re_prior_cond_resnet.fullmatch(snake_case ): _lowerCamelCase = re_prior_cond_resnet.match(snake_case ) _lowerCamelCase = regex_match.groups() _lowerCamelCase = int(groups[1] ) * 2 + int(groups[2] ) - 2 _lowerCamelCase = {'1': 1, '3': 2}[groups[-2]] _lowerCamelCase = f'conditioner_blocks.upsampler.upsample_block.{block_index}.' 
_lowerCamelCase = f'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}' _lowerCamelCase = prefix + resnet_block _lowerCamelCase = re_prior_cond_resnet.sub(snake_case , snake_case ) elif re_prior_cond_proj_in.fullmatch(snake_case ): _lowerCamelCase = re_prior_cond_proj_in.match(snake_case ) _lowerCamelCase = regex_match.groups() _lowerCamelCase = f'conditioner_blocks.upsampler.proj_in.{groups[-1]}' _lowerCamelCase = re_prior_cond_proj_in.sub(snake_case , snake_case ) # keep original key else: _lowerCamelCase = original_key _lowerCamelCase = replace_key(snake_case ) if f'{key_prefix}.{key}' not in model_state_dict or key is None: print(f'failed converting {original_key} to {key}, does not match' ) # handle missmatched shape elif value.shape != model_state_dict[f'{key_prefix}.{key}'].shape: _lowerCamelCase = model_state_dict[f'{key_prefix}.{key}'] print(f'{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match' ) _lowerCamelCase = original_key _lowerCamelCase = original_key _lowerCamelCase = value return new_dict @torch.no_grad() def SCREAMING_SNAKE_CASE_ ( snake_case : Union[str, Any]=None , snake_case : List[str]=None )-> Union[str, Any]: for file in MODEL_MAPPING[model_name]: if not os.path.isfile(f'{pytorch_dump_folder_path}/{file.split("/" )[-1]}' ): _lowerCamelCase = requests.get(f'{PREFIX}{file}' , allow_redirects=snake_case ) os.makedirs(f'{pytorch_dump_folder_path}/' , exist_ok=snake_case ) open(f'{pytorch_dump_folder_path}/{file.split("/" )[-1]}' , 'wb' ).write(r.content ) _lowerCamelCase = MODEL_MAPPING[model_name.split('/' )[-1]] _lowerCamelCase = JukeboxConfig.from_pretrained(snake_case ) _lowerCamelCase = JukeboxModel(snake_case ) _lowerCamelCase = [] _lowerCamelCase = {} for i, dict_name in enumerate(snake_case ): _lowerCamelCase = torch.load(f'{pytorch_dump_folder_path}/{dict_name.split("/" )[-1]}' )['model'] _lowerCamelCase = {} for k in old_dic.keys(): if k.endswith('.b' ): _lowerCamelCase = old_dic[k] elif 
k.endswith('.w' ): _lowerCamelCase = old_dic[k] elif "level_2" not in dict_name and "cond.model." in k: _lowerCamelCase = old_dic[k] else: _lowerCamelCase = old_dic[k] _lowerCamelCase = 'vqvae' if i == 0 else f'priors.{3 - i}' _lowerCamelCase = fix_jukebox_keys(snake_case , model.state_dict() , snake_case , snake_case ) weight_dict.append(snake_case ) _lowerCamelCase = weight_dict.pop(0 ) model.vqvae.load_state_dict(snake_case ) for i in range(len(snake_case ) ): model.priors[i].load_state_dict(weight_dict[2 - i] ) Path(snake_case ).mkdir(exist_ok=snake_case ) with open(f'{pytorch_dump_folder_path}/mapping.json' , 'w' ) as txtfile: json.dump(snake_case , snake_case ) print(f'Saving model {model_name} to {pytorch_dump_folder_path}' ) model.save_pretrained(snake_case ) return weight_dict if __name__ == "__main__": A_ : Optional[int] =argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""jukebox-5b-lyrics""", type=str, help="""Name of the model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default="""jukebox-5b-lyrics-converted""", type=str, help="""Path to the output PyTorch model directory.""", ) A_ : Optional[int] =parser.parse_args() convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
650
0
"""Implementation of the Gabor filter kernel.

https://en.wikipedia.org/wiki/Gabor_filter
"""
import numpy as np


def gabor_filter_kernel(
    ksize: int, sigma: int, theta: int, lambd: int, gamma: int, psi: int
) -> np.ndarray:
    """Build a ``ksize`` x ``ksize`` Gabor kernel.

    :param ksize: kernel side length (bumped to the next odd value if even)
    :param sigma: standard deviation of the Gaussian envelope
    :param theta: orientation of the filter, in degrees
    :param lambd: wavelength of the sinusoidal carrier
    :param gamma: spatial aspect ratio
    :param psi: phase offset of the carrier, in radians
    """
    # Force an odd size so the kernel has an exact center pixel.
    if (ksize % 2) == 0:
        ksize = ksize + 1
    # NOTE(review): source had the nonexistent np.floataa (mangled dtype);
    # float64 restores a valid numpy dtype -- confirm against upstream.
    gabor = np.zeros((ksize, ksize), dtype=np.float64)
    # Fill each value of the kernel.
    for y in range(ksize):
        for x in range(ksize):
            # Distance from the kernel center.
            px = x - ksize // 2
            py = y - ksize // 2
            # Degrees to radians.
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta)
            sin_theta = np.sin(_theta)
            # Rotate the sampling coordinates by theta.
            _x = cos_theta * px + sin_theta * py
            _y = -sin_theta * px + cos_theta * py
            # Gaussian envelope times the sinusoidal carrier.
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)
            ) * np.cos(2 * np.pi * _x / lambd + psi)
    return gabor


if __name__ == "__main__":
    # NOTE(review): ``cva``/``filteraD`` look like mangled ``cv2``/``filter2D``;
    # the import is kept verbatim but moved under the script guard so importing
    # this module does not require OpenCV -- confirm the module name.
    from cva import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filteraD, imread, imshow, waitKey

    import doctest

    doctest.testmod()
    # read original image
    img = imread("../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)
    # Apply multiple Kernel to detect edges
    out = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 120, 150]:
        kernel_10 = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filteraD(gray, CV_8UC3, kernel_10)
    out = out / out.max() * 255
    out = out.astype(np.uint8)
    imshow("Original", gray)
    imshow("Gabor filter with 20x20 mask and 6 directions", out)
    waitKey(0)
712
import pickle
import unittest

import torch

from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu


@require_cpu
class lowerCAmelCase(unittest.TestCase):
    """Checks that an optimizer wrapped by ``Accelerator.prepare`` survives pickling."""

    def a__(self):
        # Minimal model/optimizer pair; the values are irrelevant, only pickling is.
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        # The prepared wrapper must round-trip through pickle without raising.
        optimizer = accelerator.prepare(optimizer)
        try:
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(f"Accelerated optimizer pickling failed with {e}")
        # Reset the global singleton so later tests start from a clean state.
        AcceleratorState._reset_state()
476
0
import argparse import collections import os import re from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_table.py UpperCAmelCase_ : List[str] = "src/transformers" UpperCAmelCase_ : List[str] = "docs/source/en" UpperCAmelCase_ : str = "." def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ): with open(lowerCamelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f: __magic_name__ : Optional[int] =f.readlines() # Find the start prompt. __magic_name__ : Dict =0 while not lines[start_index].startswith(lowerCamelCase ): start_index += 1 start_index += 1 __magic_name__ : Union[str, Any] =start_index while not lines[end_index].startswith(lowerCamelCase ): end_index += 1 end_index -= 1 while len(lines[start_index] ) <= 1: start_index += 1 while len(lines[end_index] ) <= 1: end_index -= 1 end_index += 1 return "".join(lines[start_index:end_index] ), start_index, end_index, lines # Add here suffixes that are used to identify models, separated by | UpperCAmelCase_ : Any = "Model|Encoder|Decoder|ForConditionalGeneration" # Regexes that match TF/Flax/PT model names. UpperCAmelCase_ : Any = re.compile(R"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)") UpperCAmelCase_ : Tuple = re.compile(R"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)") # Will match any TF or Flax model too so need to be in an else branch afterthe two previous regexes. UpperCAmelCase_ : Optional[int] = re.compile(R"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)") # This is to make sure the transformers module imported is the one in the repo. 
UpperCAmelCase_ : Tuple = direct_transformers_import(TRANSFORMERS_PATH) def lowerCAmelCase_ ( lowerCamelCase ): __magic_name__ : int =re.finditer(""".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)""" , lowerCamelCase ) return [m.group(0 ) for m in matches] def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase ): __magic_name__ : Any =2 if text == """✅""" or text == """❌""" else len(lowerCamelCase ) __magic_name__ : Optional[Any] =(width - text_length) // 2 __magic_name__ : str =width - text_length - left_indent return " " * left_indent + text + " " * right_indent def lowerCAmelCase_ ( ): __magic_name__ : Union[str, Any] =transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES __magic_name__ : Optional[int] ={ name: config_maping_names[code] for code, name in transformers_module.MODEL_NAMES_MAPPING.items() if code in config_maping_names } __magic_name__ : Union[str, Any] ={name: config.replace("""Config""" , """""" ) for name, config in model_name_to_config.items()} # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax. __magic_name__ : Any =collections.defaultdict(lowerCamelCase ) __magic_name__ : Union[str, Any] =collections.defaultdict(lowerCamelCase ) __magic_name__ : List[Any] =collections.defaultdict(lowerCamelCase ) __magic_name__ : int =collections.defaultdict(lowerCamelCase ) __magic_name__ : Dict =collections.defaultdict(lowerCamelCase ) # Let's lookup through all transformers object (once). 
for attr_name in dir(lowerCamelCase ): __magic_name__ : Dict =None if attr_name.endswith("""Tokenizer""" ): __magic_name__ : Optional[Any] =slow_tokenizers __magic_name__ : str =attr_name[:-9] elif attr_name.endswith("""TokenizerFast""" ): __magic_name__ : Tuple =fast_tokenizers __magic_name__ : Dict =attr_name[:-13] elif _re_tf_models.match(lowerCamelCase ) is not None: __magic_name__ : List[str] =tf_models __magic_name__ : List[str] =_re_tf_models.match(lowerCamelCase ).groups()[0] elif _re_flax_models.match(lowerCamelCase ) is not None: __magic_name__ : Tuple =flax_models __magic_name__ : Tuple =_re_flax_models.match(lowerCamelCase ).groups()[0] elif _re_pt_models.match(lowerCamelCase ) is not None: __magic_name__ : List[Any] =pt_models __magic_name__ : Any =_re_pt_models.match(lowerCamelCase ).groups()[0] if lookup_dict is not None: while len(lowerCamelCase ) > 0: if attr_name in model_name_to_prefix.values(): __magic_name__ : Optional[int] =True break # Try again after removing the last word in the name __magic_name__ : Union[str, Any] ="""""".join(camel_case_split(lowerCamelCase )[:-1] ) # Let's build that table! __magic_name__ : List[str] =list(model_name_to_config.keys() ) model_names.sort(key=str.lower ) __magic_name__ : Union[str, Any] =["""Model""", """Tokenizer slow""", """Tokenizer fast""", """PyTorch support""", """TensorFlow support""", """Flax Support"""] # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side). 
__magic_name__ : Optional[int] =[len(lowerCamelCase ) + 2 for c in columns] __magic_name__ : int =max([len(lowerCamelCase ) for name in model_names] ) + 2 # Build the table per se __magic_name__ : List[Any] ="""|""" + """|""".join([_center_text(lowerCamelCase , lowerCamelCase ) for c, w in zip(lowerCamelCase , lowerCamelCase )] ) + """|\n""" # Use ":-----:" format to center-aligned table cell texts table += "|" + "|".join([""":""" + """-""" * (w - 2) + """:""" for w in widths] ) + "|\n" __magic_name__ : Optional[int] ={True: """✅""", False: """❌"""} for name in model_names: __magic_name__ : Optional[Any] =model_name_to_prefix[name] __magic_name__ : Optional[Any] =[ name, check[slow_tokenizers[prefix]], check[fast_tokenizers[prefix]], check[pt_models[prefix]], check[tf_models[prefix]], check[flax_models[prefix]], ] table += "|" + "|".join([_center_text(lowerCamelCase , lowerCamelCase ) for l, w in zip(lowerCamelCase , lowerCamelCase )] ) + "|\n" return table def lowerCAmelCase_ ( lowerCamelCase=False ): __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ : List[Any] =_find_text_in_file( filename=os.path.join(lowerCamelCase , """index.md""" ) , start_prompt="""<!--This table is updated automatically from the auto modules""" , end_prompt="""<!-- End table-->""" , ) __magic_name__ : Any =get_model_table_from_auto_modules() if current_table != new_table: if overwrite: with open(os.path.join(lowerCamelCase , """index.md""" ) , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f: f.writelines(lines[:start_index] + [new_table] + lines[end_index:] ) else: raise ValueError( """The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this.""" ) if __name__ == "__main__": UpperCAmelCase_ : List[str] = argparse.ArgumentParser() parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.") UpperCAmelCase_ : Dict = parser.parse_args() check_model_table(args.fix_and_overwrite)
21
'''simple docstring''' import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer from ...utils import logging A : Optional[int] = logging.get_logger(__name__) A : Tuple = '''▁''' A : Dict = {'''vocab_file''': '''sentencepiece.bpe.model'''} A : str = { '''vocab_file''': { '''facebook/mbart-large-50-one-to-many-mmt''': ( '''https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model''' ), } } A : Any = { '''facebook/mbart-large-50-one-to-many-mmt''': 1024, } # fmt: off A : Dict = ['''ar_AR''', '''cs_CZ''', '''de_DE''', '''en_XX''', '''es_XX''', '''et_EE''', '''fi_FI''', '''fr_XX''', '''gu_IN''', '''hi_IN''', '''it_IT''', '''ja_XX''', '''kk_KZ''', '''ko_KR''', '''lt_LT''', '''lv_LV''', '''my_MM''', '''ne_NP''', '''nl_XX''', '''ro_RO''', '''ru_RU''', '''si_LK''', '''tr_TR''', '''vi_VN''', '''zh_CN''', '''af_ZA''', '''az_AZ''', '''bn_IN''', '''fa_IR''', '''he_IL''', '''hr_HR''', '''id_ID''', '''ka_GE''', '''km_KH''', '''mk_MK''', '''ml_IN''', '''mn_MN''', '''mr_IN''', '''pl_PL''', '''ps_AF''', '''pt_XX''', '''sv_SE''', '''sw_KE''', '''ta_IN''', '''te_IN''', '''th_TH''', '''tl_XX''', '''uk_UA''', '''ur_PK''', '''xh_ZA''', '''gl_ES''', '''sl_SI'''] class __lowerCamelCase ( a_ ): """simple docstring""" a = VOCAB_FILES_NAMES a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a = PRETRAINED_VOCAB_FILES_MAP a = ["input_ids", "attention_mask"] a = [] a = [] def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Optional[Any]=None , SCREAMING_SNAKE_CASE : List[Any]=None , SCREAMING_SNAKE_CASE : Union[str, Any]="</s>" , SCREAMING_SNAKE_CASE : Optional[Any]="</s>" , SCREAMING_SNAKE_CASE : Dict="<s>" , SCREAMING_SNAKE_CASE : int="<unk>" , SCREAMING_SNAKE_CASE : Tuple="<pad>" , SCREAMING_SNAKE_CASE : List[str]="<mask>" , SCREAMING_SNAKE_CASE : Optional[Dict[str, Any]] = None , 
**SCREAMING_SNAKE_CASE : str , ): # Mask token behave like a normal word, i.e. include the space before it _A : Tuple = AddedToken(SCREAMING_SNAKE_CASE , lstrip=SCREAMING_SNAKE_CASE , rstrip=SCREAMING_SNAKE_CASE) if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) else mask_token _A : Dict = {} if sp_model_kwargs is None else sp_model_kwargs _A : Optional[int] = kwargs.get('additional_special_tokens' , []) kwargs["additional_special_tokens"] += [ code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"] ] super().__init__( src_lang=SCREAMING_SNAKE_CASE , tgt_lang=SCREAMING_SNAKE_CASE , eos_token=SCREAMING_SNAKE_CASE , unk_token=SCREAMING_SNAKE_CASE , sep_token=SCREAMING_SNAKE_CASE , cls_token=SCREAMING_SNAKE_CASE , pad_token=SCREAMING_SNAKE_CASE , mask_token=SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **SCREAMING_SNAKE_CASE , ) _A : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(str(SCREAMING_SNAKE_CASE)) _A : Tuple = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' 
| '▁' | 's' | '▁de' | '-' | '▁a' # Mimic fairseq token-to-id alignment for the first 4 token _A : Tuple = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3} # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab _A : List[Any] = 1 _A : List[Any] = len(self.sp_model) _A : Union[str, Any] = { code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(SCREAMING_SNAKE_CASE) } _A : Dict = {v: k for k, v in self.lang_code_to_id.items()} _A : List[str] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset self.fairseq_tokens_to_ids.update(self.lang_code_to_id) _A : List[Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()} _A : str = src_lang if src_lang is not None else 'en_XX' _A : Any = self.lang_code_to_id[self._src_lang] _A : int = tgt_lang self.set_src_lang_special_tokens(self._src_lang) @property def A ( self : Optional[Any]): return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1 # Plus 1 for the mask token @property def A ( self : Union[str, Any]): return self._src_lang @src_lang.setter def A ( self : List[str] , SCREAMING_SNAKE_CASE : str): _A : Tuple = new_src_lang self.set_src_lang_special_tokens(self._src_lang) def __getstate__( self : Dict): _A : int = self.__dict__.copy() _A : Optional[Any] = None return state def __setstate__( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Dict): _A : Tuple = d # for backward compatibility if not hasattr(self , 'sp_model_kwargs'): _A : Any = {} _A : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(self.vocab_file) def A ( self : Any): _A : str = {self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE): i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab def A ( self : int , SCREAMING_SNAKE_CASE : str): return self.sp_model.encode(SCREAMING_SNAKE_CASE , out_type=SCREAMING_SNAKE_CASE) def A ( self : List[str] , SCREAMING_SNAKE_CASE : str): if token in 
self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] _A : str = self.sp_model.PieceToId(SCREAMING_SNAKE_CASE) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def A ( self : Union[str, Any] , SCREAMING_SNAKE_CASE : int): if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset) def A ( self : List[Any] , SCREAMING_SNAKE_CASE : List[Any]): _A : Tuple = [] _A : Any = '' _A : Union[str, Any] = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(SCREAMING_SNAKE_CASE) + token _A : Any = True _A : Dict = [] else: current_sub_tokens.append(SCREAMING_SNAKE_CASE) _A : Tuple = False out_string += self.sp_model.decode(SCREAMING_SNAKE_CASE) return out_string.strip() def A ( self : Optional[int] , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Optional[str] = None): if not os.path.isdir(SCREAMING_SNAKE_CASE): logger.error(F'Vocabulary path ({save_directory}) should be a directory') return _A : Optional[int] = os.path.join( SCREAMING_SNAKE_CASE , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']) if os.path.abspath(self.vocab_file) != os.path.abspath(SCREAMING_SNAKE_CASE) and os.path.isfile(self.vocab_file): copyfile(self.vocab_file , SCREAMING_SNAKE_CASE) elif not os.path.isfile(self.vocab_file): with open(SCREAMING_SNAKE_CASE , 'wb') as fi: _A : Optional[Any] = self.sp_model.serialized_model_proto() fi.write(SCREAMING_SNAKE_CASE) return (out_vocab_file,) def A ( self : int , SCREAMING_SNAKE_CASE : List[int] , SCREAMING_SNAKE_CASE : Optional[List[int]] = None , SCREAMING_SNAKE_CASE : bool = False): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=SCREAMING_SNAKE_CASE , 
token_ids_a=SCREAMING_SNAKE_CASE , already_has_special_tokens=SCREAMING_SNAKE_CASE) _A : str = [1] * len(self.prefix_tokens) _A : Optional[Any] = [1] * len(self.suffix_tokens) if token_ids_a is None: return prefix_ones + ([0] * len(SCREAMING_SNAKE_CASE)) + suffix_ones return prefix_ones + ([0] * len(SCREAMING_SNAKE_CASE)) + ([0] * len(SCREAMING_SNAKE_CASE)) + suffix_ones def A ( self : Optional[int] , SCREAMING_SNAKE_CASE : List[int] , SCREAMING_SNAKE_CASE : Optional[List[int]] = None): if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def A ( self : Optional[Any] , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Optional[str] , SCREAMING_SNAKE_CASE : Optional[str] , **SCREAMING_SNAKE_CASE : str): if src_lang is None or tgt_lang is None: raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model') _A : Optional[Any] = src_lang _A : Optional[int] = self(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE , return_tensors=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE) _A : List[str] = self.convert_tokens_to_ids(SCREAMING_SNAKE_CASE) _A : List[str] = tgt_lang_id return inputs def A ( self : Any , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : str = "en_XX" , SCREAMING_SNAKE_CASE : Optional[List[str]] = None , SCREAMING_SNAKE_CASE : str = "ro_RO" , **SCREAMING_SNAKE_CASE : Optional[Any] , ): _A : Any = src_lang _A : Optional[int] = tgt_lang return super().prepare_seqaseq_batch(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE) def A ( self : List[Any]): return self.set_src_lang_special_tokens(self.src_lang) def A ( self : Any): return self.set_tgt_lang_special_tokens(self.tgt_lang) def A ( self : Tuple , SCREAMING_SNAKE_CASE : str): _A : Optional[int] = 
self.lang_code_to_id[src_lang] _A : Dict = [self.cur_lang_code_id] _A : List[str] = [self.eos_token_id] def A ( self : int , SCREAMING_SNAKE_CASE : str): _A : str = self.lang_code_to_id[tgt_lang] _A : int = [self.cur_lang_code_id] _A : str = [self.eos_token_id]
128
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available UpperCamelCase_ = { 'configuration_nllb_moe': [ 'NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'NllbMoeConfig', ] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase_ = [ 'NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST', 'NllbMoeForConditionalGeneration', 'NllbMoeModel', 'NllbMoePreTrainedModel', 'NllbMoeTop2Router', 'NllbMoeSparseMLP', ] if TYPE_CHECKING: from .configuration_nllb_moe import ( NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP, NllbMoeConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_nllb_moe import ( NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST, NllbMoeForConditionalGeneration, NllbMoeModel, NllbMoePreTrainedModel, NllbMoeSparseMLP, NllbMoeTopaRouter, ) else: import sys UpperCamelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
210
"""simple docstring""" from dataclasses import dataclass, field from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union import pyarrow as pa if TYPE_CHECKING: from .features import FeatureType @dataclass class snake_case : a_ : List[str] a_ : Optional[str] = None # Automatically constructed a_ : ClassVar[str] = "dict" a_ : ClassVar[Any] = None a_ : str = field(default="""Translation""" , init=SCREAMING_SNAKE_CASE_ , repr=SCREAMING_SNAKE_CASE_ ) def __call__( self) ->Tuple: return pa.struct({lang: pa.string() for lang in sorted(self.languages)}) def UpperCAmelCase__ ( self) ->Union["FeatureType", Dict[str, "FeatureType"]]: from .features import Value return {k: Value("string") for k in sorted(self.languages)} @dataclass class snake_case : a_ : Optional[List] = None a_ : Optional[int] = None a_ : Optional[str] = None # Automatically constructed a_ : ClassVar[str] = "dict" a_ : ClassVar[Any] = None a_ : str = field(default="""TranslationVariableLanguages""" , init=SCREAMING_SNAKE_CASE_ , repr=SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase__ ( self) ->Optional[int]: a_ = sorted(set(self.languages)) if self.languages else None a_ = len(self.languages) if self.languages else None def __call__( self) ->Any: return pa.struct({"language": pa.list_(pa.string()), "translation": pa.list_(pa.string())}) def UpperCAmelCase__ ( self , __UpperCAmelCase) ->int: a_ = set(self.languages) if self.languages and set(__UpperCAmelCase) - lang_set: raise ValueError( F'''Some languages in example ({", ".join(sorted(set(__UpperCAmelCase) - lang_set))}) are not in valid set ({", ".join(__UpperCAmelCase)}).''') # Convert dictionary into tuples, splitting out cases where there are # multiple translations for a single language. 
a_ = [] for lang, text in translation_dict.items(): if isinstance(__UpperCAmelCase , __UpperCAmelCase): translation_tuples.append((lang, text)) else: translation_tuples.extend([(lang, el) for el in text]) # Ensure translations are in ascending order by language code. a_ , a_ = zip(*sorted(__UpperCAmelCase)) return {"language": languages, "translation": translations} def UpperCAmelCase__ ( self) ->Union["FeatureType", Dict[str, "FeatureType"]]: from .features import Sequence, Value return { "language": Sequence(Value("string")), "translation": Sequence(Value("string")), }
210
1
'''simple docstring''' import json import os import re import unittest from transformers import CodeGenTokenizer, CodeGenTokenizerFast from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class UpperCamelCase__ ( SCREAMING_SNAKE_CASE__ ,unittest.TestCase ): lowerCAmelCase__ : Optional[int] = CodeGenTokenizer lowerCAmelCase__ : List[Any] = CodeGenTokenizerFast lowerCAmelCase__ : int = True lowerCAmelCase__ : List[str] = {"add_prefix_space": True} lowerCAmelCase__ : int = False def __a ( self : Optional[Any] ): '''simple docstring''' super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt a__ = [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er", "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>", "<|endoftext|>", ] a__ = dict(zip(lowerCamelCase , range(len(lowerCamelCase ) ) ) ) a__ = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""] a__ = {"unk_token": "<unk>"} a__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) a__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(lowerCamelCase ) + "\n" ) with open(self.merges_file , "w" , encoding="utf-8" ) as fp: fp.write("\n".join(lowerCamelCase ) ) def __a ( self : int , **lowerCamelCase : Dict ): '''simple docstring''' kwargs.update(self.special_tokens_map ) return CodeGenTokenizer.from_pretrained(self.tmpdirname , **lowerCamelCase ) def __a ( self : List[str] , **lowerCamelCase : Optional[Any] ): '''simple docstring''' kwargs.update(self.special_tokens_map ) return CodeGenTokenizerFast.from_pretrained(self.tmpdirname , **lowerCamelCase ) def __a ( self : List[Any] , lowerCamelCase : Optional[int] ): '''simple 
docstring''' a__ = "lower newer" a__ = "lower newer" return input_text, output_text def __a ( self : Union[str, Any] ): '''simple docstring''' a__ = CodeGenTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) a__ = "lower newer" a__ = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"] a__ = tokenizer.tokenize(lowerCamelCase , add_prefix_space=lowerCamelCase ) self.assertListEqual(lowerCamelCase , lowerCamelCase ) a__ = tokens + [tokenizer.unk_token] a__ = [1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9] self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase ) , lowerCamelCase ) def __a ( self : Optional[Any] ): '''simple docstring''' if not self.test_rust_tokenizer: return a__ = self.get_tokenizer() a__ = self.get_rust_tokenizer(add_prefix_space=lowerCamelCase ) a__ = "lower newer" # Testing tokenization a__ = tokenizer.tokenize(lowerCamelCase , add_prefix_space=lowerCamelCase ) a__ = rust_tokenizer.tokenize(lowerCamelCase ) self.assertListEqual(lowerCamelCase , lowerCamelCase ) # Testing conversion to ids without special tokens a__ = tokenizer.encode(lowerCamelCase , add_special_tokens=lowerCamelCase , add_prefix_space=lowerCamelCase ) a__ = rust_tokenizer.encode(lowerCamelCase , add_special_tokens=lowerCamelCase ) self.assertListEqual(lowerCamelCase , lowerCamelCase ) # Testing conversion to ids with special tokens a__ = self.get_rust_tokenizer(add_prefix_space=lowerCamelCase ) a__ = tokenizer.encode(lowerCamelCase , add_prefix_space=lowerCamelCase ) a__ = rust_tokenizer.encode(lowerCamelCase ) self.assertListEqual(lowerCamelCase , lowerCamelCase ) # Testing the unknown token a__ = tokens + [rust_tokenizer.unk_token] a__ = [1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9] self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(lowerCamelCase ) , lowerCamelCase ) def __a ( self : List[str] , *lowerCamelCase : List[Any] , **lowerCamelCase : Optional[Any] ): '''simple docstring''' # It's very difficult to mix/test pretokenization with byte-level # And get 
both CodeGen and Roberta to work at the same time (mostly an issue of adding a space before the string) pass def __a ( self : Tuple , lowerCamelCase : str=1_5 ): '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): a__ = self.rust_tokenizer_class.from_pretrained(lowerCamelCase , **lowerCamelCase ) # Simple input a__ = "This is a simple input" a__ = ["This is a simple input 1", "This is a simple input 2"] a__ = ("This is a simple input", "This is a pair") a__ = [ ("This is a simple input 1", "This is a simple input 2"), ("This is a simple pair 1", "This is a simple pair 2"), ] # Simple input tests self.assertRaises(lowerCamelCase , tokenizer_r.encode , lowerCamelCase , max_length=lowerCamelCase , padding="max_length" ) # Simple input self.assertRaises(lowerCamelCase , tokenizer_r.encode_plus , lowerCamelCase , max_length=lowerCamelCase , padding="max_length" ) # Simple input self.assertRaises( lowerCamelCase , tokenizer_r.batch_encode_plus , lowerCamelCase , max_length=lowerCamelCase , padding="max_length" , ) # Pair input self.assertRaises(lowerCamelCase , tokenizer_r.encode , lowerCamelCase , max_length=lowerCamelCase , padding="max_length" ) # Pair input self.assertRaises(lowerCamelCase , tokenizer_r.encode_plus , lowerCamelCase , max_length=lowerCamelCase , padding="max_length" ) # Pair input self.assertRaises( lowerCamelCase , tokenizer_r.batch_encode_plus , lowerCamelCase , max_length=lowerCamelCase , padding="max_length" , ) def __a ( self : Optional[int] ): '''simple docstring''' a__ = CodeGenTokenizer.from_pretrained(self.tmpdirname , pad_token="<pad>" ) # Simple input a__ = "This is a simple input" a__ = ["This is a simple input looooooooong", "This is a simple input"] a__ = ("This is a simple input", "This is a pair") a__ = [ ("This is a simple input loooooong", "This is a simple input"), ("This is a simple pair loooooong", "This is a simple 
pair"), ] a__ = tokenizer.pad_token_id a__ = tokenizer(lowerCamelCase , padding="max_length" , max_length=3_0 , return_tensors="np" ) a__ = tokenizer(lowerCamelCase , padding=lowerCamelCase , truncate=lowerCamelCase , return_tensors="np" ) a__ = tokenizer(*lowerCamelCase , padding="max_length" , max_length=6_0 , return_tensors="np" ) a__ = tokenizer(lowerCamelCase , padding=lowerCamelCase , truncate=lowerCamelCase , return_tensors="np" ) # s # test single string max_length padding self.assertEqual(out_s["input_ids"].shape[-1] , 3_0 ) self.assertTrue(pad_token_id in out_s["input_ids"] ) self.assertTrue(0 in out_s["attention_mask"] ) # s2 # test automatic padding self.assertEqual(out_sa["input_ids"].shape[-1] , 3_3 ) # long slice doesn't have padding self.assertFalse(pad_token_id in out_sa["input_ids"][0] ) self.assertFalse(0 in out_sa["attention_mask"][0] ) # short slice does have padding self.assertTrue(pad_token_id in out_sa["input_ids"][1] ) self.assertTrue(0 in out_sa["attention_mask"][1] ) # p # test single pair max_length padding self.assertEqual(out_p["input_ids"].shape[-1] , 6_0 ) self.assertTrue(pad_token_id in out_p["input_ids"] ) self.assertTrue(0 in out_p["attention_mask"] ) # p2 # test automatic padding pair self.assertEqual(out_pa["input_ids"].shape[-1] , 5_2 ) # long slice pair doesn't have padding self.assertFalse(pad_token_id in out_pa["input_ids"][0] ) self.assertFalse(0 in out_pa["attention_mask"][0] ) # short slice pair does have padding self.assertTrue(pad_token_id in out_pa["input_ids"][1] ) self.assertTrue(0 in out_pa["attention_mask"][1] ) def __a ( self : Union[str, Any] ): '''simple docstring''' a__ = "$$$" a__ = CodeGenTokenizer.from_pretrained(self.tmpdirname , bos_token=lowerCamelCase , add_bos_token=lowerCamelCase ) a__ = "This is a simple input" a__ = ["This is a simple input 1", "This is a simple input 2"] a__ = tokenizer.bos_token_id a__ = tokenizer(lowerCamelCase ) a__ = tokenizer(lowerCamelCase ) self.assertEqual(out_s.input_ids[0] 
, lowerCamelCase ) self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) ) a__ = tokenizer.decode(out_s.input_ids ) a__ = tokenizer.batch_decode(out_sa.input_ids ) self.assertEqual(decode_s.split()[0] , lowerCamelCase ) self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) ) @slow def __a ( self : Tuple ): '''simple docstring''' a__ = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono" ) a__ = "\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#" a__ = "\nif len_a > len_b: result = a\nelse: result = b" a__ = tokenizer.encode(lowerCamelCase ) a__ = ["^#", re.escape("<|endoftext|>" ), "^'''", "^\"\"\"", "\n\n\n"] a__ = tokenizer.decode(lowerCamelCase , truncate_before_pattern=lowerCamelCase ) self.assertEqual(lowerCamelCase , lowerCamelCase ) def __a ( self : Tuple ): '''simple docstring''' pass
489
import operator as op def a__ ( __UpperCamelCase ): SCREAMING_SNAKE_CASE_ = [] SCREAMING_SNAKE_CASE_ = lambda __UpperCamelCase , __UpperCamelCase : int(x / y ) # noqa: E731 integer division operation SCREAMING_SNAKE_CASE_ = { "^": op.pow, "*": op.mul, "/": div, "+": op.add, "-": op.sub, } # operators & their respective operation # print table header print("Symbol".center(8 ) , "Action".center(1_2 ) , "Stack" , sep=" | " ) print("-" * (3_0 + len(__UpperCamelCase )) ) for x in post_fix: if x.isdigit(): # if x in digit stack.append(__UpperCamelCase ) # append x to stack # output in tabular format print(x.rjust(8 ) , ("push(" + x + ")").ljust(1_2 ) , ",".join(__UpperCamelCase ) , sep=" | " ) else: SCREAMING_SNAKE_CASE_ = stack.pop() # pop stack # output in tabular format print("".rjust(8 ) , ("pop(" + b + ")").ljust(1_2 ) , ",".join(__UpperCamelCase ) , sep=" | " ) SCREAMING_SNAKE_CASE_ = stack.pop() # pop stack # output in tabular format print("".rjust(8 ) , ("pop(" + a + ")").ljust(1_2 ) , ",".join(__UpperCamelCase ) , sep=" | " ) stack.append( str(opr[x](int(__UpperCamelCase ) , int(__UpperCamelCase ) ) ) ) # evaluate the 2 values popped from stack & push result to stack # output in tabular format print( x.rjust(8 ) , ("push(" + a + x + b + ")").ljust(1_2 ) , ",".join(__UpperCamelCase ) , sep=" | " , ) return int(stack[0] ) if __name__ == "__main__": A : str = input("\n\nEnter a Postfix Equation (space separated) = ").split(" ") print("\n\tResult = ", solve(Postfix))
140
0
import os from pathlib import Path def __A ( ): """simple docstring""" from torch.utils.cpp_extension import load __a = Path(_A ).resolve().parent.parent.parent / "kernels" / "deformable_detr" __a = [ root / filename for filename in [ "vision.cpp", os.path.join("cpu" , "ms_deform_attn_cpu.cpp" ), os.path.join("cuda" , "ms_deform_attn_cuda.cu" ), ] ] load( "MultiScaleDeformableAttention" , _A , with_cuda=_A , extra_include_paths=[str(_A )] , extra_cflags=["-DWITH_CUDA=1"] , extra_cuda_cflags=[ "-DCUDA_HAS_FP16=1", "-D__CUDA_NO_HALF_OPERATORS__", "-D__CUDA_NO_HALF_CONVERSIONS__", "-D__CUDA_NO_HALF2_OPERATORS__", ] , ) import MultiScaleDeformableAttention as MSDA return MSDA
525
def __A ( _A ): """simple docstring""" __a = [] for data in source_data: for i, el in enumerate(_A ): if len(_A ) < i + 1: data_lists.append([] ) data_lists[i].append(float(_A ) ) return data_lists def __A ( _A , _A ): """simple docstring""" __a = [] for dlist, weight in zip(_A , _A ): __a = min(_A ) __a = max(_A ) __a = [] # for weight 0 score is 1 - actual score if weight == 0: for item in dlist: try: score.append(1 - ((item - mind) / (maxd - mind)) ) except ZeroDivisionError: score.append(1 ) elif weight == 1: for item in dlist: try: score.append((item - mind) / (maxd - mind) ) except ZeroDivisionError: score.append(0 ) # weight not 0 or 1 else: __a = f"""Invalid weight of {weight:f} provided""" raise ValueError(_A ) score_lists.append(_A ) return score_lists def __A ( _A ): """simple docstring""" __a = [0 for i in range(len(score_lists[0] ) )] for slist in score_lists: for j, ele in enumerate(_A ): __a = final_scores[j] + ele return final_scores def __A ( _A , _A ): """simple docstring""" __a = get_data(_A ) __a = calculate_each_score(_A , _A ) __a = generate_final_scores(_A ) # append scores to source data for i, ele in enumerate(_A ): source_data[i].append(_A ) return source_data
525
1
'''simple docstring''' from .imports import is_tqdm_available if is_tqdm_available(): from tqdm.auto import tqdm as _tqdm from ..state import PartialState def _lowercase ( lowerCamelCase__ = True , *lowerCamelCase__ , **lowerCamelCase__ ) -> Tuple: """simple docstring""" if not is_tqdm_available(): raise ImportError("Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`." ) __UpperCAmelCase : Any = False if main_process_only: __UpperCAmelCase : Union[str, Any] = PartialState().local_process_index == 0 return _tqdm(*lowerCamelCase__ , **lowerCamelCase__ , disable=lowerCamelCase__ )
168
'''simple docstring''' import argparse import random import joblib import numpy as np import torch from igf.igf import ( SecondaryLearner, collect_objective_set, compute_perplexity, generate_datasets, load_gpta, recopy_gpta, set_seed, train_secondary_learner, ) from torch.utils.data import DataLoader, RandomSampler from transformers import GPTaLMHeadModel def _lowercase ( lowerCamelCase__=32 , lowerCamelCase__=10 , lowerCamelCase__=100 , lowerCamelCase__=1026 , lowerCamelCase__=True , lowerCamelCase__="data/tokenized_stories_train_wikitext103.jbl" , lowerCamelCase__="igf_context_pairs.jbl" , ) -> str: """simple docstring""" set_seed(3 ) # generate train_data and objective_set __UpperCAmelCase , __UpperCAmelCase : Tuple = generate_datasets( lowerCamelCase__ , lowerCamelCase__ , number=lowerCamelCase__ , min_len=1026 , trim=lowerCamelCase__ ) # keeps model same across runs set_seed(4 ) # model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights # can we train on GPU? 
__UpperCAmelCase : Optional[Any] = torch.device("cuda:0" if torch.cuda.is_available() else "cpu" ) # load pretrained model __UpperCAmelCase : Optional[int] = load_gpta("gpt2" ).to(lowerCamelCase__ ) print("computing perplexity on objective set" ) __UpperCAmelCase : str = compute_perplexity(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ).item() print("perplexity on objective set:" , lowerCamelCase__ ) # collect igf pairs and save to file demo.jbl collect_objective_set(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) # clean up, delete model and data we don't need anymore del model, train_data, objective_set torch.cuda.empty_cache() def _lowercase ( lowerCamelCase__ , lowerCamelCase__=15 , lowerCamelCase__=128 , lowerCamelCase__=100 , lowerCamelCase__="igf_model.pt" , ) -> int: """simple docstring""" set_seed(42 ) # Load pre-trained model __UpperCAmelCase : Tuple = GPTaLMHeadModel.from_pretrained("gpt2" ) # Initialize secondary learner to use embedding weights of model __UpperCAmelCase : Dict = SecondaryLearner(lowerCamelCase__ ) # Train secondary learner __UpperCAmelCase : Optional[int] = train_secondary_learner( lowerCamelCase__ , lowerCamelCase__ , max_epochs=lowerCamelCase__ , batch_size=lowerCamelCase__ , eval_freq=100 , igf_model_path=lowerCamelCase__ , ) del model, secondary_learner_train_data torch.cuda.empty_cache() return secondary_learner def _lowercase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=32 , lowerCamelCase__=1000 , lowerCamelCase__=16 , lowerCamelCase__=1.0 , lowerCamelCase__=recopy_gpta , lowerCamelCase__=None , lowerCamelCase__=10 , lowerCamelCase__="gpt2_finetuned.pt" , ) -> List[Any]: """simple docstring""" __UpperCAmelCase : Dict = torch.device("cuda:0" if torch.cuda.is_available() else "cpu" ) __UpperCAmelCase : Optional[int] = RandomSampler(lowerCamelCase__ ) __UpperCAmelCase : List[str] = 
DataLoader(lowerCamelCase__ , sampler=lowerCamelCase__ ) __UpperCAmelCase : Optional[Any] = max_steps // (len(lowerCamelCase__ )) + 1 __UpperCAmelCase : Any = 0 __UpperCAmelCase : Optional[int] = torch.zeros((1, context_len) , dtype=torch.long , device=lowerCamelCase__ ) __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Dict = recopy_model(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) model.train() if secondary_learner is not None: secondary_learner.to(lowerCamelCase__ ) secondary_learner.eval() __UpperCAmelCase : Optional[Any] = [] __UpperCAmelCase : Any = 0 __UpperCAmelCase : Optional[int] = [] __UpperCAmelCase : str = [] # Compute the performance of the transformer model at the beginning __UpperCAmelCase : Union[str, Any] = compute_perplexity(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) test_perps.append(lowerCamelCase__ ) print("Test perplexity, step" , lowerCamelCase__ , ":" , lowerCamelCase__ ) for epoch in range(int(lowerCamelCase__ ) ): for step, example in enumerate(lowerCamelCase__ ): torch.cuda.empty_cache() __UpperCAmelCase : Optional[Any] = random.randint(0 , example.size(2 ) - context_len - 1 ) __UpperCAmelCase : Union[str, Any] = example[0, 0, start : start + context_len] lm_optimizer.zero_grad() __UpperCAmelCase : int = model(lowerCamelCase__ , labels=lowerCamelCase__ ) __UpperCAmelCase : List[str] = True if secondary_learner is not None: __UpperCAmelCase : Dict = secondary_learner.forward( torch.tensor(lowerCamelCase__ , dtype=torch.long , device=lowerCamelCase__ ).unsqueeze(0 ) )[0].item() observed_qs.append(float(lowerCamelCase__ ) ) # Here we implement the simple non-constant threshold for the predicted IG(X) value # We will decay the selectivity of our secondary learner filter from # 1 standard deviation above average to 1 below average after 10 batches. 
if global_step == 10: __UpperCAmelCase : str = -1 if predicted_q < threshold: __UpperCAmelCase : Dict = False # If we passed the filter, add the context to the batch! if do_backprop: contexts.append(np.array(context.cpu() ) ) __UpperCAmelCase : Optional[Any] = outputs[0] lm_loss.backward() examples += 1 del outputs # Once the batch is filled with enough contexts, backprop on the batch. if examples == batch_size: torch.cuda.empty_cache() __UpperCAmelCase : List[Any] = 0 # Do LM backprop torch.nn.utils.clip_grad_norm_(model.parameters() , 3.0 ) lm_optimizer.step() lm_scheduler.step() # Update learning rate schedule global_step += 1 # Compute the performance of the transformer model at this batch if global_step % eval_interval == 0: __UpperCAmelCase : List[str] = compute_perplexity(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) test_perps.append(lowerCamelCase__ ) print("Test perplexity, step" , lowerCamelCase__ , ":" , lowerCamelCase__ ) # Break out of the loop after 60 batches if max_steps > 0 and global_step > 60: break if max_steps > 0 and global_step > 60: break # save finetuned transformer model torch.save(model.state_dict() , lowerCamelCase__ ) torch.cuda.empty_cache() # Do some cleaning up so we can reinitialize for the next run of this function del lm_optimizer del lm_scheduler return model def _lowercase ( ) -> Optional[Any]: """simple docstring""" __UpperCAmelCase : Tuple = argparse.ArgumentParser(description="Fine-tune a transformer model with IGF on a language modeling task" ) # Required parameters parser.add_argument( "--data_dir" , default=lowerCamelCase__ , type=lowerCamelCase__ , required=lowerCamelCase__ , help="The input data dir. Should contain data files for WikiText." 
, ) parser.add_argument( "--model_name_or_path" , default=lowerCamelCase__ , type=lowerCamelCase__ , required=lowerCamelCase__ , help="Path to pretrained model or model identifier from huggingface.co/models" , ) parser.add_argument( "--data_file" , type=lowerCamelCase__ , default=lowerCamelCase__ , help=( "A jbl file containing tokenized data which can be split as objective dataset, " "train_dataset and test_dataset." ) , ) parser.add_argument( "--igf_data_file" , type=lowerCamelCase__ , default=lowerCamelCase__ , help="A jbl file containing the context and information gain pairs to train secondary learner." , ) parser.add_argument( "--output_dir" , default=lowerCamelCase__ , type=lowerCamelCase__ , required=lowerCamelCase__ , help="The output directory where the final fine-tuned model is stored." , ) parser.add_argument( "--tokenizer_name" , default=lowerCamelCase__ , type=lowerCamelCase__ , help="Pretrained tokenizer name or path if not the same as model_name" , ) parser.add_argument("--seed" , type=lowerCamelCase__ , default=lowerCamelCase__ , help="A seed for reproducible training." ) parser.add_argument( "--context_len" , default=32 , type=lowerCamelCase__ , help=( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." 
) , ) parser.add_argument( "--size_objective_set" , default=100 , type=lowerCamelCase__ , help="number of articles that are long enough to be used as our objective set" , ) parser.add_argument( "--eval_freq" , default=100 , type=lowerCamelCase__ , help="secondary model evaluation is triggered at eval_freq" ) parser.add_argument("--max_steps" , default=1000 , type=lowerCamelCase__ , help="To calculate training epochs" ) parser.add_argument( "--secondary_learner_batch_size" , default=128 , type=lowerCamelCase__ , help="batch size of training data for secondary learner" , ) parser.add_argument( "--batch_size" , default=16 , type=lowerCamelCase__ , help="batch size of training data of language model(gpt2) " ) parser.add_argument( "--eval_interval" , default=10 , type=lowerCamelCase__ , help=( "decay the selectivity of our secondary learner filter from" "1 standard deviation above average to 1 below average after 10 batches" ) , ) parser.add_argument( "--number" , default=100 , type=lowerCamelCase__ , help="The number of examples split to be used as objective_set/test_data" ) parser.add_argument( "--min_len" , default=1026 , type=lowerCamelCase__ , help="The minimum length of the article to be used as objective set" ) parser.add_argument( "--secondary_learner_max_epochs" , default=15 , type=lowerCamelCase__ , help="number of epochs to train secondary learner" ) parser.add_argument("--trim" , default=lowerCamelCase__ , type=lowerCamelCase__ , help="truncate the example if it exceeds context length" ) parser.add_argument( "--threshold" , default=1.0 , type=lowerCamelCase__ , help=( "The threshold value used by secondary learner to filter the train_data and allow only" " informative data as input to the model" ) , ) parser.add_argument("--finetuned_model_name" , default="gpt2_finetuned.pt" , type=lowerCamelCase__ , help="finetuned_model_name" ) parser.add_argument( "--recopy_model" , default=lowerCamelCase__ , type=lowerCamelCase__ , help="Reset the model to the original 
pretrained GPT-2 weights after each iteration" , ) # function calls # Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner generate_n_pairs( context_len=32 , max_steps=10 , size_objective_set=100 , min_len=1026 , trim=lowerCamelCase__ , data_file="data/tokenized_stories_train_wikitext103.jbl" , igf_data_file="igf_context_pairs.jbl" , ) # Load train data for secondary learner __UpperCAmelCase : Dict = joblib.load("data/IGF_values.jbl" ) # Train secondary learner __UpperCAmelCase : Dict = training_secondary_learner( lowerCamelCase__ , secondary_learner_max_epochs=15 , secondary_learner_batch_size=128 , eval_freq=100 , igf_model_path="igf_model.pt" , ) # load pretrained gpt2 model __UpperCAmelCase : str = GPTaLMHeadModel.from_pretrained("gpt2" ) set_seed(42 ) # Generate train and test data to train and evaluate gpt2 model __UpperCAmelCase , __UpperCAmelCase : Optional[Any] = generate_datasets( context_len=32 , file="data/tokenized_stories_train_wikitext103.jbl" , number=100 , min_len=1026 , trim=lowerCamelCase__ ) # fine-tuning of the gpt2 model using igf (Information Gain Filtration) finetune( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , context_len=32 , max_steps=1000 , batch_size=16 , threshold=1.0 , recopy_model=lowerCamelCase__ , secondary_learner=lowerCamelCase__ , eval_interval=10 , finetuned_model_name="gpt2_finetuned.pt" , ) if __name__ == "__main__": main()
168
1
import argparse import json import os import sys import tempfile import unittest from argparse import Namespace from dataclasses import dataclass, field from enum import Enum from pathlib import Path from typing import List, Literal, Optional import yaml from transformers import HfArgumentParser, TrainingArguments from transformers.hf_argparser import make_choice_type_function, string_to_bool # Since Python 3.10, we can use the builtin `|` operator for Union types # See PEP 604: https://peps.python.org/pep-0604 __a : List[str] = sys.version_info >= (3, 1_0) def UpperCAmelCase ( lowercase=None , lowercase=None ): """simple docstring""" return field(default_factory=lambda: default , metadata=__A ) @dataclass class _UpperCamelCase : """simple docstring""" __a : Optional[Any] = 42 __a : Union[str, Any] = 42 __a : Union[str, Any] = 42 __a : int = 42 @dataclass class _UpperCamelCase : """simple docstring""" __a : Dict = 42 __a : List[str] = field(default='''toto''' ,metadata={'''help''': '''help message'''} ) @dataclass class _UpperCamelCase : """simple docstring""" __a : Optional[int] = False __a : Dict = True __a : str = None class _UpperCamelCase ( __A ): """simple docstring""" __a : Dict = '''titi''' __a : Dict = '''toto''' class _UpperCamelCase ( __A ): """simple docstring""" __a : Any = '''titi''' __a : int = '''toto''' __a : Optional[Any] = 42 @dataclass class _UpperCamelCase : """simple docstring""" __a : Tuple = '''toto''' def _SCREAMING_SNAKE_CASE ( self ) -> List[Any]: '''simple docstring''' __lowercase = BasicEnum(self.foo ) @dataclass class _UpperCamelCase : """simple docstring""" __a : str = '''toto''' def _SCREAMING_SNAKE_CASE ( self ) -> Tuple: '''simple docstring''' __lowercase = MixedTypeEnum(self.foo ) @dataclass class _UpperCamelCase : """simple docstring""" __a : str = None __a : str = field(default=__A ,metadata={'''help''': '''help message'''} ) __a : str = None __a : Optional[int] = list_field(default=[] ) __a : List[Any] = list_field(default=[] ) 
@dataclass class _UpperCamelCase : """simple docstring""" __a : List[str] = list_field(default=[] ) __a : Union[str, Any] = list_field(default=[1, 2, 3] ) __a : Dict = list_field(default=['''Hallo''', '''Bonjour''', '''Hello'''] ) __a : str = list_field(default=[0.1, 0.2, 0.3] ) @dataclass class _UpperCamelCase : """simple docstring""" __a : Optional[int] = field() __a : List[str] = field() __a : Any = field() def _SCREAMING_SNAKE_CASE ( self ) -> Tuple: '''simple docstring''' __lowercase = BasicEnum(self.required_enum ) @dataclass class _UpperCamelCase : """simple docstring""" __a : List[str] = 42 __a : Union[str, Any] = field() __a : int = None __a : List[str] = field(default='''toto''' ,metadata={'''help''': '''help message'''} ) __a : Any = list_field(default=['''Hallo''', '''Bonjour''', '''Hello'''] ) if is_python_no_less_than_3_10: @dataclass class _UpperCamelCase : """simple docstring""" __a : Dict = False __a : Union[str, Any] = True __a : int = None @dataclass class _UpperCamelCase : """simple docstring""" __a : Tuple = None __a : int = field(default=__A ,metadata={'''help''': '''help message'''} ) __a : List[str] = None __a : List[Any] = list_field(default=[] ) __a : List[str] = list_field(default=[] ) class _UpperCamelCase ( unittest.TestCase ): """simple docstring""" def _SCREAMING_SNAKE_CASE ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> Any: '''simple docstring''' self.assertEqual(len(a._actions ) , len(b._actions ) ) for x, y in zip(a._actions , b._actions ): __lowercase = {k: v for k, v in vars(lowerCAmelCase__ ).items() if k != '''container'''} __lowercase = {k: v for k, v in vars(lowerCAmelCase__ ).items() if k != '''container'''} # Choices with mixed type have custom function as "type" # So we need to compare results directly for equality if xx.get('''choices''' , lowerCAmelCase__ ) and yy.get('''choices''' , lowerCAmelCase__ ): for expected_choice in yy["choices"] + xx["choices"]: self.assertEqual(xx['''type'''](lowerCAmelCase__ ) , 
yy['''type'''](lowerCAmelCase__ ) ) del xx["type"], yy["type"] self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ ) def _SCREAMING_SNAKE_CASE ( self ) -> Any: '''simple docstring''' __lowercase = HfArgumentParser(lowerCAmelCase__ ) __lowercase = argparse.ArgumentParser() expected.add_argument('''--foo''' , type=lowerCAmelCase__ , required=lowerCAmelCase__ ) expected.add_argument('''--bar''' , type=lowerCAmelCase__ , required=lowerCAmelCase__ ) expected.add_argument('''--baz''' , type=lowerCAmelCase__ , required=lowerCAmelCase__ ) expected.add_argument('''--flag''' , type=lowerCAmelCase__ , default=lowerCAmelCase__ , const=lowerCAmelCase__ , nargs='''?''' ) self.argparsersEqual(lowerCAmelCase__ , lowerCAmelCase__ ) __lowercase = ['''--foo''', '''1''', '''--baz''', '''quux''', '''--bar''', '''0.5'''] (__lowercase ) = parser.parse_args_into_dataclasses(lowerCAmelCase__ , look_for_args_file=lowerCAmelCase__ ) self.assertFalse(example.flag ) def _SCREAMING_SNAKE_CASE ( self ) -> List[str]: '''simple docstring''' __lowercase = HfArgumentParser(lowerCAmelCase__ ) __lowercase = argparse.ArgumentParser() expected.add_argument('''--foo''' , default=42 , type=lowerCAmelCase__ ) expected.add_argument('''--baz''' , default='''toto''' , type=lowerCAmelCase__ , help='''help message''' ) self.argparsersEqual(lowerCAmelCase__ , lowerCAmelCase__ ) def _SCREAMING_SNAKE_CASE ( self ) -> Dict: '''simple docstring''' __lowercase = argparse.ArgumentParser() expected.add_argument('''--foo''' , type=lowerCAmelCase__ , default=lowerCAmelCase__ , const=lowerCAmelCase__ , nargs='''?''' ) expected.add_argument('''--baz''' , type=lowerCAmelCase__ , default=lowerCAmelCase__ , const=lowerCAmelCase__ , nargs='''?''' ) # A boolean no_* argument always has to come after its "default: True" regular counter-part # and its default must be set to False expected.add_argument('''--no_baz''' , action='''store_false''' , default=lowerCAmelCase__ , dest='''baz''' ) expected.add_argument('''--opt''' , 
type=lowerCAmelCase__ , default=lowerCAmelCase__ ) __lowercase = [WithDefaultBoolExample] if is_python_no_less_than_3_10: dataclass_types.append(lowerCAmelCase__ ) for dataclass_type in dataclass_types: __lowercase = HfArgumentParser(lowerCAmelCase__ ) self.argparsersEqual(lowerCAmelCase__ , lowerCAmelCase__ ) __lowercase = parser.parse_args([] ) self.assertEqual(lowerCAmelCase__ , Namespace(foo=lowerCAmelCase__ , baz=lowerCAmelCase__ , opt=lowerCAmelCase__ ) ) __lowercase = parser.parse_args(['''--foo''', '''--no_baz'''] ) self.assertEqual(lowerCAmelCase__ , Namespace(foo=lowerCAmelCase__ , baz=lowerCAmelCase__ , opt=lowerCAmelCase__ ) ) __lowercase = parser.parse_args(['''--foo''', '''--baz'''] ) self.assertEqual(lowerCAmelCase__ , Namespace(foo=lowerCAmelCase__ , baz=lowerCAmelCase__ , opt=lowerCAmelCase__ ) ) __lowercase = parser.parse_args(['''--foo''', '''True''', '''--baz''', '''True''', '''--opt''', '''True'''] ) self.assertEqual(lowerCAmelCase__ , Namespace(foo=lowerCAmelCase__ , baz=lowerCAmelCase__ , opt=lowerCAmelCase__ ) ) __lowercase = parser.parse_args(['''--foo''', '''False''', '''--baz''', '''False''', '''--opt''', '''False'''] ) self.assertEqual(lowerCAmelCase__ , Namespace(foo=lowerCAmelCase__ , baz=lowerCAmelCase__ , opt=lowerCAmelCase__ ) ) def _SCREAMING_SNAKE_CASE ( self ) -> List[str]: '''simple docstring''' __lowercase = HfArgumentParser(lowerCAmelCase__ ) __lowercase = argparse.ArgumentParser() expected.add_argument( '''--foo''' , default='''toto''' , choices=['''titi''', '''toto''', 42] , type=make_choice_type_function(['''titi''', '''toto''', 42] ) , ) self.argparsersEqual(lowerCAmelCase__ , lowerCAmelCase__ ) __lowercase = parser.parse_args([] ) self.assertEqual(args.foo , '''toto''' ) __lowercase = parser.parse_args_into_dataclasses([] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.toto ) __lowercase = parser.parse_args(['''--foo''', '''titi'''] ) self.assertEqual(args.foo , '''titi''' ) __lowercase = 
parser.parse_args_into_dataclasses(['''--foo''', '''titi'''] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.titi ) __lowercase = parser.parse_args(['''--foo''', '''42'''] ) self.assertEqual(args.foo , 42 ) __lowercase = parser.parse_args_into_dataclasses(['''--foo''', '''42'''] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo ) def _SCREAMING_SNAKE_CASE ( self ) -> int: '''simple docstring''' @dataclass class _UpperCamelCase : """simple docstring""" __a : List[str] = '''toto''' __lowercase = HfArgumentParser(lowerCAmelCase__ ) __lowercase = argparse.ArgumentParser() expected.add_argument( '''--foo''' , default='''toto''' , choices=('''titi''', '''toto''', 42) , type=make_choice_type_function(['''titi''', '''toto''', 42] ) , ) self.argparsersEqual(lowerCAmelCase__ , lowerCAmelCase__ ) __lowercase = parser.parse_args([] ) self.assertEqual(args.foo , '''toto''' ) __lowercase = parser.parse_args(['''--foo''', '''titi'''] ) self.assertEqual(args.foo , '''titi''' ) __lowercase = parser.parse_args(['''--foo''', '''42'''] ) self.assertEqual(args.foo , 42 ) def _SCREAMING_SNAKE_CASE ( self ) -> Any: '''simple docstring''' __lowercase = HfArgumentParser(lowerCAmelCase__ ) __lowercase = argparse.ArgumentParser() expected.add_argument('''--foo_int''' , nargs='''+''' , default=[] , type=lowerCAmelCase__ ) expected.add_argument('''--bar_int''' , nargs='''+''' , default=[1, 2, 3] , type=lowerCAmelCase__ ) expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=lowerCAmelCase__ ) expected.add_argument('''--foo_float''' , nargs='''+''' , default=[0.1, 0.2, 0.3] , type=lowerCAmelCase__ ) self.argparsersEqual(lowerCAmelCase__ , lowerCAmelCase__ ) __lowercase = parser.parse_args([] ) self.assertEqual( lowerCAmelCase__ , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=['''Hallo''', '''Bonjour''', '''Hello'''] , foo_float=[0.1, 0.2, 0.3] ) , ) __lowercase = parser.parse_args('''--foo_int 1 --bar_int 2 3 --foo_str 
a b c --foo_float 0.1 0.7'''.split() ) self.assertEqual(lowerCAmelCase__ , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=['''a''', '''b''', '''c'''] , foo_float=[0.1, 0.7] ) ) def _SCREAMING_SNAKE_CASE ( self ) -> Tuple: '''simple docstring''' __lowercase = argparse.ArgumentParser() expected.add_argument('''--foo''' , default=lowerCAmelCase__ , type=lowerCAmelCase__ ) expected.add_argument('''--bar''' , default=lowerCAmelCase__ , type=lowerCAmelCase__ , help='''help message''' ) expected.add_argument('''--baz''' , default=lowerCAmelCase__ , type=lowerCAmelCase__ ) expected.add_argument('''--ces''' , nargs='''+''' , default=[] , type=lowerCAmelCase__ ) expected.add_argument('''--des''' , nargs='''+''' , default=[] , type=lowerCAmelCase__ ) __lowercase = [OptionalExample] if is_python_no_less_than_3_10: dataclass_types.append(lowerCAmelCase__ ) for dataclass_type in dataclass_types: __lowercase = HfArgumentParser(lowerCAmelCase__ ) self.argparsersEqual(lowerCAmelCase__ , lowerCAmelCase__ ) __lowercase = parser.parse_args([] ) self.assertEqual(lowerCAmelCase__ , Namespace(foo=lowerCAmelCase__ , bar=lowerCAmelCase__ , baz=lowerCAmelCase__ , ces=[] , des=[] ) ) __lowercase = parser.parse_args('''--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'''.split() ) self.assertEqual(lowerCAmelCase__ , Namespace(foo=12 , bar=3.14 , baz='''42''' , ces=['''a''', '''b''', '''c'''] , des=[1, 2, 3] ) ) def _SCREAMING_SNAKE_CASE ( self ) -> Dict: '''simple docstring''' __lowercase = HfArgumentParser(lowerCAmelCase__ ) __lowercase = argparse.ArgumentParser() expected.add_argument('''--required_list''' , nargs='''+''' , type=lowerCAmelCase__ , required=lowerCAmelCase__ ) expected.add_argument('''--required_str''' , type=lowerCAmelCase__ , required=lowerCAmelCase__ ) expected.add_argument( '''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=lowerCAmelCase__ , ) self.argparsersEqual(lowerCAmelCase__ , 
lowerCAmelCase__ ) def _SCREAMING_SNAKE_CASE ( self ) -> int: '''simple docstring''' __lowercase = HfArgumentParser(lowerCAmelCase__ ) __lowercase = argparse.ArgumentParser() expected.add_argument('''--foo''' , type=lowerCAmelCase__ , required=lowerCAmelCase__ ) expected.add_argument( '''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=lowerCAmelCase__ , ) expected.add_argument('''--opt''' , type=lowerCAmelCase__ , default=lowerCAmelCase__ ) expected.add_argument('''--baz''' , default='''toto''' , type=lowerCAmelCase__ , help='''help message''' ) expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=lowerCAmelCase__ ) self.argparsersEqual(lowerCAmelCase__ , lowerCAmelCase__ ) def _SCREAMING_SNAKE_CASE ( self ) -> Tuple: '''simple docstring''' __lowercase = HfArgumentParser(lowerCAmelCase__ ) __lowercase = { '''foo''': 12, '''bar''': 3.14, '''baz''': '''42''', '''flag''': True, } __lowercase = parser.parse_dict(lowerCAmelCase__ )[0] __lowercase = BasicExample(**lowerCAmelCase__ ) self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ ) def _SCREAMING_SNAKE_CASE ( self ) -> List[Any]: '''simple docstring''' __lowercase = HfArgumentParser(lowerCAmelCase__ ) __lowercase = { '''foo''': 12, '''bar''': 3.14, '''baz''': '''42''', '''flag''': True, '''extra''': 42, } self.assertRaises(lowerCAmelCase__ , parser.parse_dict , lowerCAmelCase__ , allow_extra_keys=lowerCAmelCase__ ) def _SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]: '''simple docstring''' __lowercase = HfArgumentParser(lowerCAmelCase__ ) __lowercase = { '''foo''': 12, '''bar''': 3.14, '''baz''': '''42''', '''flag''': True, } with tempfile.TemporaryDirectory() as tmp_dir: __lowercase = os.path.join(lowerCAmelCase__ , '''temp_json''' ) os.mkdir(lowerCAmelCase__ ) with open(temp_local_path + '''.json''' , '''w+''' ) as f: json.dump(lowerCAmelCase__ , lowerCAmelCase__ ) __lowercase = 
parser.parse_yaml_file(Path(temp_local_path + '''.json''' ) )[0] __lowercase = BasicExample(**lowerCAmelCase__ ) self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ ) def _SCREAMING_SNAKE_CASE ( self ) -> Optional[int]: '''simple docstring''' __lowercase = HfArgumentParser(lowerCAmelCase__ ) __lowercase = { '''foo''': 12, '''bar''': 3.14, '''baz''': '''42''', '''flag''': True, } with tempfile.TemporaryDirectory() as tmp_dir: __lowercase = os.path.join(lowerCAmelCase__ , '''temp_yaml''' ) os.mkdir(lowerCAmelCase__ ) with open(temp_local_path + '''.yaml''' , '''w+''' ) as f: yaml.dump(lowerCAmelCase__ , lowerCAmelCase__ ) __lowercase = parser.parse_yaml_file(Path(temp_local_path + '''.yaml''' ) )[0] __lowercase = BasicExample(**lowerCAmelCase__ ) self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ ) def _SCREAMING_SNAKE_CASE ( self ) -> List[str]: '''simple docstring''' __lowercase = HfArgumentParser(lowerCAmelCase__ ) self.assertIsNotNone(lowerCAmelCase__ )
708
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __a : Any = {"""configuration_xlnet""": ["""XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XLNetConfig"""]} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a : List[str] = ["""XLNetTokenizer"""] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a : Dict = ["""XLNetTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a : Tuple = [ """XLNET_PRETRAINED_MODEL_ARCHIVE_LIST""", """XLNetForMultipleChoice""", """XLNetForQuestionAnswering""", """XLNetForQuestionAnsweringSimple""", """XLNetForSequenceClassification""", """XLNetForTokenClassification""", """XLNetLMHeadModel""", """XLNetModel""", """XLNetPreTrainedModel""", """load_tf_weights_in_xlnet""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a : List[str] = [ """TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFXLNetForMultipleChoice""", """TFXLNetForQuestionAnsweringSimple""", """TFXLNetForSequenceClassification""", """TFXLNetForTokenClassification""", """TFXLNetLMHeadModel""", """TFXLNetMainLayer""", """TFXLNetModel""", """TFXLNetPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xlnet import XLNetTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xlnet_fast import XLNetTokenizerFast 
try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xlnet import ( XLNET_PRETRAINED_MODEL_ARCHIVE_LIST, XLNetForMultipleChoice, XLNetForQuestionAnswering, XLNetForQuestionAnsweringSimple, XLNetForSequenceClassification, XLNetForTokenClassification, XLNetLMHeadModel, XLNetModel, XLNetPreTrainedModel, load_tf_weights_in_xlnet, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xlnet import ( TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFXLNetForMultipleChoice, TFXLNetForQuestionAnsweringSimple, TFXLNetForSequenceClassification, TFXLNetForTokenClassification, TFXLNetLMHeadModel, TFXLNetMainLayer, TFXLNetModel, TFXLNetPreTrainedModel, ) else: import sys __a : str = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
522
0
"""simple docstring""" from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments from transformers.testing_utils import TestCasePlus, require_torch, slow from transformers.utils import is_datasets_available if is_datasets_available(): import datasets class lowercase__ ( A_ ): @slow @require_torch def UpperCamelCase_ ( self) -> List[Any]: _lowerCamelCase : int = EncoderDecoderModel.from_encoder_decoder_pretrained("""prajjwal1/bert-tiny""" , """prajjwal1/bert-tiny""") _lowerCamelCase : Dict = BertTokenizer.from_pretrained("""bert-base-uncased""") _lowerCamelCase : Tuple = bertabert.config.encoder.vocab_size _lowerCamelCase : int = tokenizer.sep_token_id _lowerCamelCase : List[str] = tokenizer.cls_token_id _lowerCamelCase : Optional[int] = 128 _lowerCamelCase : int = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""train[:1%]""") _lowerCamelCase : Union[str, Any] = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""validation[:1%]""") _lowerCamelCase : List[Any] = train_dataset.select(range(32)) _lowerCamelCase : Optional[int] = val_dataset.select(range(16)) _lowerCamelCase : Any = 4 def _map_to_encoder_decoder_inputs(SCREAMING_SNAKE_CASE): # Tokenizer will automatically set [BOS] <text> [EOS] _lowerCamelCase : List[Any] = tokenizer(batch["""article"""] , padding="""max_length""" , truncation=SCREAMING_SNAKE_CASE , max_length=512) _lowerCamelCase : Any = tokenizer(batch["""highlights"""] , padding="""max_length""" , truncation=SCREAMING_SNAKE_CASE , max_length=128) _lowerCamelCase : Optional[Any] = inputs.input_ids _lowerCamelCase : Optional[int] = inputs.attention_mask _lowerCamelCase : Union[str, Any] = outputs.input_ids _lowerCamelCase : str = outputs.input_ids.copy() _lowerCamelCase : List[Any] = [ [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["""labels"""] ] _lowerCamelCase : Dict = outputs.attention_mask assert 
all(len(SCREAMING_SNAKE_CASE) == 512 for x in inputs.input_ids) assert all(len(SCREAMING_SNAKE_CASE) == 128 for x in outputs.input_ids) return batch def _compute_metrics(SCREAMING_SNAKE_CASE): _lowerCamelCase : Any = pred.label_ids _lowerCamelCase : Optional[int] = pred.predictions # all unnecessary tokens are removed _lowerCamelCase : Tuple = tokenizer.batch_decode(SCREAMING_SNAKE_CASE , skip_special_tokens=SCREAMING_SNAKE_CASE) _lowerCamelCase : Tuple = tokenizer.batch_decode(SCREAMING_SNAKE_CASE , skip_special_tokens=SCREAMING_SNAKE_CASE) _lowerCamelCase : Tuple = sum([int(pred_str[i] == label_str[i]) for i in range(len(SCREAMING_SNAKE_CASE))]) / len(SCREAMING_SNAKE_CASE) return {"accuracy": accuracy} # map train dataset _lowerCamelCase : str = train_dataset.map( _map_to_encoder_decoder_inputs , batched=SCREAMING_SNAKE_CASE , batch_size=SCREAMING_SNAKE_CASE , remove_columns=["""article""", """highlights"""] , ) train_dataset.set_format( type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , ) # same for validation dataset _lowerCamelCase : Union[str, Any] = val_dataset.map( _map_to_encoder_decoder_inputs , batched=SCREAMING_SNAKE_CASE , batch_size=SCREAMING_SNAKE_CASE , remove_columns=["""article""", """highlights"""] , ) val_dataset.set_format( type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , ) _lowerCamelCase : Optional[Any] = self.get_auto_remove_tmp_dir() _lowerCamelCase : Union[str, Any] = SeqaSeqTrainingArguments( output_dir=SCREAMING_SNAKE_CASE , per_device_train_batch_size=SCREAMING_SNAKE_CASE , per_device_eval_batch_size=SCREAMING_SNAKE_CASE , predict_with_generate=SCREAMING_SNAKE_CASE , evaluation_strategy="""steps""" , do_train=SCREAMING_SNAKE_CASE , do_eval=SCREAMING_SNAKE_CASE , warmup_steps=0 , eval_steps=2 , logging_steps=2 , ) # instantiate trainer _lowerCamelCase : 
Optional[Any] = SeqaSeqTrainer( model=SCREAMING_SNAKE_CASE , args=SCREAMING_SNAKE_CASE , compute_metrics=_compute_metrics , train_dataset=SCREAMING_SNAKE_CASE , eval_dataset=SCREAMING_SNAKE_CASE , tokenizer=SCREAMING_SNAKE_CASE , ) # start training trainer.train()
88
import string


def decrypt(message: str) -> None:
    """Brute-force a Caesar cipher: print the candidate plaintext for every key.

    Only uppercase ASCII letters are shifted; all other characters pass through
    unchanged, so call with an upper-cased message for full coverage.

    Args:
        message: The encrypted text (conventionally upper-cased by ``main``).
    """
    # Try every possible shift (one per letter of the alphabet).
    for key in range(len(string.ascii_uppercase)):
        translated = ""
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol)
                num = num - key
                # Wrap around the alphabet when the shift goes below 'A'.
                if num < 0:
                    num = num + len(string.ascii_uppercase)
                translated = translated + string.ascii_uppercase[num]
            else:
                # Non-uppercase characters (spaces, digits, ...) are copied verbatim.
                translated = translated + symbol
        print(f"Decryption using Key #{key}: {translated}")


def main() -> None:
    """Prompt the user for an encrypted message and print all 26 decryptions."""
    message = input("Encrypted message: ")
    message = message.upper()
    decrypt(message)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
1
0
"""A* grid search: find a path from ``init`` to ``goal`` on a 0/1 obstacle grid."""
from __future__ import annotations

# Candidate moves, expressed as [d_row, d_col] deltas.
DIRECTIONS = [
    [-1, 0],  # left
    [0, -1],  # down
    [1, 0],  # right
    [0, 1],  # up
]


def search(grid, init, goal, cost, heuristic):
    """Run A* over ``grid`` and return ``(path, action)``.

    Args:
        grid: 2-D list where 0 is free and 1 is an obstacle.
        init: ``[row, col]`` start cell.
        goal: ``[row, col]`` target cell.
        cost: uniform cost of moving one cell.
        heuristic: 2-D list of per-cell heuristic values (same shape as grid).

    Returns:
        path: list of ``[row, col]`` cells from ``init`` to ``goal``.
        action: 2-D grid recording, for each expanded cell, the index into
            ``DIRECTIONS`` of the move that reached it.

    Raises:
        ValueError: if the open list empties before the goal is reached.
    """
    # closed[r][c] == 1 once cell (r, c) has been queued, so it is never re-added.
    closed = [[0 for _ in range(len(grid[0]))] for _ in range(len(grid))]
    closed[init[0]][init[1]] = 1
    # action[r][c] stores which DIRECTIONS move produced (r, c); used to rebuild the path.
    action = [[0 for _ in range(len(grid[0]))] for _ in range(len(grid))]

    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # cost from starting cell to destination cell
    cell = [[f, g, x, y]]  # open list of [f, g, row, col] entries

    found = False  # set when the goal is expanded
    resign = False  # set if we run out of cells to expand

    while not found and not resign:
        if len(cell) == 0:
            raise ValueError("Algorithm is unable to find solution")
        # Expand the entry with the smallest f-value.
        cell.sort()
        cell.reverse()
        next_cell = cell.pop()
        x = next_cell[2]
        y = next_cell[3]
        g = next_cell[1]

        if x == goal[0] and y == goal[1]:
            found = True
        else:
            for i in range(len(DIRECTIONS)):  # try each valid move
                xa = x + DIRECTIONS[i][0]
                ya = y + DIRECTIONS[i][1]
                if 0 <= xa < len(grid) and 0 <= ya < len(grid[0]):
                    if closed[xa][ya] == 0 and grid[xa][ya] == 0:
                        ga = g + cost
                        fa = ga + heuristic[xa][ya]
                        cell.append([fa, ga, xa, ya])
                        closed[xa][ya] = 1
                        action[xa][ya] = i

    # Walk backwards from the goal using the recorded actions, then reverse.
    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y])
    while x != init[0] or y != init[1]:
        xa = x - DIRECTIONS[action[x][y]][0]
        ya = y - DIRECTIONS[action[x][y]][1]
        x = xa
        y = ya
        invpath.append([x, y])

    path = []
    for i in range(len(invpath)):
        path.append(invpath[len(invpath) - 1 - i])
    return path, action


if __name__ == "__main__":
    grid = [
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 1, 0],
        [0, 0, 0, 0, 1, 0],
    ]
    init = [0, 0]  # all coordinates are given in format [y, x]
    goal = [len(grid) - 1, len(grid[0]) - 1]
    cost = 1

    # the cost map which pushes the path closer to the goal
    heuristic = [[0 for _ in range(len(grid[0]))] for _ in range(len(grid))]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1])
            if grid[i][j] == 1:
                # added extra penalty in the heuristic map
                heuristic[i][j] = 99

    path, action = search(grid, init, goal, cost, heuristic)

    print("ACTION MAP")
    for i in range(len(action)):
        print(action[i])
    for i in range(len(path)):
        print(path[i])
700
"""Initialize a fresh CodeParrot causal-LM from a config and push it to the Hub."""
from arguments import InitializationArguments

from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser

# Configuration
parser = HfArgumentParser(InitializationArguments)
args = parser.parse_args()

# Load codeparrot tokenizer trained for Python code tokenization
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name)

# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
config_kwargs = {
    "vocab_size": len(tokenizer),
    "scale_attn_by_inverse_layer_idx": True,
    "reorder_and_upcast_attn": True,
}

# Load model config (GPT-2 large in this case)
config = AutoConfig.from_pretrained(args.config_name, **config_kwargs)

# Initialize new model with config (random weights, no pretraining)
model = AutoModelForCausalLM.from_config(config)

# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
305
0
from pathlib import Path

import numpy as np


def rgb_to_gray(rgb: np.ndarray) -> np.ndarray:
    """Convert an (H, W, 3) RGB image to grayscale using ITU-R 601 luma weights."""
    r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
    return 0.2989 * r + 0.5870 * g + 0.1140 * b


def gray_to_binary(gray: np.ndarray) -> np.ndarray:
    """Threshold a grayscale image: True where 127 < value <= 255."""
    return (gray > 127) & (gray <= 255)


def dilation(image: np.ndarray, kernel: np.ndarray) -> np.ndarray:
    """Morphological dilation of a binary ``image`` by a 0/1 ``kernel``.

    An output pixel is 1 when the kernel, centered on that pixel, overlaps
    at least one foreground pixel of the input.

    Args:
        image: 2-D binary (0/1 or bool) array.
        kernel: 2-D 0/1 structuring element; assumes an odd, >=2 sized kernel
            so the centered padding below is valid.

    Returns:
        2-D array of the same shape/dtype family as ``image``.
    """
    output = np.zeros_like(image)
    # Zero-pad so the kernel can slide over border pixels.
    image_padded = np.zeros(
        (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1)
    )
    # Copy image to padded image, centered (NOTE(review): slice reconstructed from
    # the canonical morphological-operations implementation — confirm offsets).
    image_padded[kernel.shape[0] - 2 : -1, kernel.shape[1] - 2 : -1] = image

    # Iterate over image & apply kernel
    for x in range(image.shape[1]):
        for y in range(image.shape[0]):
            summation = (
                kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
            ).sum()
            output[y, x] = int(summation > 0)
    return output


if __name__ == "__main__":
    # Pillow is only needed for the demo below, so import it lazily here.
    from PIL import Image

    # read original image
    lena_path = Path(__file__).resolve().parent / "image_data" / "lena.jpg"
    lena = np.array(Image.open(lena_path))

    # kernel to be applied
    structuring_element = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
    output = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)

    # Save the output image
    pil_img = Image.fromarray(output).convert("RGB")
    pil_img.save("result_dilation.png")
32
import warnings

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class CLIPSegProcessor(ProcessorMixin):
    r"""
    Wraps a ViT image processor and a CLIP tokenizer into a single processor.

    Args:
        image_processor (`ViTImageProcessor`):
            Handles `images` and `visual_prompt` inputs.
        tokenizer (`CLIPTokenizer` or `CLIPTokenizerFast`):
            Handles `text` inputs.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        # `feature_extractor` is the legacy name for `image_processor`.
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, visual_prompt=None, images=None, return_tensors=None, **kwargs):
        """Prepare text and/or image inputs for the model.

        Exactly one of `text` / `visual_prompt` may be given; either may be
        combined with `images`. Returns a dict or `BatchEncoding` of tensors.

        Raises:
            ValueError: if no input is given, or if both `text` and
                `visual_prompt` are given.
        """
        if text is None and visual_prompt is None and images is None:
            raise ValueError("You have to specify either text, visual prompt or images.")

        if text is not None and visual_prompt is not None:
            raise ValueError("You have to specify exactly one type of prompt. Either text or visual prompt.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if visual_prompt is not None:
            prompt_features = self.image_processor(visual_prompt, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if visual_prompt is not None and images is not None:
            encoding = {
                "pixel_values": image_features.pixel_values,
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        elif text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        elif visual_prompt is not None:
            encoding = {
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        """Deprecated alias for ``image_processor_class``."""
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        """Deprecated alias for ``image_processor``."""
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
340
0
"""Exact-match metric: fraction of predictions that equal their references."""
import re
import string

import numpy as np

import datasets


_DESCRIPTION = "\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n"

_KWARGS_DESCRIPTION = "\nArgs:\n    predictions: List of predicted texts.\n    references: List of reference texts.\n    regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n        ignore when calculating the exact matches. Note: these regexes are removed\n        from the input data before the changes based on the options below (e.g. ignore_case,\n        ignore_punctuation, ignore_numbers) are applied.\n    ignore_case: Boolean, defaults to False. If true, turns everything\n        to lowercase so that capitalization differences are ignored.\n    ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n        comparing predictions and references.\n    ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before\n        comparing predictions and references.\nReturns:\n    exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n    >>> exact_match = datasets.load_metric(\"exact_match\")\n    >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n    >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n    >>> results = exact_match.compute(references=refs, predictions=preds)\n    >>> print(round(results[\"exact_match\"], 1))\n    25.0\n\n    >>> exact_match = datasets.load_metric(\"exact_match\")\n    >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n    >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\"], ignore_case=True, ignore_punctuation=True)\n    >>> print(round(results[\"exact_match\"], 1))\n    50.0\n\n\n    >>> exact_match = datasets.load_metric(\"exact_match\")\n    >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n    >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True)\n    >>> print(round(results[\"exact_match\"], 1))\n    75.0\n\n    >>> exact_match = datasets.load_metric(\"exact_match\")\n    >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n    >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n    >>> print(round(results[\"exact_match\"], 1))\n    100.0\n\n    >>> exact_match = datasets.load_metric(\"exact_match\")\n    >>> refs = [\"The cat sat on the mat.\", \"Theaters are great.\", \"It's like comparing oranges and apples.\"]\n    >>> preds = [\"The cat sat on the mat?\", \"Theaters are great.\", \"It's like comparing apples and oranges.\"]\n    >>> results = exact_match.compute(references=refs, predictions=preds)\n    >>> print(round(results[\"exact_match\"], 1))\n    33.3\n\n"

_CITATION = "\n"


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class ExactMatch(datasets.Metric):
    """Exact-match rate between prediction and reference strings."""

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            reference_urls=[],
        )

    def _compute(
        self,
        predictions,
        references,
        regexes_to_ignore=None,
        ignore_case=False,
        ignore_punctuation=False,
        ignore_numbers=False,
    ):
        # Strip ignored patterns from both sides before any normalization.
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s, "", x) for x in predictions])
                references = np.array([re.sub(s, "", x) for x in references])
        else:
            predictions = np.asarray(predictions)
            references = np.asarray(references)

        if ignore_case:
            predictions = np.char.lower(predictions)
            references = np.char.lower(references)

        if ignore_punctuation:
            repl_table = string.punctuation.maketrans("", "", string.punctuation)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        if ignore_numbers:
            repl_table = string.digits.maketrans("", "", string.digits)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        score_list = predictions == references
        return {"exact_match": np.mean(score_list) * 100}
705
"""Check that every config class docstring mentions a valid checkpoint link."""
import importlib
import inspect
import os
import re

# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    "transformers",
    os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
    submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
transformers = spec.loader.load_module()

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")

# Config classes exempt from the checkpoint check.
CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
    "CLIPConfigMixin",
    "DecisionTransformerConfigMixin",
    "EncoderDecoderConfigMixin",
    "RagConfigMixin",
    "SpeechEncoderDecoderConfigMixin",
    "VisionEncoderDecoderConfigMixin",
    "VisionTextDualEncoderConfigMixin",
}


def check_config_docstrings_have_checkpoints():
    """Raise ValueError listing config classes whose docstring has no valid checkpoint link."""
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        checkpoint_found = False

        # source code of `config_class`
        config_source = inspect.getsource(config_class)
        checkpoints = _re_checkpoint.findall(config_source)

        for checkpoint in checkpoints:
            # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
            # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
            ckpt_name, ckpt_link = checkpoint

            # verify the checkpoint name corresponds to the checkpoint link
            ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
            if ckpt_link == ckpt_link_from_name:
                checkpoint_found = True
                break

        name = config_class.__name__
        if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")


if __name__ == "__main__":
    check_config_docstrings_have_checkpoints()
696
0
from __future__ import annotations

from math import pi, sqrt


def resonant_frequency(inductance: float, capacitance: float) -> tuple:
    """Compute the resonant frequency of an LC circuit: f = 1 / (2π√(LC)).

    Args:
        inductance: Inductance L in henries; must be positive.
        capacitance: Capacitance C in farads; must be positive.

    Returns:
        A ``("Resonant frequency", value)`` tuple, frequency in hertz.

    Raises:
        ValueError: if either argument is zero or negative.
    """
    if inductance <= 0:
        raise ValueError("Inductance cannot be 0 or negative")
    elif capacitance <= 0:
        raise ValueError("Capacitance cannot be 0 or negative")
    else:
        return (
            "Resonant frequency",
            float(1 / (2 * pi * (sqrt(inductance * capacitance)))),
        )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
157
from collections import OrderedDict
from typing import Any, Mapping, Optional

from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging


logger = logging.get_logger(__name__)

GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-neo-1.3B": "https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json",
    # See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}


class GPTNeoConfig(PretrainedConfig):
    """Configuration for a GPT-Neo model.

    Defaults reproduce the EleutherAI/gpt-neo-1.3B architecture. The
    ``attention_types`` argument describes the per-layer attention pattern and
    is expanded into one entry per layer (``attention_layers``).
    """

    model_type = "gpt_neo"
    keys_to_ignore_at_inference = ["past_key_values"]
    # GPT-Neo uses `num_heads`/`num_layers`; map the common names onto them.
    attribute_map = {"num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=50257,
        max_position_embeddings=2048,
        hidden_size=2048,
        num_layers=24,
        attention_types=[[["global", "local"], 12]],
        num_heads=16,
        intermediate_size=None,
        window_size=256,
        activation_function="gelu_new",
        resid_dropout=0.0,
        embed_dropout=0.0,
        attention_dropout=0.0,
        classifier_dropout=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.intermediate_size = intermediate_size
        self.window_size = window_size
        self.activation_function = activation_function
        self.resid_dropout = resid_dropout
        self.embed_dropout = embed_dropout
        self.attention_dropout = attention_dropout
        self.classifier_dropout = classifier_dropout
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        self.attention_types = attention_types
        # Expand [[pattern, repeat], ...] into one attention type per layer.
        self.attention_layers = self.expand_attention_types_params(attention_types)

        if len(self.attention_layers) != self.num_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.attention_layers)` == `config.num_layers` "
                f"but is `len(config.attention_layers) = {len(self.attention_layers)}`, "
                f"`config.num_layers = {self.num_layers}`. "
                "`config.attention_layers` is prepared using `config.attention_types`. "
                "Please verify the value of `config.attention_types` argument."
            )

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @staticmethod
    def expand_attention_types_params(attention_types):
        """Expand ``[[patterns, repeat], ...]`` into a flat per-layer list."""
        attentions = []
        for item in attention_types:
            for _ in range(item[1]):
                attentions.extend(item[0])
        return attentions


def custom_unfold(input, dimension, size, step):
    """Custom `torch.Tensor.unfold` implementation to enable ONNX export."""
    import torch

    shape = input.size()
    rank = len(shape)
    sizedim = shape[dimension]

    low_indices = torch.arange(0, sizedim, step)
    min_length = torch.div(sizedim - size, step, rounding_mode="floor") + 1
    indices = torch.arange(size) + low_indices[:min_length][:, None]

    s = [slice(None)] * rank
    s[dimension] = indices
    sliced = input[s]

    # Move the new window dimension to the end, as `unfold` does.
    perm = list(range(0, rank + 1))
    perm.append(perm.pop(dimension + 1))

    return sliced.permute(perm)


def custom_get_block_length_and_num_blocks(seq_length, window_size):
    """ONNX-exportable: largest divisor of ``seq_length`` below ``window_size``
    and the corresponding number of blocks."""
    import torch

    candidates = torch.arange(1, window_size)
    remainders = torch.remainder(seq_length, candidates)
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors)
    return largest_divisor, torch.div(seq_length, largest_divisor, rounding_mode="floor")


class GPTNeoOnnxConfig(OnnxConfigWithPast):
    """ONNX export configuration for GPT-Neo."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_attention_heads(self) -> int:
        return self._config.num_heads

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)],
                dim=1,
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
157
1
from __future__ import annotations

from math import pow, sqrt


def electrical_impedance(resistance: float, reactance: float, impedance: float) -> dict[str, float]:
    """Solve Z² = R² + X² for whichever of the three quantities is given as 0.

    Exactly one argument must be 0; that quantity is computed from the other
    two and returned as a single-entry dict keyed by its name.

    Raises:
        ValueError: if the number of zero arguments is not exactly one.
    """
    if (resistance, reactance, impedance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance, 2) - pow(reactance, 2))}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance, 2) - pow(resistance, 2))}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance, 2) + pow(reactance, 2))}
    else:
        raise ValueError("Exactly one argument must be 0")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
29
import string
from math import log10


def term_frequency(term: str, document: str) -> int:
    """Count case-insensitive whole-word occurrences of ``term`` in ``document``."""
    # Strip punctuation and newlines, then word-tokenize on spaces.
    document_without_punctuation = document.translate(
        str.maketrans("", "", string.punctuation)
    ).replace("\n", "")
    tokenize_document = document_without_punctuation.split(" ")  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()])


def document_frequency(term: str, corpus: str) -> tuple[int, int]:
    """Return (number of documents containing ``term``, total documents).

    ``corpus`` is one document per line; matching is case-insensitive and
    ignores punctuation.
    """
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("", "", string.punctuation)
    )  # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split("\n")
    term = term.lower()
    return (len([doc for doc in docs if term in doc]), len(docs))


def inverse_document_frequency(df: int, n: int, smoothing: bool = False) -> float:
    """Return idf = log10(n / df), rounded to 3 places.

    With ``smoothing`` enabled, returns 1 + log10(n / (1 + df)) so that
    df == 0 is well-defined.

    Raises:
        ValueError: if ``n`` is 0 (log10(0) is undefined).
        ZeroDivisionError: if ``df`` is 0 and smoothing is off.
    """
    if smoothing:
        if n == 0:
            raise ValueError("log10(0) is undefined.")
        return round(1 + log10(n / (1 + df)), 3)

    if df == 0:
        raise ZeroDivisionError("df must be > 0")
    elif n == 0:
        raise ValueError("log10(0) is undefined.")
    return round(log10(n / df), 3)


def tf_idf(tf: int, idf: int) -> float:
    """Combine term frequency and inverse document frequency: tf * idf."""
    return round(tf * idf, 3)
29
1
import io
import json

import fsspec
import pytest

from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter

from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases


# NOTE(review): the obfuscated source named every test function `A__` (and every
# method `UpperCamelCase_`), so later defs shadowed earlier ones and pytest could
# only ever collect the last; distinct names are reconstructed below. Fixture
# names (`jsonl_path`, `jsonl_312_path`, `dataset`, `shared_datadir`) are
# reconstructed from how they are used — confirm against the project's conftest.


def _check_json_dataset(dataset, expected_features):
    """Shared sanity checks for a Dataset read from the 4-row test JSON fixture."""
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    # In-memory reads must allocate Arrow memory; cached reads must not.
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_json_features(features, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_path, features=features, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_3": "float64", "col_1": "string", "col_2": "int64"},
    ],
)
def test_dataset_from_json_with_unsorted_column_names(features, jsonl_312_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_3", "col_1", "col_2"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


def test_dataset_from_json_with_mismatched_features(jsonl_312_path, tmp_path):
    # jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    features = {"col_2": "int64", "col_3": "float64", "col_1": "string"}
    expected_features = features.copy()
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    cache_dir = tmp_path / "cache"
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    # The requested feature order wins over the file's column order.
    assert dataset.column_names == ["col_2", "col_3", "col_1"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_json_split(split, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, split=split).read()
    _check_json_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_json_path_type(path_type, jsonl_path, tmp_path):
    if issubclass(path_type, str):
        path = jsonl_path
    elif issubclass(path_type, list):
        path = [jsonl_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)


def _check_json_datasetdict(dataset_dict, expected_features, splits=("train",)):
    """Shared sanity checks for a DatasetDict read from the 4-row test JSON fixture."""
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_datasetdict_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader({"train": jsonl_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_datasetdict(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_datasetdict_from_json_features(features, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader({"train": jsonl_path}, features=features, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_datasetdict_from_json_splits(split, jsonl_path, tmp_path):
    if split:
        path = {split: jsonl_path}
    else:
        split = "train"
        path = {"train": jsonl_path, "test": jsonl_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())


def load_json(buffer):
    return json.load(buffer)


def load_json_lines(buffer):
    # The source did `json.loads(<mangled param>)` while iterating `line`; parse each line.
    return [json.loads(line) for line in buffer]


class TestJsonDatasetWriter:
    @pytest.mark.parametrize("lines, load_json_function", [(True, load_json_lines), (False, load_json)])
    def test_dataset_to_json_lines(self, lines, load_json_function, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10

    @pytest.mark.parametrize(
        "orient, container, keys, len_at",
        [
            ("records", list, {"tokens", "labels", "answers", "id"}, None),
            ("split", dict, {"columns", "data"}, "data"),
            ("index", dict, set("0123456789"), None),
            ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
            ("values", list, None, None),
            ("table", dict, {"schema", "data"}, "data"),
        ],
    )
    def test_dataset_to_json_orient(self, orient, container, keys, len_at, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys")
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10

    @pytest.mark.parametrize("lines, load_json_function", [(True, load_json_lines), (False, load_json)])
    def test_dataset_to_json_lines_multiproc(self, lines, load_json_function, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10

    @pytest.mark.parametrize(
        "orient, container, keys, len_at",
        [
            ("records", list, {"tokens", "labels", "answers", "id"}, None),
            ("split", dict, {"columns", "data"}, "data"),
            ("index", dict, set("0123456789"), None),
            ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
            ("values", list, None, None),
            ("table", dict, {"schema", "data"}, "data"),
        ],
    )
    def test_dataset_to_json_orient_multiproc(self, orient, container, keys, len_at, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys")
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10

    def test_dataset_to_json_orient_invalidproc(self, dataset):
        # num_proc must be a positive integer; presumably a ValueError — TODO confirm
        with pytest.raises(ValueError):
            with io.BytesIO() as buffer:
                JsonDatasetWriter(dataset, buffer, num_proc=0)

    @pytest.mark.parametrize("compression, extension", [("gzip", "gz"), ("bz2", "bz2"), ("xz", "xz")])
    def test_dataset_to_json_compression(self, shared_datadir, tmp_path_factory, extension, compression, dataset):
        path = str(tmp_path_factory.mktemp("data") / f"test.json.{extension}")
        original_path = str(shared_datadir / f"test_file.json.{extension}")
        JsonDatasetWriter(dataset, path, compression=compression).write()
        with fsspec.open(path, "rb", compression="infer") as f:
            exported_content = f.read()
        with fsspec.open(original_path, "rb", compression="infer") as f:
            original_content = f.read()
        assert exported_content == original_content
64
"""simple docstring""" SCREAMING_SNAKE_CASE : Optional[int] = '''0.21.0''' from .accelerator import Accelerator from .big_modeling import ( cpu_offload, cpu_offload_with_hook, disk_offload, dispatch_model, init_empty_weights, init_on_device, load_checkpoint_and_dispatch, ) from .data_loader import skip_first_batches from .launchers import debug_launcher, notebook_launcher from .state import PartialState from .utils import ( DeepSpeedPlugin, DistributedDataParallelKwargs, DistributedType, FullyShardedDataParallelPlugin, GradScalerKwargs, InitProcessGroupKwargs, find_executable_batch_size, infer_auto_device_map, is_rich_available, load_checkpoint_in_model, synchronize_rng_states, ) if is_rich_available(): from .utils import rich
156
0
import argparse

import torch

from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging


logging.set_verbosity_info()


def convert_rembert_tf_checkpoint_to_pytorch(tf_checkpoint_path, rembert_config_file, pytorch_dump_path):
    """Convert a TensorFlow RemBERT checkpoint into a PyTorch state-dict file.

    Args:
        tf_checkpoint_path: path to the TensorFlow checkpoint.
        rembert_config_file: JSON config describing the model architecture.
        pytorch_dump_path: where to write the converted ``state_dict``.
    """
    # The obfuscated source declared all three parameters with the same name and
    # never bound `config`/`model`; reconstructed from the argparse help texts.
    config = RemBertConfig.from_json_file(rembert_config_file)
    print("""Building PyTorch model from configuration: {}""".format(str(config)))
    model = RemBertModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print("""Save PyTorch model to {}""".format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
    )
    parser.add_argument(
        '--rembert_config_file',
        default=None,
        type=str,
        required=True,
        help=(
            'The config json file corresponding to the pre-trained RemBERT model. \n'
            'This specifies the model architecture.'
        ),
    )
    parser.add_argument(
        '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
    )
    args = parser.parse_args()
    convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
571
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional

import datasets
import numpy as np
import tensorflow as tf

from transformers import (
    AutoConfig,
    AutoTokenizer,
    EvalPrediction,
    HfArgumentParser,
    PreTrainedTokenizer,
    TFAutoModelForSequenceClassification,
    TFTrainer,
    TFTrainingArguments,
)
from transformers.utils import logging as hf_logging


hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()


def get_tfds(
    train_file: str,
    eval_file: str,
    test_file: str,
    tokenizer: PreTrainedTokenizer,
    label_column_id: int,
    max_seq_length: Optional[int] = None,
):
    """Load CSV files into tokenized ``tf.data`` datasets.

    Returns:
        (train_ds, val_ds, test_ds, label2id) where each dataset may be None if
        the corresponding file was not provided.
    """
    # NOTE(review): the obfuscated source collapsed every local name to one
    # identifier and then referenced the originals; names reconstructed from
    # those later references (files, ds, labelaid->label2id, etc.).
    files = {}
    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]

    ds = datasets.load_dataset("""csv""", data_files=files)
    features_name = list(ds[list(files.keys())[0]].features.keys())
    label_name = features_name.pop(label_column_id)
    label_list = list(set(ds[list(files.keys())[0]][label_name]))
    label2id = {label: i for i, label in enumerate(label_list)}
    input_names = tokenizer.model_input_names
    transformed_ds = {}

    if len(features_name) == 1:
        # Single text column.
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    example[features_name[0]], truncation=True, max_length=max_seq_length, padding="""max_length"""
                ),
                batched=True,
            )
    elif len(features_name) == 2:
        # Sentence-pair input.
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]),
                    truncation=True,
                    max_length=max_seq_length,
                    padding="""max_length""",
                ),
                batched=True,
            )

    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    # NOTE(review): the source's dtypes were mangled to `tf.intaa`; int32 inputs /
    # int64 labels reconstructed — confirm against the TF feature columns.
    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )
    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN])))

    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )
    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION])))

    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TEST in transformed_ds
        else None
    )
    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST])))

    return train_ds, val_ds, test_ds, label2id


logger = logging.getLogger(__name__)


@dataclass
class DataTrainingArguments:
    """Arguments describing the input CSV data."""

    label_column_id: int = field(metadata={'''help''': '''Which column contains the label'''})
    train_file: Optional[str] = field(default=None, metadata={'''help''': '''The path of the training file'''})
    dev_file: Optional[str] = field(default=None, metadata={'''help''': '''The path of the development file'''})
    test_file: Optional[str] = field(default=None, metadata={'''help''': '''The path of the test file'''})
    max_seq_length: int = field(
        default=128,
        metadata={
            '''help''': (
                '''The maximum total input sequence length after tokenization. Sequences longer '''
                '''than this will be truncated, sequences shorter will be padded.'''
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={'''help''': '''Overwrite the cached training and evaluation sets'''}
    )


@dataclass
class ModelArguments:
    """Arguments pointing at the pretrained model/config/tokenizer to fine-tune."""

    model_name_or_path: str = field(
        metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''}
    )
    config_name: Optional[str] = field(
        default=None, metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''}
    )
    use_fast: bool = field(default=False, metadata={'''help''': '''Set this flag to use fast tokenization.'''})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None,
        metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''},
    )


def main():
    """Fine-tune and/or evaluate a TF sequence-classification model on CSV data."""
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
            """ --overwrite_output_dir to overcome."""
        )

    # Setup logging
    logging.basicConfig(
        format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""",
        datefmt="""%m/%d/%Y %H:%M:%S""",
        level=logging.INFO,
    )
    logger.info(
        f'''n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1)}, '''
        f'''16-bits training: {training_args.fp16}'''
    )
    logger.info(f'''Training/evaluation parameters {training_args}''')

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    train_dataset, eval_dataset, test_ds, label2id = get_tfds(
        train_file=data_args.train_file,
        eval_file=data_args.dev_file,
        test_file=data_args.test_file,
        tokenizer=tokenizer,
        label_column_id=data_args.label_column_id,
        max_seq_length=data_args.max_seq_length,
    )
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=len(label2id),
        label2id=label2id,
        id2label={id: label for label, id in label2id.items()},
        finetuning_task="""text-classification""",
        cache_dir=model_args.cache_dir,
    )

    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path,
            from_pt=bool(""".bin""" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": (preds == p.label_ids).mean()}

    # Initialize our Trainer
    trainer = TFTrainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
    )

    # Training
    if training_args.do_train:
        trainer.train()
        trainer.save_model()
        tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("""*** Evaluate ***""")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, """eval_results.txt""")
        with open(output_eval_file, """w""") as writer:
            logger.info("""***** Eval results *****""")
            for key, value in result.items():
                logger.info(f''' {key} = {value}''')
                writer.write(f'''{key} = {value}\n''')
        results.update(result)

    return results


if __name__ == "__main__":
    main()
571
1
"""simple docstring""" from .data_collator import ( DataCollatorForLanguageModeling, DataCollatorForPermutationLanguageModeling, DataCollatorForSeqaSeq, DataCollatorForSOP, DataCollatorForTokenClassification, DataCollatorForWholeWordMask, DataCollatorWithPadding, DefaultDataCollator, default_data_collator, ) from .metrics import glue_compute_metrics, xnli_compute_metrics from .processors import ( DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor, SquadExample, SquadFeatures, SquadVaProcessor, SquadVaProcessor, glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels, squad_convert_examples_to_features, xnli_output_modes, xnli_processors, xnli_tasks_num_labels, )
642
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


# Lazy-import structure: mapping of submodule name -> exported symbols. The
# obfuscated source bound this dict to `a` and then clobbered it with the
# optional-dependency lists, while still calling _LazyModule with the undefined
# name `_import_structure`; reconstructed to the standard transformers pattern.
_import_structure = {
    "configuration_pix2struct": [
        "PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Pix2StructConfig",
        "Pix2StructTextConfig",
        "Pix2StructVisionConfig",
    ],
    "processing_pix2struct": ["Pix2StructProcessor"],
}

# Image processor requires vision extras.
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_pix2struct"] = ["Pix2StructImageProcessor"]

# Modeling code requires torch.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_pix2struct"] = [
        "PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Pix2StructPreTrainedModel",
        "Pix2StructForConditionalGeneration",
        "Pix2StructVisionModel",
        "Pix2StructTextModel",
    ]

if TYPE_CHECKING:
    # Static type-checkers see the real imports; names match the strings above
    # (the source's `PixaStruct*` spellings were mangling artifacts).
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_pix2struct import Pix2StructImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )

else:
    import sys

    # Replace this module with a lazy loader so heavy deps import on first use.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
518
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available UpperCamelCase : Optional[Any] = { "configuration_luke": ["LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP", "LukeConfig"], "tokenization_luke": ["LukeTokenizer"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase : List[str] = [ "LUKE_PRETRAINED_MODEL_ARCHIVE_LIST", "LukeForEntityClassification", "LukeForEntityPairClassification", "LukeForEntitySpanClassification", "LukeForMultipleChoice", "LukeForQuestionAnswering", "LukeForSequenceClassification", "LukeForTokenClassification", "LukeForMaskedLM", "LukeModel", "LukePreTrainedModel", ] if TYPE_CHECKING: from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig from .tokenization_luke import LukeTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_luke import ( LUKE_PRETRAINED_MODEL_ARCHIVE_LIST, LukeForEntityClassification, LukeForEntityPairClassification, LukeForEntitySpanClassification, LukeForMaskedLM, LukeForMultipleChoice, LukeForQuestionAnswering, LukeForSequenceClassification, LukeForTokenClassification, LukeModel, LukePreTrainedModel, ) else: import sys UpperCamelCase : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
701
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase : Tuple = logging.get_logger(__name__) UpperCamelCase : Union[str, Any] = { "SCUT-DLVCLab/lilt-roberta-en-base": ( "https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json" ), } class lowerCamelCase__ ( UpperCAmelCase_ ): lowerCAmelCase = """lilt""" def __init__( self : Optional[Any] , _lowercase : Dict=30_522 , _lowercase : Any=768 , _lowercase : Union[str, Any]=12 , _lowercase : List[str]=12 , _lowercase : str=3_072 , _lowercase : int="gelu" , _lowercase : Union[str, Any]=0.1 , _lowercase : Dict=0.1 , _lowercase : Optional[Any]=512 , _lowercase : Optional[Any]=2 , _lowercase : Tuple=0.0_2 , _lowercase : int=1e-12 , _lowercase : Any=0 , _lowercase : List[str]="absolute" , _lowercase : Dict=None , _lowercase : Optional[int]=4 , _lowercase : Optional[int]=1_024 , **_lowercase : Union[str, Any] , ): super().__init__(pad_token_id=_lowercase , **_lowercase ) A = vocab_size A = hidden_size A = num_hidden_layers A = num_attention_heads A = hidden_act A = intermediate_size A = hidden_dropout_prob A = attention_probs_dropout_prob A = max_position_embeddings A = type_vocab_size A = initializer_range A = layer_norm_eps A = position_embedding_type A = classifier_dropout A = channel_shrink_ratio A = max_ad_position_embeddings
91
0
'''simple docstring''' import numpy as np import torch from torch.utils.data import Dataset, IterableDataset from ..utils.generic import ModelOutput class __magic_name__ ( lowerCAmelCase ): def __init__( self , snake_case , snake_case , snake_case) -> Any: '''simple docstring''' _UpperCAmelCase : Optional[Any] =dataset _UpperCAmelCase : Union[str, Any] =process _UpperCAmelCase : Optional[Any] =params def __len__( self) -> Optional[int]: '''simple docstring''' return len(self.dataset) def __getitem__( self , snake_case) -> Union[str, Any]: '''simple docstring''' _UpperCAmelCase : Union[str, Any] =self.dataset[i] _UpperCAmelCase : Union[str, Any] =self.process(snake_case , **self.params) return processed class __magic_name__ ( lowerCAmelCase ): def __init__( self , snake_case , snake_case , snake_case , snake_case=None) -> Optional[int]: '''simple docstring''' _UpperCAmelCase : Any =loader _UpperCAmelCase : Any =infer _UpperCAmelCase : Optional[Any] =params if loader_batch_size == 1: # Let's spare some time by deactivating altogether _UpperCAmelCase : Any =None _UpperCAmelCase : Optional[int] =loader_batch_size # Internal bookkeeping _UpperCAmelCase : Tuple =None _UpperCAmelCase : int =None def __len__( self) -> int: '''simple docstring''' return len(self.loader) def __iter__( self) -> List[Any]: '''simple docstring''' _UpperCAmelCase : Optional[Any] =iter(self.loader) return self def lowerCAmelCase ( self) -> Optional[int]: '''simple docstring''' if isinstance(self._loader_batch_data , torch.Tensor): # Batch data is simple tensor, just fetch the slice _UpperCAmelCase : str =self._loader_batch_data[self._loader_batch_index] else: # Batch data is assumed to be BaseModelOutput (or dict) _UpperCAmelCase : Tuple ={} for k, element in self._loader_batch_data.items(): if isinstance(snake_case , snake_case): # Convert ModelOutput to tuple first _UpperCAmelCase : str =element.to_tuple() if isinstance(element[0] , torch.Tensor): _UpperCAmelCase : Tuple 
=tuple(el[self._loader_batch_index].unsqueeze(0) for el in element) elif isinstance(element[0] , np.ndarray): _UpperCAmelCase : Dict =tuple(np.expand_dims(el[self._loader_batch_index] , 0) for el in element) continue if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(snake_case , snake_case): # Those are stored as lists of tensors so need specific unbatching. if isinstance(element[0] , torch.Tensor): _UpperCAmelCase : Union[str, Any] =tuple(el[self._loader_batch_index].unsqueeze(0) for el in element) elif isinstance(element[0] , np.ndarray): _UpperCAmelCase : Union[str, Any] =tuple(np.expand_dims(el[self._loader_batch_index] , 0) for el in element) continue if element is None: # This can happen for optional data that get passed around _UpperCAmelCase : List[Any] =None elif isinstance(element[self._loader_batch_index] , torch.Tensor): # Take correct batch data, but make it looked like batch_size=1 # For compatibility with other methods within transformers _UpperCAmelCase : int =element[self._loader_batch_index].unsqueeze(0) elif isinstance(element[self._loader_batch_index] , np.ndarray): # Take correct batch data, but make it looked like batch_size=1 # For compatibility with other methods within transformers _UpperCAmelCase : List[Any] =np.expand_dims(element[self._loader_batch_index] , 0) else: # This is typically a list, so no need to `unsqueeze`. 
_UpperCAmelCase : Union[str, Any] =element[self._loader_batch_index] # Recreate the element by reusing the original class to make it look # batch_size=1 _UpperCAmelCase : Union[str, Any] =self._loader_batch_data.__class__(snake_case) self._loader_batch_index += 1 return result def lowerCAmelCase ( self) -> Any: '''simple docstring''' if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size: # We are currently unrolling a batch so we just need to return # the current item within a batch return self.loader_batch_item() # We're out of items within a batch _UpperCAmelCase : Any =next(self.iterator) _UpperCAmelCase : Dict =self.infer(snake_case , **self.params) # We now have a batch of "inferred things". if self.loader_batch_size is not None: # Try to infer the size of the batch if isinstance(snake_case , torch.Tensor): _UpperCAmelCase : int =processed else: _UpperCAmelCase : Optional[int] =list(processed.keys())[0] _UpperCAmelCase : List[Any] =processed[key] if isinstance(snake_case , snake_case): _UpperCAmelCase : List[str] =len(snake_case) else: _UpperCAmelCase : int =first_tensor.shape[0] if 0 < observed_batch_size < self.loader_batch_size: # could be last batch so we can't unroll as many # elements. 
_UpperCAmelCase : List[str] =observed_batch_size # Setting internal index to unwrap the batch _UpperCAmelCase : Union[str, Any] =processed _UpperCAmelCase : Optional[Any] =0 return self.loader_batch_item() else: # We're not unrolling batches return processed class __magic_name__ ( lowerCAmelCase ): def __init__( self , snake_case , snake_case , snake_case , snake_case=None) -> Any: '''simple docstring''' super().__init__(snake_case , snake_case , snake_case) def __iter__( self) -> List[Any]: '''simple docstring''' _UpperCAmelCase : int =iter(self.loader) _UpperCAmelCase : Any =None return self def lowerCAmelCase ( self) -> Optional[Any]: '''simple docstring''' if self.subiterator is None: _UpperCAmelCase : Optional[int] =self.infer(next(self.iterator) , **self.params) try: # Try to return next item _UpperCAmelCase : Any =next(self.subiterator) except StopIteration: # When a preprocess iterator ends, we can start lookig at the next item # ChunkIterator will keep feeding until ALL elements of iterator # all have created their subiterator and have been iterating against. # # Another way to look at it, is we're basically flattening lists of lists # into a single list, but with generators _UpperCAmelCase : Union[str, Any] =self.infer(next(self.iterator) , **self.params) _UpperCAmelCase : str =next(self.subiterator) return processed class __magic_name__ ( lowerCAmelCase ): def __iter__( self) -> str: '''simple docstring''' _UpperCAmelCase : int =iter(self.loader) return self def lowerCAmelCase ( self) -> List[str]: '''simple docstring''' # Extremely similar to PipelineIterator in its unpacking mechanism # BUT, we have an extra required item which is the presence of `is_last` # That is because everything is flattened by `PipelineChunkIterator` we # need to keep track of how to regroup here in the original `process` # boundaries so that `process` and `postprocess` see the same data. 
# This iterator accumulates items (possibly while unbatching) until it # its a `is_last` and then just passes it on to the caller. _UpperCAmelCase : List[str] =False _UpperCAmelCase : Tuple =[] if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size: while self._loader_batch_index < self.loader_batch_size: _UpperCAmelCase : int =self.loader_batch_item() _UpperCAmelCase : Any =item.pop('is_last') accumulator.append(snake_case) if is_last: return accumulator while not is_last: _UpperCAmelCase : List[Any] =self.infer(next(self.iterator) , **self.params) if self.loader_batch_size is not None: if isinstance(snake_case , torch.Tensor): _UpperCAmelCase : List[Any] =processed else: _UpperCAmelCase : str =list(processed.keys())[0] _UpperCAmelCase : Dict =processed[key] if isinstance(snake_case , snake_case): _UpperCAmelCase : Union[str, Any] =len(snake_case) else: _UpperCAmelCase : Optional[int] =first_tensor.shape[0] if 0 < observed_batch_size < self.loader_batch_size: # could be last batch so we can't unroll as many # elements. 
_UpperCAmelCase : Tuple =observed_batch_size _UpperCAmelCase : List[str] =processed _UpperCAmelCase : Union[str, Any] =0 while self._loader_batch_index < self.loader_batch_size: _UpperCAmelCase : Tuple =self.loader_batch_item() _UpperCAmelCase : int =item.pop('is_last') accumulator.append(snake_case) if is_last: return accumulator else: _UpperCAmelCase : int =processed _UpperCAmelCase : Optional[int] =item.pop('is_last') accumulator.append(snake_case) return accumulator class __magic_name__ ( lowerCAmelCase ): def __init__( self , snake_case , snake_case) -> Any: '''simple docstring''' _UpperCAmelCase : Union[str, Any] =dataset _UpperCAmelCase : List[Any] =key def __len__( self) -> Any: '''simple docstring''' return len(self.dataset) def __getitem__( self , snake_case) -> int: '''simple docstring''' return self.dataset[i][self.key] class __magic_name__ ( lowerCAmelCase ): def __init__( self , snake_case , snake_case , snake_case) -> Optional[int]: '''simple docstring''' _UpperCAmelCase : List[str] =dataset _UpperCAmelCase : Union[str, Any] =keya _UpperCAmelCase : int =keya def __len__( self) -> Optional[Any]: '''simple docstring''' return len(self.dataset) def __getitem__( self , snake_case) -> Tuple: '''simple docstring''' return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keya]}
446
'''simple docstring''' from __future__ import annotations import unittest import numpy as np from transformers import LayoutLMConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.layoutlm.modeling_tf_layoutlm import ( TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFLayoutLMForMaskedLM, TFLayoutLMForQuestionAnswering, TFLayoutLMForSequenceClassification, TFLayoutLMForTokenClassification, TFLayoutLMModel, ) class __magic_name__ : def __init__( self , snake_case , snake_case=1_3 , snake_case=7 , snake_case=True , snake_case=True , snake_case=True , snake_case=True , snake_case=9_9 , snake_case=3_2 , snake_case=2 , snake_case=4 , snake_case=3_7 , snake_case="gelu" , snake_case=0.1 , snake_case=0.1 , snake_case=5_1_2 , snake_case=1_6 , snake_case=2 , snake_case=0.02 , snake_case=3 , snake_case=4 , snake_case=None , snake_case=1_0_0_0 , ) -> Optional[Any]: '''simple docstring''' _UpperCAmelCase : List[str] =parent _UpperCAmelCase : str =batch_size _UpperCAmelCase : List[Any] =seq_length _UpperCAmelCase : List[str] =is_training _UpperCAmelCase : Any =use_input_mask _UpperCAmelCase : List[str] =use_token_type_ids _UpperCAmelCase : Optional[int] =use_labels _UpperCAmelCase : Optional[Any] =vocab_size _UpperCAmelCase : str =hidden_size _UpperCAmelCase : str =num_hidden_layers _UpperCAmelCase : Tuple =num_attention_heads _UpperCAmelCase : List[Any] =intermediate_size _UpperCAmelCase : str =hidden_act _UpperCAmelCase : str =hidden_dropout_prob _UpperCAmelCase : Any =attention_probs_dropout_prob _UpperCAmelCase : int =max_position_embeddings _UpperCAmelCase : List[Any] =type_vocab_size _UpperCAmelCase : List[str] =type_sequence_label_size _UpperCAmelCase : Union[str, Any] 
=initializer_range _UpperCAmelCase : Tuple =num_labels _UpperCAmelCase : int =num_choices _UpperCAmelCase : List[str] =scope _UpperCAmelCase : Any =range_bbox def lowerCAmelCase ( self) -> List[Any]: '''simple docstring''' _UpperCAmelCase : Tuple =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) # convert bbox to numpy since TF does not support item assignment _UpperCAmelCase : List[str] =ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox).numpy() # Ensure that bbox is legal for i in range(bbox.shape[0]): for j in range(bbox.shape[1]): if bbox[i, j, 3] < bbox[i, j, 1]: _UpperCAmelCase : Optional[Any] =bbox[i, j, 3] _UpperCAmelCase : Optional[Any] =bbox[i, j, 1] _UpperCAmelCase : str =t if bbox[i, j, 2] < bbox[i, j, 0]: _UpperCAmelCase : Optional[int] =bbox[i, j, 2] _UpperCAmelCase : Tuple =bbox[i, j, 0] _UpperCAmelCase : Dict =t _UpperCAmelCase : Union[str, Any] =tf.convert_to_tensor(snake_case) _UpperCAmelCase : str =None if self.use_input_mask: _UpperCAmelCase : Optional[Any] =random_attention_mask([self.batch_size, self.seq_length]) _UpperCAmelCase : Union[str, Any] =None if self.use_token_type_ids: _UpperCAmelCase : Optional[Any] =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size) _UpperCAmelCase : Optional[int] =None _UpperCAmelCase : Tuple =None _UpperCAmelCase : int =None if self.use_labels: _UpperCAmelCase : str =ids_tensor([self.batch_size] , self.type_sequence_label_size) _UpperCAmelCase : List[str] =ids_tensor([self.batch_size, self.seq_length] , self.num_labels) _UpperCAmelCase : Tuple =ids_tensor([self.batch_size] , self.num_choices) _UpperCAmelCase : Optional[int] =LayoutLMConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , 
attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCAmelCase ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case) -> Dict: '''simple docstring''' _UpperCAmelCase : int =TFLayoutLMModel(config=snake_case) _UpperCAmelCase : Dict =model(snake_case , snake_case , attention_mask=snake_case , token_type_ids=snake_case) _UpperCAmelCase : Dict =model(snake_case , snake_case , token_type_ids=snake_case) _UpperCAmelCase : int =model(snake_case , snake_case) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size)) def lowerCAmelCase ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case) -> List[Any]: '''simple docstring''' _UpperCAmelCase : Optional[Any] =TFLayoutLMForMaskedLM(config=snake_case) _UpperCAmelCase : Optional[Any] =model(snake_case , snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size)) def lowerCAmelCase ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case) -> Any: '''simple docstring''' _UpperCAmelCase : Dict =self.num_labels _UpperCAmelCase : Dict =TFLayoutLMForSequenceClassification(config=snake_case) _UpperCAmelCase : Union[str, Any] =model(snake_case , snake_case , attention_mask=snake_case , token_type_ids=snake_case) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels)) def lowerCAmelCase ( self , snake_case , snake_case , 
snake_case , snake_case , snake_case , snake_case , snake_case , snake_case) -> Optional[Any]: '''simple docstring''' _UpperCAmelCase : str =self.num_labels _UpperCAmelCase : Any =TFLayoutLMForTokenClassification(config=snake_case) _UpperCAmelCase : str =model(snake_case , snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels)) def lowerCAmelCase ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case) -> str: '''simple docstring''' _UpperCAmelCase : Any =TFLayoutLMForQuestionAnswering(config=snake_case) _UpperCAmelCase : Union[str, Any] =model(snake_case , snake_case , attention_mask=snake_case , token_type_ids=snake_case) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length)) def lowerCAmelCase ( self) -> Tuple: '''simple docstring''' _UpperCAmelCase : Any =self.prepare_config_and_inputs() ( ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ) : Optional[int] =config_and_inputs _UpperCAmelCase : Any ={ 'input_ids': input_ids, 'bbox': bbox, 'token_type_ids': token_type_ids, 'attention_mask': input_mask, } return config, inputs_dict @require_tf class __magic_name__ ( lowerCAmelCase ,lowerCAmelCase ,unittest.TestCase ): UpperCAmelCase =( ( TFLayoutLMModel, TFLayoutLMForMaskedLM, TFLayoutLMForTokenClassification, TFLayoutLMForSequenceClassification, TFLayoutLMForQuestionAnswering, ) if is_tf_available() else () ) UpperCAmelCase =( { "feature-extraction": TFLayoutLMModel, "fill-mask": TFLayoutLMForMaskedLM, "text-classification": TFLayoutLMForSequenceClassification, "token-classification": TFLayoutLMForTokenClassification, "zero-shot": 
TFLayoutLMForSequenceClassification, } if is_tf_available() else {} ) UpperCAmelCase =False UpperCAmelCase =True UpperCAmelCase =1_0 def lowerCAmelCase ( self) -> Tuple: '''simple docstring''' _UpperCAmelCase : List[str] =TFLayoutLMModelTester(self) _UpperCAmelCase : int =ConfigTester(self , config_class=snake_case , hidden_size=3_7) def lowerCAmelCase ( self) -> Optional[int]: '''simple docstring''' self.config_tester.run_common_tests() def lowerCAmelCase ( self) -> Dict: '''simple docstring''' _UpperCAmelCase : Tuple =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case) def lowerCAmelCase ( self) -> Tuple: '''simple docstring''' _UpperCAmelCase : List[str] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*snake_case) def lowerCAmelCase ( self) -> Dict: '''simple docstring''' _UpperCAmelCase : Optional[int] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*snake_case) def lowerCAmelCase ( self) -> Optional[Any]: '''simple docstring''' _UpperCAmelCase : str =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*snake_case) def lowerCAmelCase ( self) -> str: '''simple docstring''' _UpperCAmelCase : Dict =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*snake_case) @slow def lowerCAmelCase ( self) -> Tuple: '''simple docstring''' for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _UpperCAmelCase : str =TFLayoutLMModel.from_pretrained(snake_case) self.assertIsNotNone(snake_case) @unittest.skip('Onnx compliancy broke with TF 2.10') def lowerCAmelCase ( self) -> int: '''simple docstring''' pass def lowerCamelCase__ ( ): '''simple docstring''' _UpperCAmelCase : int 
=tf.convert_to_tensor([[1_0_1,1_0_1_9,1_0_1_4,1_0_1_6,1_0_3_7,1_2_8_4_9,4_7_4_7,1_0_0_4,1_4_2_4_6,2_2_7_8,5_4_3_9,4_5_2_4,5_0_0_2,2_9_3_0,2_1_9_3,2_9_3_0,4_3_4_1,3_2_0_8,1_0_0_5,1_0_5_5,2_1_7_1,2_8_4_8,1_1_3_0_0,3_5_3_1,1_0_2],[1_0_1,4_0_7_0,4_0_3_4,7_0_2_0,1_0_2_4,3_0_5_8,1_0_1_5,1_0_1_3,2_8_6_1,1_0_1_3,6_0_7_0,1_9_2_7_4,2_7_7_2,6_2_0_5,2_7_8_1_4,1_6_1_4_7,1_6_1_4_7,4_3_4_3,2_0_4_7,1_0_2_8_3,1_0_9_6_9,1_4_3_8_9,1_0_1_2,2_3_3_8,1_0_2]] ) # noqa: E231 _UpperCAmelCase : Optional[int] =tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],] ) # noqa: E231 _UpperCAmelCase : Dict =tf.convert_to_tensor([[[0,0,0,0],[4_2_3,2_3_7,4_4_0,2_5_1],[4_2_7,2_7_2,4_4_1,2_8_7],[4_1_9,1_1_5,4_3_7,1_2_9],[9_6_1,8_8_5,9_9_2,9_1_2],[2_5_6,3_8,3_3_0,5_8],[2_5_6,3_8,3_3_0,5_8],[3_3_6,4_2,3_5_3,5_7],[3_6_0,3_9,4_0_1,5_6],[3_6_0,3_9,4_0_1,5_6],[4_1_1,3_9,4_7_1,5_9],[4_7_9,4_1,5_2_8,5_9],[5_3_3,3_9,6_3_0,6_0],[6_7,1_1_3,1_3_4,1_3_1],[1_4_1,1_1_5,2_0_9,1_3_2],[6_8,1_4_9,1_3_3,1_6_6],[1_4_1,1_4_9,1_8_7,1_6_4],[1_9_5,1_4_8,2_8_7,1_6_5],[1_9_5,1_4_8,2_8_7,1_6_5],[1_9_5,1_4_8,2_8_7,1_6_5],[2_9_5,1_4_8,3_4_9,1_6_5],[4_4_1,1_4_9,4_9_2,1_6_6],[4_9_7,1_4_9,5_4_6,1_6_4],[6_4,2_0_1,1_2_5,2_1_8],[1_0_0_0,1_0_0_0,1_0_0_0,1_0_0_0]],[[0,0,0,0],[6_6_2,1_5_0,7_5_4,1_6_6],[6_6_5,1_9_9,7_4_2,2_1_1],[5_1_9,2_1_3,5_5_4,2_2_8],[5_1_9,2_1_3,5_5_4,2_2_8],[1_3_4,4_3_3,1_8_7,4_5_4],[1_3_0,4_6_7,2_0_4,4_8_0],[1_3_0,4_6_7,2_0_4,4_8_0],[1_3_0,4_6_7,2_0_4,4_8_0],[1_3_0,4_6_7,2_0_4,4_8_0],[1_3_0,4_6_7,2_0_4,4_8_0],[3_1_4,4_6_9,3_7_6,4_8_2],[5_0_4,6_8_4,5_8_2,7_0_6],[9_4_1,8_2_5,9_7_3,9_0_0],[9_4_1,8_2_5,9_7_3,9_0_0],[9_4_1,8_2_5,9_7_3,9_0_0],[9_4_1,8_2_5,9_7_3,9_0_0],[6_1_0,7_4_9,6_5_2,7_6_5],[1_3_0,6_5_9,1_6_8,6_7_2],[1_7_6,6_5_7,2_3_7,6_7_2],[2_3_8,6_5_7,3_1_2,6_7_2],[4_4_3,6_5_3,6_2_8,6_7_2],[4_4_3,6_5_3,6_2_8,6_7_2],[7_1_6,3_0_1,8_2_5,3_1_7],[1_0_0_0,1_0_0_0,1_0_0_0,1_0_0_0]]] ) # noqa: E231 
_UpperCAmelCase : List[Any] =tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]] ) # noqa: E231 # these are sequence labels (i.e. at the token level) _UpperCAmelCase : List[Any] =tf.convert_to_tensor([[-1_0_0,1_0,1_0,1_0,9,1,-1_0_0,7,7,-1_0_0,7,7,4,2,5,2,8,8,-1_0_0,-1_0_0,5,0,3,2,-1_0_0],[-1_0_0,1_2,1_2,1_2,-1_0_0,1_2,1_0,-1_0_0,-1_0_0,-1_0_0,-1_0_0,1_0,1_2,9,-1_0_0,-1_0_0,-1_0_0,1_0,1_0,1_0,9,1_2,-1_0_0,1_0,-1_0_0]] ) # noqa: E231 # fmt: on return input_ids, attention_mask, bbox, token_type_ids, labels @require_tf class __magic_name__ ( unittest.TestCase ): @slow def lowerCAmelCase ( self) -> Optional[int]: '''simple docstring''' _UpperCAmelCase : Optional[Any] =TFLayoutLMModel.from_pretrained('microsoft/layoutlm-base-uncased') _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Tuple =prepare_layoutlm_batch_inputs() # forward pass _UpperCAmelCase : Dict =model(input_ids=snake_case , bbox=snake_case , attention_mask=snake_case , token_type_ids=snake_case) # test the sequence output on [0, :3, :3] _UpperCAmelCase : List[Any] =tf.convert_to_tensor( [[0.17_85, -0.19_47, -0.04_25], [-0.32_54, -0.28_07, 0.25_53], [-0.53_91, -0.33_22, 0.33_64]] , ) self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , snake_case , atol=1E-3)) # test the pooled output on [1, :3] _UpperCAmelCase : List[Any] =tf.convert_to_tensor([-0.65_80, -0.02_14, 0.85_52]) self.assertTrue(np.allclose(outputs.pooler_output[1, :3] , snake_case , atol=1E-3)) @slow def lowerCAmelCase ( self) -> Dict: '''simple docstring''' # initialize model with randomly initialized sequence classification head _UpperCAmelCase : str =TFLayoutLMForSequenceClassification.from_pretrained('microsoft/layoutlm-base-uncased' , num_labels=2) _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : int =prepare_layoutlm_batch_inputs() # forward pass _UpperCAmelCase : 
Optional[Any] =model( input_ids=snake_case , bbox=snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=tf.convert_to_tensor([1, 1]) , ) # test whether we get a loss as a scalar _UpperCAmelCase : Optional[int] =outputs.loss _UpperCAmelCase : int =(2,) self.assertEqual(loss.shape , snake_case) # test the shape of the logits _UpperCAmelCase : int =outputs.logits _UpperCAmelCase : Union[str, Any] =(2, 2) self.assertEqual(logits.shape , snake_case) @slow def lowerCAmelCase ( self) -> int: '''simple docstring''' # initialize model with randomly initialized token classification head _UpperCAmelCase : Optional[int] =TFLayoutLMForTokenClassification.from_pretrained('microsoft/layoutlm-base-uncased' , num_labels=1_3) _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Optional[Any] =prepare_layoutlm_batch_inputs() # forward pass _UpperCAmelCase : str =model( input_ids=snake_case , bbox=snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case) # test the shape of the logits _UpperCAmelCase : str =outputs.logits _UpperCAmelCase : Optional[Any] =tf.convert_to_tensor((2, 2_5, 1_3)) self.assertEqual(logits.shape , snake_case) @slow def lowerCAmelCase ( self) -> Optional[int]: '''simple docstring''' # initialize model with randomly initialized token classification head _UpperCAmelCase : Optional[Any] =TFLayoutLMForQuestionAnswering.from_pretrained('microsoft/layoutlm-base-uncased') _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Optional[Any] =prepare_layoutlm_batch_inputs() # forward pass _UpperCAmelCase : Any =model(input_ids=snake_case , bbox=snake_case , attention_mask=snake_case , token_type_ids=snake_case) # test the shape of the logits _UpperCAmelCase : Optional[int] =tf.convert_to_tensor((2, 2_5)) self.assertEqual(outputs.start_logits.shape , snake_case) self.assertEqual(outputs.end_logits.shape , snake_case)
446
1
"""Tests for AudioDiffusionPipeline: fast CPU checks with tiny dummy models and
slow GPU integration tests against a pretrained checkpoint."""

import gc
import unittest

import numpy as np
import torch

from diffusers import (
    AudioDiffusionPipeline,
    AutoencoderKL,
    DDIMScheduler,
    DDPMScheduler,
    DiffusionPipeline,
    Mel,
    UNetaDConditionModel,
    UNetaDModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

enable_full_determinism()


class PipelineFastTests(unittest.TestCase):
    """Fast, CPU-only checks of AudioDiffusionPipeline using tiny randomly
    initialized models.

    NOTE(review): the source defined every method under one shadowed name and
    returned undefined locals; names below are restored from the visible call
    sites (``self.dummy_unet``, ``self.dummy_vqvae_and_unet``,
    ``self.dummy_unet_condition``, ``super().tearDown()``).
    """

    def tearDown(self):
        # Release model memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_unet(self):
        """Tiny unconditional 2D UNet used by the DDPM sub-test."""
        torch.manual_seed(0)
        model = UNetaDModel(
            sample_size=(32, 64),
            in_channels=1,
            out_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=('AttnDownBlock2D', 'DownBlock2D'),
            up_block_types=('UpBlock2D', 'AttnUpBlock2D'),
        )
        return model

    @property
    def dummy_unet_condition(self):
        """Tiny conditional UNet (cross-attention dim 10) for the encoding sub-test."""
        torch.manual_seed(0)
        model = UNetaDConditionModel(
            sample_size=(64, 32),
            in_channels=1,
            out_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=('CrossAttnDownBlock2D', 'DownBlock2D'),
            up_block_types=('UpBlock2D', 'CrossAttnUpBlock2D'),
            cross_attention_dim=10,
        )
        return model

    @property
    def dummy_vqvae_and_unet(self):
        """Tiny (VAE, UNet) pair for the latent-space DDIM sub-test."""
        torch.manual_seed(0)
        vqvae = AutoencoderKL(
            sample_size=(128, 64),
            in_channels=1,
            out_channels=1,
            latent_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=('DownEncoderBlock2D', 'DownEncoderBlock2D'),
            up_block_types=('UpDecoderBlock2D', 'UpDecoderBlock2D'),
        )
        unet = UNetaDModel(
            sample_size=(64, 32),
            in_channels=1,
            out_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=('AttnDownBlock2D', 'DownBlock2D'),
            up_block_types=('UpBlock2D', 'AttnUpBlock2D'),
        )
        return vqvae, unet

    @slow
    def test_audio_diffusion(self):
        """End-to-end pipeline checks: DDPM without VAE, DDIM with VAE on raw
        audio, and a conditional UNet driven by an encoding tensor."""
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        mel = Mel(
            x_res=self.dummy_unet.config.sample_size[1],
            y_res=self.dummy_unet.config.sample_size[0],
        )

        scheduler = DDPMScheduler()
        # NOTE(review): source had an undefined placeholder for the vqvae
        # argument; None (no VAE) is consistent with the image-size assertions
        # below matching the UNet sample size directly — confirm.
        pipe = AudioDiffusionPipeline(vqvae=None, unet=self.dummy_unet, mel=mel, scheduler=scheduler)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator, steps=4)
        audio = output.audios[0]
        image = output.images[0]

        generator = torch.Generator(device=device).manual_seed(42)
        # return_dict=False so the pipeline returns a tuple, indexed below.
        output = pipe(generator=generator, steps=4, return_dict=False)
        image_from_tuple = output[0][0]

        assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
        assert (
            image.height == self.dummy_unet.config.sample_size[0]
            and image.width == self.dummy_unet.config.sample_size[1]
        )
        image_slice = np.frombuffer(image.tobytes(), dtype='uint8')[:10]
        image_from_tuple_slice = np.frombuffer(image_from_tuple.tobytes(), dtype='uint8')[:10]
        expected_slice = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() == 0

        mel = Mel(
            x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1],
            y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0],
        )
        scheduler = DDIMScheduler()
        dummy_vqvae_and_unet = self.dummy_vqvae_and_unet
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0],
            unet=dummy_vqvae_and_unet[1],
            mel=mel,
            scheduler=scheduler,
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        np.random.seed(0)
        raw_audio = np.random.uniform(
            -1, 1, ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,)
        )
        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(raw_audio=raw_audio, generator=generator, start_step=5, steps=10)
        image = output.images[0]

        assert (
            image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
            and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
        )
        image_slice = np.frombuffer(image.tobytes(), dtype='uint8')[:10]
        expected_slice = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0

        dummy_unet_condition = self.dummy_unet_condition
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0],
            unet=dummy_unet_condition,
            mel=mel,
            scheduler=scheduler,
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        np.random.seed(0)
        encoding = torch.rand((1, 1, 10))
        # NOTE(review): the generator from the previous sub-test is reused here
        # (source passed an undefined placeholder) — confirm against upstream.
        output = pipe(generator=generator, encoding=encoding)
        image = output.images[0]
        image_slice = np.frombuffer(image.tobytes(), dtype='uint8')[:10]
        expected_slice = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0


@slow
@require_torch_gpu
class PipelineIntegrationTests(unittest.TestCase):
    """Slow GPU integration test against the pretrained audio-diffusion checkpoint."""

    def tearDown(self):
        # Release model memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_audio_diffusion(self):
        device = torch_device
        pipe = DiffusionPipeline.from_pretrained('teticio/audio-diffusion-ddim-256')
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator)
        audio = output.audios[0]
        image = output.images[0]

        assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
        assert (
            image.height == pipe.unet.config.sample_size[0]
            and image.width == pipe.unet.config.sample_size[1]
        )
        image_slice = np.frombuffer(image.tobytes(), dtype='uint8')[:10]
        expected_slice = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0


# Backward-compatible alias: the source module exposed both classes under this
# single (shadowed) name; the last definition — the integration tests — won.
_lowerCamelCase = PipelineIntegrationTests
714
"""Basic linear algebra: a real ``Vector`` and ``Matrix`` with the usual
operations, plus helper constructors (zero/unit/random vectors and matrices).

NOTE(review): the source was non-runnable — both classes and all six module
functions shared single obfuscated names (each definition shadowing the
previous), one signature repeated a parameter name (SyntaxError), and bodies
referenced undefined names (``Vector``, ``Matrix``, ``components``). The names
restored here are the ones the bodies themselves call (``component``,
``change_component``, ``width``, ``height``, ``minor``, ``cofactor``,
``euclidean_length``); backward-compatible aliases for the previously exposed
names are kept at the bottom.
"""

import math
import random
from collections.abc import Collection
from typing import overload


class Vector:
    """A vector of real numbers with component-wise arithmetic."""

    def __init__(self, components: "Collection[float] | None" = None) -> None:
        """Create a vector from *components* (empty vector if None)."""
        if components is None:
            components = []
        self.__components = list(components)

    def __len__(self) -> int:
        """Return the dimension (number of components)."""
        return len(self.__components)

    def __str__(self) -> str:
        """Render as ``(c1,c2,...)``."""
        return "(" + ",".join(map(str, self.__components)) + ")"

    def __add__(self, other: "Vector") -> "Vector":
        """Component-wise sum; both vectors must have the same size."""
        size = len(self)
        if size == len(other):
            result = [self.__components[i] + other.component(i) for i in range(size)]
            return Vector(result)
        raise Exception('must have the same size')

    def __sub__(self, other: "Vector") -> "Vector":
        """Component-wise difference; both vectors must have the same size."""
        size = len(self)
        if size == len(other):
            result = [self.__components[i] - other.component(i) for i in range(size)]
            return Vector(result)
        raise Exception('must have the same size')

    @overload
    def __mul__(self, other: float) -> "Vector": ...

    @overload
    def __mul__(self, other: "Vector") -> float: ...

    def __mul__(self, other):
        """Scalar multiplication for a number, dot product for a Vector."""
        if isinstance(other, (float, int)):
            return Vector([c * other for c in self.__components])
        elif isinstance(other, Vector) and len(self) == len(other):
            size = len(self)
            prods = [self.__components[i] * other.component(i) for i in range(size)]
            return sum(prods)
        else:
            raise Exception('invalid operand!')

    def copy(self) -> "Vector":
        """Return a shallow copy of this vector."""
        return Vector(self.__components)

    def component(self, i: int) -> float:
        """Return component *i* (negative indices allowed, list-style)."""
        if isinstance(i, int) and -len(self.__components) <= i < len(self.__components):
            return self.__components[i]
        raise Exception('index out of range')

    def change_component(self, pos: int, value: float) -> None:
        """Set component *pos* to *value* in place."""
        assert -len(self.__components) <= pos < len(self.__components)
        self.__components[pos] = value

    def euclidean_length(self) -> float:
        """Return the Euclidean (L2) norm; raise on the empty vector."""
        if len(self.__components) == 0:
            raise Exception('Vector is empty')
        squares = [c**2 for c in self.__components]
        return math.sqrt(sum(squares))

    def angle(self, other: "Vector", deg: bool = False) -> float:
        """Return the angle to *other*, in radians (or degrees if *deg*)."""
        num = self * other
        den = self.euclidean_length() * other.euclidean_length()
        if deg:
            return math.degrees(math.acos(num / den))
        return math.acos(num / den)


def zero_vector(dimension: int) -> Vector:
    """Return the zero vector of the given dimension."""
    assert isinstance(dimension, int)
    return Vector([0] * dimension)


def unit_basis_vector(dimension: int, pos: int) -> Vector:
    """Return the standard basis vector e_pos of the given dimension."""
    assert isinstance(dimension, int) and isinstance(pos, int)
    ans = [0] * dimension
    ans[pos] = 1
    return Vector(ans)


def axpy(scalar: float, x: Vector, y: Vector) -> Vector:
    """Return ``scalar * x + y`` (the BLAS axpy operation)."""
    assert (
        isinstance(x, Vector)
        and isinstance(y, Vector)
        and isinstance(scalar, (int, float))
    )
    return x * scalar + y


def random_vector(n: int, a: int, b: int) -> Vector:
    """Return an n-dimensional vector of random integers in [a, b]."""
    # NOTE(review): the source seeded from one of its (name-collided)
    # parameters; seeding from n keeps the function deterministic — confirm
    # intended seed against upstream.
    random.seed(n)
    components = [random.randint(a, b) for _ in range(n)]
    return Vector(components)


class Matrix:
    """A real h x w matrix stored row-major as a list of rows."""

    def __init__(self, matrix: "list[list[float]]", w: int, h: int) -> None:
        """Wrap *matrix* (list of rows) with explicit width *w* and height *h*."""
        self.__matrix = matrix
        self.__width = w
        self.__height = h

    def __str__(self) -> str:
        """Render each row as ``|a,b,...|`` on its own line."""
        ans = ''
        for i in range(self.__height):
            ans += '|'
            for j in range(self.__width):
                if j < self.__width - 1:
                    ans += str(self.__matrix[i][j]) + ','
                else:
                    ans += str(self.__matrix[i][j]) + '|\n'
        return ans

    def __add__(self, other: "Matrix") -> "Matrix":
        """Entry-wise sum; dimensions must match."""
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] + other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        raise Exception('matrix must have the same dimension!')

    def __sub__(self, other: "Matrix") -> "Matrix":
        """Entry-wise difference; dimensions must match."""
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] - other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        raise Exception('matrices must have the same dimension!')

    @overload
    def __mul__(self, other: float) -> "Matrix": ...

    @overload
    def __mul__(self, other: Vector) -> Vector: ...

    def __mul__(self, other):
        """Matrix-vector product for a Vector, entry-wise scaling for a number."""
        if isinstance(other, Vector):  # matrix-vector
            if len(other) == self.__width:
                ans = zero_vector(self.__height)
                for i in range(self.__height):
                    summands = [
                        self.__matrix[i][j] * other.component(j)
                        for j in range(self.__width)
                    ]
                    ans.change_component(i, sum(summands))
                return ans
            raise Exception(
                'vector must have the same size as the '
                'number of columns of the matrix!'
            )
        elif isinstance(other, (int, float)):  # matrix-scalar
            scaled = [
                [self.__matrix[i][j] * other for j in range(self.__width)]
                for i in range(self.__height)
            ]
            return Matrix(scaled, self.__width, self.__height)
        return None

    def height(self) -> int:
        """Return the number of rows."""
        return self.__height

    def width(self) -> int:
        """Return the number of columns."""
        return self.__width

    def component(self, x: int, y: int) -> float:
        """Return entry (x, y); raise on out-of-bounds indices."""
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return self.__matrix[x][y]
        raise Exception('change_component: indices out of bounds')

    def change_component(self, x: int, y: int, value: float) -> None:
        """Set entry (x, y) to *value* in place."""
        if 0 <= x < self.__height and 0 <= y < self.__width:
            self.__matrix[x][y] = value
        else:
            raise Exception('change_component: indices out of bounds')

    def minor(self, x: int, y: int) -> float:
        """Return the (x, y) minor: determinant with row x and column y removed."""
        if self.__height != self.__width:
            raise Exception('Matrix is not square')
        minor = self.__matrix[:x] + self.__matrix[x + 1 :]
        for i in range(len(minor)):
            minor[i] = minor[i][:y] + minor[i][y + 1 :]
        return Matrix(minor, self.__width - 1, self.__height - 1).determinant()

    def cofactor(self, x: int, y: int) -> float:
        """Return the signed (x, y) cofactor: (-1)^(x+y) * minor(x, y)."""
        if self.__height != self.__width:
            raise Exception('Matrix is not square')
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return (-1) ** (x + y) * self.minor(x, y)
        raise Exception('Indices out of bounds')

    def determinant(self) -> float:
        """Return the determinant via Laplace expansion along the first row."""
        if self.__height != self.__width:
            raise Exception('Matrix is not square')
        if self.__height < 1:
            raise Exception('Matrix has no element')
        elif self.__height == 1:
            return self.__matrix[0][0]
        elif self.__height == 2:
            return (
                self.__matrix[0][0] * self.__matrix[1][1]
                - self.__matrix[0][1] * self.__matrix[1][0]
            )
        else:
            cofactor_prods = [
                self.__matrix[0][y] * self.cofactor(0, y)
                for y in range(self.__width)
            ]
            return sum(cofactor_prods)


def square_zero_matrix(n: int) -> Matrix:
    """Return the n x n zero matrix."""
    ans: "list[list[float]]" = [[0] * n for _ in range(n)]
    return Matrix(ans, n, n)


def random_matrix(width: int, height: int, a: int, b: int) -> Matrix:
    """Return a height x width matrix of random integers in [a, b]."""
    # NOTE(review): seed choice is ambiguous in the (name-collided) source;
    # seeding from width keeps determinism — confirm against upstream.
    random.seed(width)
    matrix: "list[list[float]]" = [
        [random.randint(a, b) for _ in range(width)] for _ in range(height)
    ]
    return Matrix(matrix, width, height)


# Backward-compatible aliases for the obfuscated names this module previously
# exposed (each name was defined several times; the last definition won).
_lowerCamelCase = Matrix
_lowercase = random_matrix
540
0
"""simple docstring""" import argparse import math import os from copy import deepcopy import torch from audio_diffusion.models import DiffusionAttnUnetaD from diffusion import sampling from torch import nn from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel _lowerCAmelCase = { """gwf-440k""": { """url""": """https://model-server.zqevans2.workers.dev/gwf-440k.ckpt""", """sample_rate""": 4_8_0_0_0, """sample_size""": 6_5_5_3_6, }, """jmann-small-190k""": { """url""": """https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt""", """sample_rate""": 4_8_0_0_0, """sample_size""": 6_5_5_3_6, }, """jmann-large-580k""": { """url""": """https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt""", """sample_rate""": 4_8_0_0_0, """sample_size""": 1_3_1_0_7_2, }, """maestro-uncond-150k""": { """url""": """https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt""", """sample_rate""": 1_6_0_0_0, """sample_size""": 6_5_5_3_6, }, """unlocked-uncond-250k""": { """url""": """https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt""", """sample_rate""": 1_6_0_0_0, """sample_size""": 6_5_5_3_6, }, """honk-140k""": { """url""": """https://model-server.zqevans2.workers.dev/honk-140k.ckpt""", """sample_rate""": 1_6_0_0_0, """sample_size""": 6_5_5_3_6, }, } def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase ): '''simple docstring''' return torch.atana(_lowerCamelCase , _lowerCamelCase ) / math.pi * 2 def lowerCamelCase__ ( _lowerCamelCase ): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = torch.sin(t * math.pi / 2 ) ** 2 _lowerCAmelCase : Any = (1 - sigma**2) ** 0.5 return alpha_sigma_to_t(_lowerCamelCase , _lowerCamelCase ) class __UpperCamelCase ( a__ ): pass class __UpperCamelCase ( nn.Module ): def __init__( self ,_A ): '''simple docstring''' super().__init__() _lowerCAmelCase : str = DiffusionAttnUnetaD(_A ,n_attn_layers=4 ) _lowerCAmelCase : List[str] = deepcopy(self.diffusion ) _lowerCAmelCase : Any = 
torch.quasirandom.SobolEngine(1 ,scramble=_A ) def lowerCamelCase__ ( _lowerCamelCase ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = MODELS_MAP[model_name]['url'] os.system(f"""wget {url} ./""" ) return f"""./{model_name}.ckpt""" _lowerCAmelCase = { """1""": """resnets.0""", """2""": """attentions.0""", """3""": """resnets.1""", """4""": """attentions.1""", """5""": """resnets.2""", """6""": """attentions.2""", } _lowerCAmelCase = { """8""": """resnets.0""", """9""": """attentions.0""", """10""": """resnets.1""", """11""": """attentions.1""", """12""": """resnets.2""", """13""": """attentions.2""", } _lowerCAmelCase = { """1""": """resnets.0""", """2""": """attentions.0""", """3""": """resnets.1""", """4""": """attentions.1""", """5""": """resnets.2""", """6""": """attentions.2""", """8""": """resnets.3""", """9""": """attentions.3""", """10""": """resnets.4""", """11""": """attentions.4""", """12""": """resnets.5""", """13""": """attentions.5""", } _lowerCAmelCase = { """0""": """resnets.0""", """1""": """resnets.1""", """2""": """resnets.2""", """4""": """resnets.0""", """5""": """resnets.1""", """6""": """resnets.2""", } _lowerCAmelCase = { """skip""": """conv_skip""", """main.0""": """conv_1""", """main.1""": """group_norm_1""", """main.3""": """conv_2""", """main.4""": """group_norm_2""", } _lowerCAmelCase = { """norm""": """group_norm""", """qkv_proj""": ["""query""", """key""", """value"""], """out_proj""": ["""proj_attn"""], } def lowerCamelCase__ ( _lowerCamelCase ): '''simple docstring''' if name.startswith('skip' ): return name.replace('skip' , RES_CONV_MAP['skip'] ) # name has to be of format main.{digit} if not name.startswith('main.' 
): raise ValueError(f"""ResConvBlock error with {name}""" ) return name.replace(name[:6] , RES_CONV_MAP[name[:6]] ) def lowerCamelCase__ ( _lowerCamelCase ): '''simple docstring''' for key, value in ATTN_MAP.items(): if name.startswith(_lowerCamelCase ) and not isinstance(_lowerCamelCase , _lowerCamelCase ): return name.replace(_lowerCamelCase , _lowerCamelCase ) elif name.startswith(_lowerCamelCase ): return [name.replace(_lowerCamelCase , _lowerCamelCase ) for v in value] raise ValueError(f"""Attn error with {name}""" ) def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase=13 ): '''simple docstring''' _lowerCAmelCase : Any = input_string if string.split('.' )[0] == "timestep_embed": return string.replace('timestep_embed' , 'time_proj' ) _lowerCAmelCase : List[Any] = 0 if string.startswith('net.3.' ): depth += 1 _lowerCAmelCase : Dict = string[6:] elif string.startswith('net.' ): _lowerCAmelCase : str = string[4:] while string.startswith('main.7.' ): depth += 1 _lowerCAmelCase : List[Any] = string[7:] if string.startswith('main.' ): _lowerCAmelCase : List[Any] = string[5:] # mid block if string[:2].isdigit(): _lowerCAmelCase : List[Any] = string[:2] _lowerCAmelCase : Optional[int] = string[2:] else: _lowerCAmelCase : Optional[int] = string[0] _lowerCAmelCase : Dict = string[1:] if depth == max_depth: _lowerCAmelCase : int = MID_NUM_TO_LAYER[layer_num] _lowerCAmelCase : List[str] = 'mid_block' elif depth > 0 and int(_lowerCamelCase ) < 7: _lowerCAmelCase : str = DOWN_NUM_TO_LAYER[layer_num] _lowerCAmelCase : List[str] = f"""down_blocks.{depth}""" elif depth > 0 and int(_lowerCamelCase ) > 7: _lowerCAmelCase : List[Any] = UP_NUM_TO_LAYER[layer_num] _lowerCAmelCase : List[str] = f"""up_blocks.{max_depth - depth - 1}""" elif depth == 0: _lowerCAmelCase : Optional[int] = DEPTH_0_TO_LAYER[layer_num] _lowerCAmelCase : int = f"""up_blocks.{max_depth - 1}""" if int(_lowerCamelCase ) > 3 else 'down_blocks.0' if not string_left.startswith('.' 
): raise ValueError(f"""Naming error with {input_string} and string_left: {string_left}.""" ) _lowerCAmelCase : Optional[int] = string_left[1:] if "resnets" in new_layer: _lowerCAmelCase : int = convert_resconv_naming(_lowerCamelCase ) elif "attentions" in new_layer: _lowerCAmelCase : Tuple = convert_attn_naming(_lowerCamelCase ) _lowerCAmelCase : int = new_string_left if not isinstance(_lowerCamelCase , _lowerCamelCase ): _lowerCAmelCase : List[Any] = prefix + '.' + new_layer + '.' + string_left else: _lowerCAmelCase : Optional[int] = [prefix + '.' + new_layer + '.' + s for s in string_left] return new_string def lowerCamelCase__ ( _lowerCamelCase ): '''simple docstring''' _lowerCAmelCase : List[Any] = {} for k, v in state_dict.items(): if k.endswith('kernel' ): # up- and downsample layers, don't have trainable weights continue _lowerCAmelCase : Optional[Any] = rename(_lowerCamelCase ) # check if we need to transform from Conv => Linear for attention if isinstance(_lowerCamelCase , _lowerCamelCase ): _lowerCAmelCase : int = transform_conv_attns(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) else: _lowerCAmelCase : str = v return new_state_dict def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): '''simple docstring''' if len(_lowerCamelCase ) == 1: if len(v.shape ) == 3: # weight _lowerCAmelCase : int = v[:, :, 0] else: # bias _lowerCAmelCase : Optional[Any] = v else: # qkv matrices _lowerCAmelCase : Optional[int] = v.shape[0] _lowerCAmelCase : Optional[int] = trippled_shape // 3 for i in range(3 ): if len(v.shape ) == 3: _lowerCAmelCase : Tuple = v[i * single_shape : (i + 1) * single_shape, :, 0] else: _lowerCAmelCase : int = v[i * single_shape : (i + 1) * single_shape] return new_state_dict def lowerCamelCase__ ( _lowerCamelCase ): '''simple docstring''' _lowerCAmelCase : Tuple = torch.device('cuda' if torch.cuda.is_available() else 'cpu' ) _lowerCAmelCase : Union[str, Any] = args.model_path.split('/' )[-1].split('.' 
)[0] if not os.path.isfile(args.model_path ): assert ( model_name == args.model_path ), f"""Make sure to provide one of the official model names {MODELS_MAP.keys()}""" _lowerCAmelCase : List[Any] = download(_lowerCamelCase ) _lowerCAmelCase : str = MODELS_MAP[model_name]['sample_rate'] _lowerCAmelCase : Any = MODELS_MAP[model_name]['sample_size'] _lowerCAmelCase : int = Object() _lowerCAmelCase : Optional[int] = sample_size _lowerCAmelCase : Any = sample_rate _lowerCAmelCase : Any = 0 _lowerCAmelCase : List[str] = UNetaDModel(sample_size=_lowerCamelCase , sample_rate=_lowerCamelCase ) _lowerCAmelCase : List[str] = diffusers_model.state_dict() _lowerCAmelCase : Optional[int] = DiffusionUncond(_lowerCamelCase ) orig_model.load_state_dict(torch.load(args.model_path , map_location=_lowerCamelCase )['state_dict'] ) _lowerCAmelCase : str = orig_model.diffusion_ema.eval() _lowerCAmelCase : Any = orig_model.state_dict() _lowerCAmelCase : List[Any] = rename_orig_weights(_lowerCamelCase ) _lowerCAmelCase : List[Any] = set(renamed_state_dict.keys() ) - set(diffusers_state_dict.keys() ) _lowerCAmelCase : Tuple = set(diffusers_state_dict.keys() ) - set(renamed_state_dict.keys() ) assert len(_lowerCamelCase ) == 0, f"""Problem with {renamed_minus_diffusers}""" assert all(k.endswith('kernel' ) for k in list(_lowerCamelCase ) ), f"""Problem with {diffusers_minus_renamed}""" for key, value in renamed_state_dict.items(): assert ( diffusers_state_dict[key].squeeze().shape == value.squeeze().shape ), f"""Shape for {key} doesn't match. Diffusers: {diffusers_state_dict[key].shape} vs. 
{value.shape}""" if key == "time_proj.weight": _lowerCAmelCase : Dict = value.squeeze() _lowerCAmelCase : List[str] = value diffusers_model.load_state_dict(_lowerCamelCase ) _lowerCAmelCase : int = 100 _lowerCAmelCase : int = 33 _lowerCAmelCase : Any = IPNDMScheduler(num_train_timesteps=_lowerCamelCase ) _lowerCAmelCase : Union[str, Any] = torch.manual_seed(_lowerCamelCase ) _lowerCAmelCase : Tuple = torch.randn([1, 2, config.sample_size] , generator=_lowerCamelCase ).to(_lowerCamelCase ) _lowerCAmelCase : int = torch.linspace(1 , 0 , steps + 1 , device=_lowerCamelCase )[:-1] _lowerCAmelCase : Union[str, Any] = get_crash_schedule(_lowerCamelCase ) _lowerCAmelCase : Any = DanceDiffusionPipeline(unet=_lowerCamelCase , scheduler=_lowerCamelCase ) _lowerCAmelCase : List[Any] = torch.manual_seed(33 ) _lowerCAmelCase : Optional[Any] = pipe(num_inference_steps=_lowerCamelCase , generator=_lowerCamelCase ).audios _lowerCAmelCase : List[str] = sampling.iplms_sample(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , {} ) _lowerCAmelCase : Any = generated.clamp(-1 , 1 ) _lowerCAmelCase : Dict = (generated - audio).abs().sum() _lowerCAmelCase : Any = (generated - audio).abs().max() if args.save: pipe.save_pretrained(args.checkpoint_path ) print('Diff sum' , _lowerCamelCase ) print('Diff max' , _lowerCamelCase ) assert diff_max < 1e-3, f"""Diff max: {diff_max} is too much :-/""" print(f"""Conversion for {model_name} successful!""" ) if __name__ == "__main__": _lowerCAmelCase = argparse.ArgumentParser() parser.add_argument("""--model_path""", default=None, type=str, required=True, help="""Path to the model to convert.""") parser.add_argument( """--save""", default=True, type=bool, required=False, help="""Whether to save the converted model or not.""" ) parser.add_argument("""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the output model.""") _lowerCAmelCase = parser.parse_args() main(args)
259
"""simple docstring""" import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST, OpenAIGPTConfig, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification, OpenAIGPTLMHeadModel, OpenAIGPTModel, ) class __UpperCamelCase : def __init__( self ,_A ,_A=13 ,_A=7 ,_A=True ,_A=True ,_A=True ,_A=99 ,_A=32 ,_A=5 ,_A=4 ,_A=37 ,_A="gelu" ,_A=0.1 ,_A=0.1 ,_A=512 ,_A=16 ,_A=2 ,_A=0.0_2 ,_A=3 ,_A=4 ,_A=None ,): '''simple docstring''' _lowerCAmelCase : str = parent _lowerCAmelCase : Optional[int] = batch_size _lowerCAmelCase : Dict = seq_length _lowerCAmelCase : Union[str, Any] = is_training _lowerCAmelCase : Tuple = use_token_type_ids _lowerCAmelCase : str = use_labels _lowerCAmelCase : Optional[Any] = vocab_size _lowerCAmelCase : Union[str, Any] = hidden_size _lowerCAmelCase : int = num_hidden_layers _lowerCAmelCase : Optional[int] = num_attention_heads _lowerCAmelCase : Optional[Any] = intermediate_size _lowerCAmelCase : Dict = hidden_act _lowerCAmelCase : Optional[int] = hidden_dropout_prob _lowerCAmelCase : str = attention_probs_dropout_prob _lowerCAmelCase : Optional[int] = max_position_embeddings _lowerCAmelCase : Any = type_vocab_size _lowerCAmelCase : int = type_sequence_label_size _lowerCAmelCase : int = initializer_range _lowerCAmelCase : List[str] = num_labels _lowerCAmelCase : Any = num_choices _lowerCAmelCase : Tuple = scope _lowerCAmelCase : int = self.vocab_size - 1 def __lowerCamelCase ( self ): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) _lowerCAmelCase : List[str] = None 
if self.use_token_type_ids: _lowerCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size ) _lowerCAmelCase : Any = None _lowerCAmelCase : Optional[int] = None _lowerCAmelCase : List[str] = None if self.use_labels: _lowerCAmelCase : Optional[int] = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) _lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels ) _lowerCAmelCase : Union[str, Any] = ids_tensor([self.batch_size] ,self.num_choices ) _lowerCAmelCase : List[Any] = OpenAIGPTConfig( vocab_size=self.vocab_size ,n_embd=self.hidden_size ,n_layer=self.num_hidden_layers ,n_head=self.num_attention_heads ,n_positions=self.max_position_embeddings ,pad_token_id=self.pad_token_id ,) _lowerCAmelCase : Dict = ids_tensor([self.num_hidden_layers, self.num_attention_heads] ,2 ) return ( config, input_ids, head_mask, token_type_ids, sequence_labels, token_labels, choice_labels, ) def __lowerCamelCase ( self ,_A ,_A ,_A ,_A ,*_A ): '''simple docstring''' _lowerCAmelCase : Tuple = OpenAIGPTModel(config=_A ) model.to(_A ) model.eval() _lowerCAmelCase : Dict = model(_A ,token_type_ids=_A ,head_mask=_A ) _lowerCAmelCase : Tuple = model(_A ,token_type_ids=_A ) _lowerCAmelCase : List[Any] = model(_A ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def __lowerCamelCase ( self ,_A ,_A ,_A ,_A ,*_A ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = OpenAIGPTLMHeadModel(_A ) model.to(_A ) model.eval() _lowerCAmelCase : Union[str, Any] = model(_A ,token_type_ids=_A ,labels=_A ) self.parent.assertEqual(result.loss.shape ,() ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) def __lowerCamelCase ( self ,_A ,_A ,_A ,_A ,*_A ): '''simple docstring''' _lowerCAmelCase : str = OpenAIGPTDoubleHeadsModel(_A ) model.to(_A ) model.eval() _lowerCAmelCase : Optional[int] = model(_A ,token_type_ids=_A 
,labels=_A ) self.parent.assertEqual(result.loss.shape ,() ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) def __lowerCamelCase ( self ,_A ,_A ,_A ,_A ,*_A ): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = self.num_labels _lowerCAmelCase : Any = OpenAIGPTForSequenceClassification(_A ) model.to(_A ) model.eval() _lowerCAmelCase : Union[str, Any] = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) _lowerCAmelCase : Tuple = model(_A ,token_type_ids=_A ,labels=_A ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) ) def __lowerCamelCase ( self ): '''simple docstring''' _lowerCAmelCase : str = self.prepare_config_and_inputs() ( ( _lowerCAmelCase ), ( _lowerCAmelCase ), ( _lowerCAmelCase ), ( _lowerCAmelCase ), ( _lowerCAmelCase ), ( _lowerCAmelCase ), ( _lowerCAmelCase ), ) : int = config_and_inputs _lowerCAmelCase : Any = { 'input_ids': input_ids, 'token_type_ids': token_type_ids, 'head_mask': head_mask, } return config, inputs_dict @require_torch class __UpperCamelCase ( a__ , a__ , a__ , unittest.TestCase ): _UpperCAmelCase = ( (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification) if is_torch_available() else () ) _UpperCAmelCase = ( (OpenAIGPTLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly _UpperCAmelCase = ( { "feature-extraction": OpenAIGPTModel, "text-classification": OpenAIGPTForSequenceClassification, "text-generation": OpenAIGPTLMHeadModel, "zero-shot": OpenAIGPTForSequenceClassification, } if is_torch_available() else {} ) def __lowerCamelCase ( self ,_A ,_A ,_A ,_A ,_A ): '''simple docstring''' if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests": # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers. 
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a # tiny config could not be created. return True return False def __lowerCamelCase ( self ,_A ,_A ,_A=False ): '''simple docstring''' _lowerCAmelCase : int = super()._prepare_for_class(_A ,_A ,return_labels=_A ) if return_labels: if model_class.__name__ == "OpenAIGPTDoubleHeadsModel": _lowerCAmelCase : List[Any] = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) ,dtype=torch.long ,device=_A ,) _lowerCAmelCase : int = inputs_dict['labels'] _lowerCAmelCase : Tuple = inputs_dict['labels'] _lowerCAmelCase : List[Any] = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices) ,dtype=torch.long ,device=_A ,) _lowerCAmelCase : Dict = torch.zeros( self.model_tester.batch_size ,dtype=torch.long ,device=_A ) return inputs_dict def __lowerCamelCase ( self ): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = OpenAIGPTModelTester(self ) _lowerCAmelCase : Union[str, Any] = ConfigTester(self ,config_class=_A ,n_embd=37 ) def __lowerCamelCase ( self ): '''simple docstring''' self.config_tester.run_common_tests() def __lowerCamelCase ( self ): '''simple docstring''' _lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_model(*_A ) def __lowerCamelCase ( self ): '''simple docstring''' _lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head_model(*_A ) def __lowerCamelCase ( self ): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_double_lm_head_model(*_A ) def __lowerCamelCase ( self ): '''simple docstring''' _lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*_A ) @slow def __lowerCamelCase ( 
self ): '''simple docstring''' for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowerCAmelCase : List[Any] = OpenAIGPTModel.from_pretrained(_A ) self.assertIsNotNone(_A ) @require_torch class __UpperCamelCase ( unittest.TestCase ): @slow def __lowerCamelCase ( self ): '''simple docstring''' _lowerCAmelCase : Any = OpenAIGPTLMHeadModel.from_pretrained('openai-gpt' ) model.to(_A ) _lowerCAmelCase : Any = torch.tensor([[481, 4735, 544]] ,dtype=torch.long ,device=_A ) # the president is _lowerCAmelCase : Dict = [ 481, 4735, 544, 246, 963, 870, 762, 239, 244, 4_0477, 244, 249, 719, 881, 487, 544, 240, 244, 603, 481, ] # the president is a very good man. " \n " i\'m sure he is, " said the _lowerCAmelCase : str = model.generate(_A ,do_sample=_A ) self.assertListEqual(output_ids[0].tolist() ,_A )
259
1
import baseaa def a ( A__ ) -> Union[str, Any]: '''simple docstring''' return baseaa.aaaencode(string.encode('''utf-8''' ) ) def a ( A__ ) -> List[str]: '''simple docstring''' return baseaa.aaadecode(snake_case_ ).decode('''utf-8''' ) if __name__ == "__main__": import doctest doctest.testmod()
719
import argparse import pathlib import fairseq import torch from fairseq.models.roberta import RobertaModel as FairseqRobertaModel from fairseq.modules import TransformerSentenceEncoderLayer from packaging import version from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification from transformers.models.bert.modeling_bert import ( BertIntermediate, BertLayer, BertOutput, BertSelfAttention, BertSelfOutput, ) from transformers.models.roberta.modeling_roberta import RobertaAttention from transformers.utils import logging if version.parse(fairseq.__version__) < version.parse('1.0.0a'): raise Exception('requires fairseq >= 1.0.0a') logging.set_verbosity_info() a_ :Tuple = logging.get_logger(__name__) a_ :List[str] = 'Hello world! cécé herlolip' def a ( A__ , A__ , A__ ) -> Optional[int]: '''simple docstring''' SCREAMING_SNAKE_CASE__ : int = FairseqRobertaModel.from_pretrained(A__ ) roberta.eval() # disable dropout SCREAMING_SNAKE_CASE__ : List[str] = roberta.model.encoder.sentence_encoder SCREAMING_SNAKE_CASE__ : Optional[Any] = XLMRobertaConfig( vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings , hidden_size=roberta.cfg.model.encoder_embed_dim , num_hidden_layers=roberta.cfg.model.encoder_layers , num_attention_heads=roberta.cfg.model.encoder_attention_heads , intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=5_1_4 , type_vocab_size=1 , layer_norm_eps=1e-5 , ) if classification_head: SCREAMING_SNAKE_CASE__ : List[str] = roberta.model.classification_heads['''mnli'''].out_proj.weight.shape[0] print('''Our RoBERTa config:''' , A__ ) SCREAMING_SNAKE_CASE__ : Tuple = XLMRobertaXLForSequenceClassification(A__ ) if classification_head else XLMRobertaXLForMaskedLM(A__ ) model.eval() # Now let's copy all the weights. 
# Embeddings SCREAMING_SNAKE_CASE__ : List[str] = roberta_sent_encoder.embed_tokens.weight SCREAMING_SNAKE_CASE__ : List[Any] = roberta_sent_encoder.embed_positions.weight SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.zeros_like( model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them. SCREAMING_SNAKE_CASE__ : int = roberta_sent_encoder.layer_norm.weight SCREAMING_SNAKE_CASE__ : int = roberta_sent_encoder.layer_norm.bias for i in range(config.num_hidden_layers ): # Encoder: start of layer SCREAMING_SNAKE_CASE__ : BertLayer = model.roberta.encoder.layer[i] SCREAMING_SNAKE_CASE__ : TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i] SCREAMING_SNAKE_CASE__ : RobertaAttention = layer.attention SCREAMING_SNAKE_CASE__ : Optional[Any] = roberta_layer.self_attn_layer_norm.weight SCREAMING_SNAKE_CASE__ : Any = roberta_layer.self_attn_layer_norm.bias # self attention SCREAMING_SNAKE_CASE__ : BertSelfAttention = layer.attention.self assert ( roberta_layer.self_attn.k_proj.weight.data.shape == roberta_layer.self_attn.q_proj.weight.data.shape == roberta_layer.self_attn.v_proj.weight.data.shape == torch.Size((config.hidden_size, config.hidden_size) ) ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = roberta_layer.self_attn.q_proj.weight SCREAMING_SNAKE_CASE__ : int = roberta_layer.self_attn.q_proj.bias SCREAMING_SNAKE_CASE__ : Any = roberta_layer.self_attn.k_proj.weight SCREAMING_SNAKE_CASE__ : Optional[Any] = roberta_layer.self_attn.k_proj.bias SCREAMING_SNAKE_CASE__ : List[Any] = roberta_layer.self_attn.v_proj.weight SCREAMING_SNAKE_CASE__ : Dict = roberta_layer.self_attn.v_proj.bias # self-attention output SCREAMING_SNAKE_CASE__ : BertSelfOutput = layer.attention.output assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape SCREAMING_SNAKE_CASE__ : str = roberta_layer.self_attn.out_proj.weight SCREAMING_SNAKE_CASE__ : Union[str, Any] = roberta_layer.self_attn.out_proj.bias # this one is 
final layer norm SCREAMING_SNAKE_CASE__ : List[Any] = roberta_layer.final_layer_norm.weight SCREAMING_SNAKE_CASE__ : List[str] = roberta_layer.final_layer_norm.bias # intermediate SCREAMING_SNAKE_CASE__ : BertIntermediate = layer.intermediate assert intermediate.dense.weight.shape == roberta_layer.fca.weight.shape SCREAMING_SNAKE_CASE__ : List[str] = roberta_layer.fca.weight SCREAMING_SNAKE_CASE__ : Tuple = roberta_layer.fca.bias # output SCREAMING_SNAKE_CASE__ : BertOutput = layer.output assert bert_output.dense.weight.shape == roberta_layer.fca.weight.shape SCREAMING_SNAKE_CASE__ : List[str] = roberta_layer.fca.weight SCREAMING_SNAKE_CASE__ : Any = roberta_layer.fca.bias # end of layer if classification_head: SCREAMING_SNAKE_CASE__ : Dict = roberta.model.classification_heads['''mnli'''].dense.weight SCREAMING_SNAKE_CASE__ : Any = roberta.model.classification_heads['''mnli'''].dense.bias SCREAMING_SNAKE_CASE__ : Tuple = roberta.model.classification_heads['''mnli'''].out_proj.weight SCREAMING_SNAKE_CASE__ : Union[str, Any] = roberta.model.classification_heads['''mnli'''].out_proj.bias else: # LM Head SCREAMING_SNAKE_CASE__ : str = roberta.model.encoder.lm_head.dense.weight SCREAMING_SNAKE_CASE__ : List[str] = roberta.model.encoder.lm_head.dense.bias SCREAMING_SNAKE_CASE__ : Tuple = roberta.model.encoder.lm_head.layer_norm.weight SCREAMING_SNAKE_CASE__ : Dict = roberta.model.encoder.lm_head.layer_norm.bias SCREAMING_SNAKE_CASE__ : List[str] = roberta.model.encoder.lm_head.weight SCREAMING_SNAKE_CASE__ : Tuple = roberta.model.encoder.lm_head.bias # Let's check that we get the same results. 
SCREAMING_SNAKE_CASE__ : torch.Tensor = roberta.encode(A__ ).unsqueeze(0 ) # batch of size 1 SCREAMING_SNAKE_CASE__ : List[str] = model(A__ )[0] if classification_head: SCREAMING_SNAKE_CASE__ : Optional[int] = roberta.model.classification_heads['''mnli'''](roberta.extract_features(A__ ) ) else: SCREAMING_SNAKE_CASE__ : Dict = roberta.model(A__ )[0] print(our_output.shape , their_output.shape ) SCREAMING_SNAKE_CASE__ : List[Any] = torch.max(torch.abs(our_output - their_output ) ).item() print(f"""max_absolute_diff = {max_absolute_diff}""" ) # ~ 1e-7 SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.allclose(A__ , A__ , atol=1e-3 ) print('''Do both models output the same tensors?''' , '''🔥''' if success else '''💩''' ) if not success: raise Exception('''Something went wRoNg''' ) pathlib.Path(A__ ).mkdir(parents=A__ , exist_ok=A__ ) print(f"""Saving model to {pytorch_dump_folder_path}""" ) model.save_pretrained(A__ ) if __name__ == "__main__": a_ :List[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--roberta_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.' ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) parser.add_argument( '--classification_head', action='store_true', help='Whether to convert a final classification head.' ) a_ :str = parser.parse_args() convert_xlm_roberta_xl_checkpoint_to_pytorch( args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head )
250
0
"""simple docstring""" from __future__ import annotations import typing from collections import Counter def __UpperCAmelCase ( lowercase ): """simple docstring""" _UpperCAmelCase = Counter() for base in range(1 ,max_perimeter + 1 ): for perpendicular in range(lowercase ,max_perimeter + 1 ): _UpperCAmelCase = (base * base + perpendicular * perpendicular) ** 0.5 if hypotenuse == int(lowercase ): _UpperCAmelCase = int(base + perpendicular + hypotenuse ) if perimeter > max_perimeter: continue triplets[perimeter] += 1 return triplets def __UpperCAmelCase ( lowercase = 10_00 ): """simple docstring""" _UpperCAmelCase = pythagorean_triple(lowercase ) return triplets.most_common(1 )[0][0] if __name__ == "__main__": print(F'''Perimeter {solution()} has maximum solutions''')
277
"""simple docstring""" import os import shutil import sys import tempfile import unittest from pathlib import Path import pytest import transformers from transformers import ( BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, AutoTokenizer, BertConfig, BertTokenizer, BertTokenizerFast, CTRLTokenizer, GPTaTokenizer, GPTaTokenizerFast, PreTrainedTokenizerFast, RobertaTokenizer, RobertaTokenizerFast, is_tokenizers_available, ) from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig from transformers.models.auto.tokenization_auto import ( TOKENIZER_MAPPING, get_tokenizer_config, tokenizer_class_from_name, ) from transformers.models.roberta.configuration_roberta import RobertaConfig from transformers.testing_utils import ( DUMMY_DIFF_TOKENIZER_IDENTIFIER, DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, RequestCounter, require_tokenizers, slow, ) sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils""")) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_tokenization import CustomTokenizer # noqa E402 if is_tokenizers_available(): from test_module.custom_tokenization_fast import CustomTokenizerFast class a ( unittest.TestCase ): def lowerCAmelCase_ ( self : List[Any] ): _UpperCAmelCase = 0 @slow def lowerCAmelCase_ ( self : List[str] ): for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x): _UpperCAmelCase = AutoTokenizer.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) self.assertIsInstance(__lowerCAmelCase , (BertTokenizer, BertTokenizerFast) ) self.assertGreater(len(__lowerCAmelCase ) , 0 ) for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys(): _UpperCAmelCase = AutoTokenizer.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) self.assertIsInstance(__lowerCAmelCase , (GPTaTokenizer, GPTaTokenizerFast) ) self.assertGreater(len(__lowerCAmelCase ) , 0 ) def 
lowerCAmelCase_ ( self : Union[str, Any] ): _UpperCAmelCase = AutoTokenizer.from_pretrained(__lowerCAmelCase ) self.assertIsInstance(__lowerCAmelCase , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(tokenizer.vocab_size , 12 ) def lowerCAmelCase_ ( self : List[str] ): _UpperCAmelCase = AutoTokenizer.from_pretrained(__lowerCAmelCase ) self.assertIsInstance(__lowerCAmelCase , (RobertaTokenizer, RobertaTokenizerFast) ) self.assertEqual(tokenizer.vocab_size , 20 ) def lowerCAmelCase_ ( self : Any ): _UpperCAmelCase = AutoConfig.from_pretrained(__lowerCAmelCase ) self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase ) # Check that tokenizer_type ≠ model_type _UpperCAmelCase = AutoTokenizer.from_pretrained(__lowerCAmelCase , config=__lowerCAmelCase ) self.assertIsInstance(__lowerCAmelCase , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(tokenizer.vocab_size , 12 ) def lowerCAmelCase_ ( self : List[str] ): with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy("""./tests/fixtures/vocab.txt""" , os.path.join(__lowerCAmelCase , """vocab.txt""" ) ) _UpperCAmelCase = AutoTokenizer.from_pretrained(__lowerCAmelCase , tokenizer_type="""bert""" , use_fast=__lowerCAmelCase ) self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase ) with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy("""./tests/fixtures/vocab.json""" , os.path.join(__lowerCAmelCase , """vocab.json""" ) ) shutil.copy("""./tests/fixtures/merges.txt""" , os.path.join(__lowerCAmelCase , """merges.txt""" ) ) _UpperCAmelCase = AutoTokenizer.from_pretrained(__lowerCAmelCase , tokenizer_type="""gpt2""" , use_fast=__lowerCAmelCase ) self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase ) @require_tokenizers def lowerCAmelCase_ ( self : Tuple ): with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy("""./tests/fixtures/vocab.txt""" , os.path.join(__lowerCAmelCase , """vocab.txt""" ) ) _UpperCAmelCase = AutoTokenizer.from_pretrained(__lowerCAmelCase , tokenizer_type="""bert""" ) 
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase ) with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy("""./tests/fixtures/vocab.json""" , os.path.join(__lowerCAmelCase , """vocab.json""" ) ) shutil.copy("""./tests/fixtures/merges.txt""" , os.path.join(__lowerCAmelCase , """merges.txt""" ) ) _UpperCAmelCase = AutoTokenizer.from_pretrained(__lowerCAmelCase , tokenizer_type="""gpt2""" ) self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase ) def lowerCAmelCase_ ( self : List[Any] ): with pytest.raises(__lowerCAmelCase ): AutoTokenizer.from_pretrained("""./""" , tokenizer_type="""xxx""" ) @require_tokenizers def lowerCAmelCase_ ( self : Optional[int] ): for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]: _UpperCAmelCase = tokenizer_class.from_pretrained("""wietsedv/bert-base-dutch-cased""" ) self.assertIsInstance(__lowerCAmelCase , (BertTokenizer, BertTokenizerFast) ) if isinstance(__lowerCAmelCase , __lowerCAmelCase ): self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , __lowerCAmelCase ) else: self.assertEqual(tokenizer.do_lower_case , __lowerCAmelCase ) self.assertEqual(tokenizer.model_max_length , 512 ) @require_tokenizers def lowerCAmelCase_ ( self : List[str] ): for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]: with self.assertRaisesRegex( __lowerCAmelCase , """julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier""" , ): _UpperCAmelCase = tokenizer_class.from_pretrained("""julien-c/herlolip-not-exists""" ) def lowerCAmelCase_ ( self : Optional[Any] ): # tests: https://github.com/huggingface/transformers/pull/13251 # 1. models with `-`, e.g. xlm-roberta -> xlm_roberta # 2. 
models that don't remap 1-1 from model-name to model file, e.g., openai-gpt -> openai _UpperCAmelCase = TOKENIZER_MAPPING.values() _UpperCAmelCase = [] for slow_tok, fast_tok in tokenizers: if slow_tok is not None: tokenizer_names.append(slow_tok.__name__ ) if fast_tok is not None: tokenizer_names.append(fast_tok.__name__ ) for tokenizer_name in tokenizer_names: # must find the right class tokenizer_class_from_name(__lowerCAmelCase ) @require_tokenizers def lowerCAmelCase_ ( self : Optional[Any] ): self.assertIsInstance(AutoTokenizer.from_pretrained("""bert-base-cased""" , use_fast=__lowerCAmelCase ) , __lowerCAmelCase ) self.assertIsInstance(AutoTokenizer.from_pretrained("""bert-base-cased""" ) , __lowerCAmelCase ) @require_tokenizers def lowerCAmelCase_ ( self : int ): _UpperCAmelCase = AutoTokenizer.from_pretrained("""distilbert-base-uncased""" , do_lower_case=__lowerCAmelCase ) _UpperCAmelCase = """Hello, world. How are you?""" _UpperCAmelCase = tokenizer.tokenize(__lowerCAmelCase ) self.assertEqual("""[UNK]""" , tokens[0] ) _UpperCAmelCase = AutoTokenizer.from_pretrained("""microsoft/mpnet-base""" , do_lower_case=__lowerCAmelCase ) _UpperCAmelCase = tokenizer.tokenize(__lowerCAmelCase ) self.assertEqual("""[UNK]""" , tokens[0] ) @require_tokenizers def lowerCAmelCase_ ( self : Optional[Any] ): _UpperCAmelCase = AutoTokenizer.from_pretrained("""robot-test/dummy-tokenizer-fast-with-model-config""" ) self.assertEqual(type(__lowerCAmelCase ) , __lowerCAmelCase ) self.assertEqual(tokenizer.model_max_length , 512 ) self.assertEqual(tokenizer.vocab_size , 3_0000 ) self.assertEqual(tokenizer.unk_token , """[UNK]""" ) self.assertEqual(tokenizer.padding_side , """right""" ) self.assertEqual(tokenizer.truncation_side , """right""" ) def lowerCAmelCase_ ( self : Optional[int] ): _UpperCAmelCase = AutoTokenizer.from_pretrained(__lowerCAmelCase ) self.assertIsInstance(__lowerCAmelCase , (BertTokenizer, BertTokenizerFast) ) with tempfile.TemporaryDirectory() as tmp_dir: 
tokenizer.save_pretrained(__lowerCAmelCase ) _UpperCAmelCase = AutoTokenizer.from_pretrained(__lowerCAmelCase ) self.assertIsInstance(__lowerCAmelCase , tokenizer.__class__ ) self.assertEqual(tokenizera.vocab_size , 12 ) def lowerCAmelCase_ ( self : Optional[int] ): _UpperCAmelCase = AutoTokenizer.from_pretrained("""ctrl""" ) # There is no fast CTRL so this always gives us a slow tokenizer. self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase ) def lowerCAmelCase_ ( self : str ): # Check we can load the tokenizer config of an online model. _UpperCAmelCase = get_tokenizer_config("""bert-base-cased""" ) _UpperCAmelCase = config.pop("""_commit_hash""" , __lowerCAmelCase ) # If we ever update bert-base-cased tokenizer config, this dict here will need to be updated. self.assertEqual(__lowerCAmelCase , {"""do_lower_case""": False} ) # This model does not have a tokenizer_config so we get back an empty dict. _UpperCAmelCase = get_tokenizer_config(__lowerCAmelCase ) self.assertDictEqual(__lowerCAmelCase , {} ) # A tokenizer saved with `save_pretrained` always creates a tokenizer config. _UpperCAmelCase = AutoTokenizer.from_pretrained(__lowerCAmelCase ) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(__lowerCAmelCase ) _UpperCAmelCase = get_tokenizer_config(__lowerCAmelCase ) # Check the class of the tokenizer was properly saved (note that it always saves the slow class). 
self.assertEqual(config["""tokenizer_class"""] , """BertTokenizer""" ) def lowerCAmelCase_ ( self : List[Any] ): try: AutoConfig.register("""custom""" , __lowerCAmelCase ) AutoTokenizer.register(__lowerCAmelCase , slow_tokenizer_class=__lowerCAmelCase ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(__lowerCAmelCase ): AutoTokenizer.register(__lowerCAmelCase , slow_tokenizer_class=__lowerCAmelCase ) _UpperCAmelCase = CustomTokenizer.from_pretrained(__lowerCAmelCase ) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(__lowerCAmelCase ) _UpperCAmelCase = AutoTokenizer.from_pretrained(__lowerCAmelCase ) self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] @require_tokenizers def lowerCAmelCase_ ( self : List[Any] ): try: AutoConfig.register("""custom""" , __lowerCAmelCase ) # Can register in two steps AutoTokenizer.register(__lowerCAmelCase , slow_tokenizer_class=__lowerCAmelCase ) self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) ) AutoTokenizer.register(__lowerCAmelCase , fast_tokenizer_class=__lowerCAmelCase ) self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) ) del TOKENIZER_MAPPING._extra_content[CustomConfig] # Can register in one step AutoTokenizer.register( __lowerCAmelCase , slow_tokenizer_class=__lowerCAmelCase , fast_tokenizer_class=__lowerCAmelCase ) self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(__lowerCAmelCase ): AutoTokenizer.register(__lowerCAmelCase , fast_tokenizer_class=__lowerCAmelCase ) # We pass through a bert tokenizer fast cause there 
is no converter slow to fast for our new toknizer # and that model does not have a tokenizer.json with tempfile.TemporaryDirectory() as tmp_dir: _UpperCAmelCase = BertTokenizerFast.from_pretrained(__lowerCAmelCase ) bert_tokenizer.save_pretrained(__lowerCAmelCase ) _UpperCAmelCase = CustomTokenizerFast.from_pretrained(__lowerCAmelCase ) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(__lowerCAmelCase ) _UpperCAmelCase = AutoTokenizer.from_pretrained(__lowerCAmelCase ) self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase ) _UpperCAmelCase = AutoTokenizer.from_pretrained(__lowerCAmelCase , use_fast=__lowerCAmelCase ) self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] def lowerCAmelCase_ ( self : List[str] ): # If remote code is not set, we will time out when asking whether to load the model. with self.assertRaises(__lowerCAmelCase ): _UpperCAmelCase = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" ) # If remote code is disabled, we can't load this config. with self.assertRaises(__lowerCAmelCase ): _UpperCAmelCase = AutoTokenizer.from_pretrained( """hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=__lowerCAmelCase ) _UpperCAmelCase = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=__lowerCAmelCase ) self.assertTrue(tokenizer.special_attribute_present ) # Test tokenizer can be reloaded. 
with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(__lowerCAmelCase ) _UpperCAmelCase = AutoTokenizer.from_pretrained(__lowerCAmelCase , trust_remote_code=__lowerCAmelCase ) self.assertTrue(reloaded_tokenizer.special_attribute_present ) if is_tokenizers_available(): self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" ) self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizerFast""" ) # Test we can also load the slow version _UpperCAmelCase = AutoTokenizer.from_pretrained( """hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=__lowerCAmelCase , use_fast=__lowerCAmelCase ) self.assertTrue(tokenizer.special_attribute_present ) self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" ) # Test tokenizer can be reloaded. with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(__lowerCAmelCase ) _UpperCAmelCase = AutoTokenizer.from_pretrained(__lowerCAmelCase , trust_remote_code=__lowerCAmelCase , use_fast=__lowerCAmelCase ) self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizer""" ) self.assertTrue(reloaded_tokenizer.special_attribute_present ) else: self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" ) self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizer""" ) @require_tokenizers def lowerCAmelCase_ ( self : List[str] ): class a ( lowerCAmelCase_ ): _snake_case : Tuple = False class a ( lowerCAmelCase_ ): _snake_case : Optional[int] = NewTokenizer _snake_case : Any = False try: AutoConfig.register("""custom""" , __lowerCAmelCase ) AutoTokenizer.register(__lowerCAmelCase , slow_tokenizer_class=__lowerCAmelCase ) AutoTokenizer.register(__lowerCAmelCase , fast_tokenizer_class=__lowerCAmelCase ) # If remote code is not set, the default is to use local _UpperCAmelCase = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" ) self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" ) 
self.assertFalse(tokenizer.special_attribute_present ) _UpperCAmelCase = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" , use_fast=__lowerCAmelCase ) self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" ) self.assertFalse(tokenizer.special_attribute_present ) # If remote code is disabled, we load the local one. _UpperCAmelCase = AutoTokenizer.from_pretrained( """hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=__lowerCAmelCase ) self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" ) self.assertFalse(tokenizer.special_attribute_present ) _UpperCAmelCase = AutoTokenizer.from_pretrained( """hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=__lowerCAmelCase , use_fast=__lowerCAmelCase ) self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" ) self.assertFalse(tokenizer.special_attribute_present ) # If remote is enabled, we load from the Hub _UpperCAmelCase = AutoTokenizer.from_pretrained( """hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=__lowerCAmelCase ) self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" ) self.assertTrue(tokenizer.special_attribute_present ) _UpperCAmelCase = AutoTokenizer.from_pretrained( """hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=__lowerCAmelCase , use_fast=__lowerCAmelCase ) self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" ) self.assertTrue(tokenizer.special_attribute_present ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] def lowerCAmelCase_ ( self : Dict ): _UpperCAmelCase = AutoTokenizer.from_pretrained( """hf-internal-testing/test_dynamic_tokenizer_legacy""" , trust_remote_code=__lowerCAmelCase ) self.assertTrue(tokenizer.special_attribute_present ) if is_tokenizers_available(): 
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" ) # Test we can also load the slow version _UpperCAmelCase = AutoTokenizer.from_pretrained( """hf-internal-testing/test_dynamic_tokenizer_legacy""" , trust_remote_code=__lowerCAmelCase , use_fast=__lowerCAmelCase ) self.assertTrue(tokenizer.special_attribute_present ) self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" ) else: self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" ) def lowerCAmelCase_ ( self : List[Any] ): with self.assertRaisesRegex( __lowerCAmelCase , """bert-base is not a local folder and is not a valid model identifier""" ): _UpperCAmelCase = AutoTokenizer.from_pretrained("""bert-base""" ) def lowerCAmelCase_ ( self : int ): with self.assertRaisesRegex( __lowerCAmelCase , R"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ): _UpperCAmelCase = AutoTokenizer.from_pretrained(__lowerCAmelCase , revision="""aaaaaa""" ) def lowerCAmelCase_ ( self : List[str] ): # Make sure we have cached the tokenizer. _UpperCAmelCase = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" ) with RequestCounter() as counter: _UpperCAmelCase = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" ) self.assertEqual(counter.get_request_count , 0 ) self.assertEqual(counter.head_request_count , 1 ) self.assertEqual(counter.other_request_count , 0 )
277
1
"""simple docstring""" import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES from ...utils import logging from ..auto import CONFIG_MAPPING UpperCamelCase__ = logging.get_logger(__name__) UpperCamelCase__ = { "Salesforce/instruct-blip-flan-t5": "https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json", } class a__ ( _UpperCAmelCase ): snake_case__ = '''instructblip_vision_model''' def __init__( self : Union[str, Any] ,a__ : List[str]=1408 ,a__ : Dict=6144 ,a__ : Optional[int]=39 ,a__ : Dict=16 ,a__ : Tuple=224 ,a__ : int=14 ,a__ : Optional[int]="gelu" ,a__ : Dict=1E-6 ,a__ : List[Any]=0.0 ,a__ : Tuple=1E-10 ,a__ : Union[str, Any]=True ,**a__ : Optional[Any] ,) -> Any: """simple docstring""" super().__init__(**lowercase_) _lowerCAmelCase:List[Any] = hidden_size _lowerCAmelCase:Tuple = intermediate_size _lowerCAmelCase:int = num_hidden_layers _lowerCAmelCase:Union[str, Any] = num_attention_heads _lowerCAmelCase:Dict = patch_size _lowerCAmelCase:Tuple = image_size _lowerCAmelCase:Optional[int] = initializer_range _lowerCAmelCase:str = attention_dropout _lowerCAmelCase:str = layer_norm_eps _lowerCAmelCase:List[Any] = hidden_act _lowerCAmelCase:Tuple = qkv_bias @classmethod def __UpperCamelCase ( cls : List[str] ,a__ : Union[str, os.PathLike] ,**a__ : List[Any]) -> Any: """simple docstring""" cls._set_token_in_kwargs(lowercase_) _lowerCAmelCase:List[Any] = cls.get_config_dict(lowercase_ ,**lowercase_) # get the vision config dict if we are loading from InstructBlipConfig if config_dict.get('''model_type''') == "instructblip": _lowerCAmelCase:List[str] = config_dict["""vision_config"""] if "model_type" in config_dict and hasattr(cls ,'''model_type''') and config_dict["model_type"] != cls.model_type: logger.warning( F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type ' F'{cls.model_type}. 
This is not supported for all configurations of models and can yield errors.') return cls.from_dict(lowercase_ ,**lowercase_) class a__ ( _UpperCAmelCase ): snake_case__ = '''instructblip_qformer''' def __init__( self : Any ,a__ : Optional[Any]=3_0522 ,a__ : List[str]=768 ,a__ : int=12 ,a__ : List[Any]=12 ,a__ : int=3072 ,a__ : str="gelu" ,a__ : Union[str, Any]=0.1 ,a__ : Optional[int]=0.1 ,a__ : str=512 ,a__ : Union[str, Any]=0.02 ,a__ : Dict=1E-12 ,a__ : Optional[int]=0 ,a__ : Union[str, Any]="absolute" ,a__ : Union[str, Any]=2 ,a__ : Dict=1408 ,**a__ : int ,) -> str: """simple docstring""" super().__init__(pad_token_id=lowercase_ ,**lowercase_) _lowerCAmelCase:Tuple = vocab_size _lowerCAmelCase:List[Any] = hidden_size _lowerCAmelCase:List[Any] = num_hidden_layers _lowerCAmelCase:int = num_attention_heads _lowerCAmelCase:Dict = hidden_act _lowerCAmelCase:str = intermediate_size _lowerCAmelCase:Tuple = hidden_dropout_prob _lowerCAmelCase:Dict = attention_probs_dropout_prob _lowerCAmelCase:Union[str, Any] = max_position_embeddings _lowerCAmelCase:List[str] = initializer_range _lowerCAmelCase:Union[str, Any] = layer_norm_eps _lowerCAmelCase:Tuple = position_embedding_type _lowerCAmelCase:Tuple = cross_attention_frequency _lowerCAmelCase:List[Any] = encoder_hidden_size @classmethod def __UpperCamelCase ( cls : List[str] ,a__ : Union[str, os.PathLike] ,**a__ : Optional[Any]) -> Optional[int]: """simple docstring""" cls._set_token_in_kwargs(lowercase_) _lowerCAmelCase:List[str] = cls.get_config_dict(lowercase_ ,**lowercase_) # get the qformer config dict if we are loading from InstructBlipConfig if config_dict.get('''model_type''') == "instructblip": _lowerCAmelCase:Dict = config_dict["""qformer_config"""] if "model_type" in config_dict and hasattr(cls ,'''model_type''') and config_dict["model_type"] != cls.model_type: logger.warning( F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type ' F'{cls.model_type}. 
This is not supported for all configurations of models and can yield errors.') return cls.from_dict(lowercase_ ,**lowercase_) class a__ ( _UpperCAmelCase ): snake_case__ = '''instructblip''' snake_case__ = True def __init__( self : Optional[Any] ,a__ : str=None ,a__ : List[str]=None ,a__ : Tuple=None ,a__ : Union[str, Any]=32 ,**a__ : List[Any]) -> int: """simple docstring""" super().__init__(**lowercase_) if vision_config is None: _lowerCAmelCase:str = {} logger.info('''vision_config is None. initializing the InstructBlipVisionConfig with default values.''') if qformer_config is None: _lowerCAmelCase:List[Any] = {} logger.info('''qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.''') if text_config is None: _lowerCAmelCase:Union[str, Any] = {} logger.info('''text_config is None. Initializing the text config with default values (`OPTConfig`).''') _lowerCAmelCase:Any = InstructBlipVisionConfig(**lowercase_) _lowerCAmelCase:Optional[Any] = InstructBlipQFormerConfig(**lowercase_) _lowerCAmelCase:Dict = text_config["""model_type"""] if """model_type""" in text_config else """opt""" _lowerCAmelCase:Tuple = CONFIG_MAPPING[text_model_type](**lowercase_) _lowerCAmelCase:Any = self.text_config.tie_word_embeddings _lowerCAmelCase:List[str] = self.text_config.is_encoder_decoder _lowerCAmelCase:int = num_query_tokens _lowerCAmelCase:Tuple = self.vision_config.hidden_size _lowerCAmelCase:List[Any] = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES _lowerCAmelCase:List[str] = 1.0 _lowerCAmelCase:Union[str, Any] = 0.02 @classmethod def __UpperCamelCase ( cls : str ,a__ : InstructBlipVisionConfig ,a__ : InstructBlipQFormerConfig ,a__ : PretrainedConfig ,**a__ : int ,) -> Union[str, Any]: """simple docstring""" return cls( vision_config=vision_config.to_dict() ,qformer_config=qformer_config.to_dict() ,text_config=text_config.to_dict() ,**lowercase_ ,) def __UpperCamelCase ( self : List[Any]) -> Dict: """simple docstring""" 
_lowerCAmelCase:Optional[Any] = copy.deepcopy(self.__dict__) _lowerCAmelCase:Optional[Any] = self.vision_config.to_dict() _lowerCAmelCase:Any = self.qformer_config.to_dict() _lowerCAmelCase:Dict = self.text_config.to_dict() _lowerCAmelCase:List[str] = self.__class__.model_type return output
708
"""simple docstring""" from dataclasses import dataclass from typing import List, Optional, Union import numpy as np import torch from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available @dataclass class a__ ( UpperCamelCase_ ): snake_case__ = 42 try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .pipeline_text_to_video_synth import TextToVideoSDPipeline from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401 from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
439
0
"""simple docstring""" # Usage: # ./gen-card-allenai-wmt16.py import os from pathlib import Path def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> List[str]: _SCREAMING_SNAKE_CASE : int = { """en""": """Machine learning is great, isn't it?""", """ru""": """Машинное обучение - это здорово, не так ли?""", """de""": """Maschinelles Lernen ist großartig, nicht wahr?""", } # BLUE scores as follows: # "pair": [fairseq, transformers] _SCREAMING_SNAKE_CASE : Any = { """wmt16-en-de-dist-12-1""": [28.3, 27.52], """wmt16-en-de-dist-6-1""": [27.4, 27.11], """wmt16-en-de-12-1""": [26.9, 25.75], } _SCREAMING_SNAKE_CASE : Dict = F"""{src_lang}-{tgt_lang}""" _SCREAMING_SNAKE_CASE : Dict = F""" --- language: - {src_lang} - {tgt_lang} thumbnail: tags: - translation - wmt16 - allenai license: apache-2.0 datasets: - wmt16 metrics: - bleu --- # FSMT ## Model description This is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}. For more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369). 
All 3 models are available: * [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1) * [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1) * [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1) ## Intended uses & limitations #### How to use ```python from transformers import FSMTForConditionalGeneration, FSMTTokenizer mname = \"allenai/{model_name}\" tokenizer = FSMTTokenizer.from_pretrained(mname) model = FSMTForConditionalGeneration.from_pretrained(mname) input = \"{texts[src_lang]}\" input_ids = tokenizer.encode(input, return_tensors=\"pt\") outputs = model.generate(input_ids) decoded = tokenizer.decode(outputs[0], skip_special_tokens=True) print(decoded) # {texts[tgt_lang]} ``` #### Limitations and bias ## Training data Pretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369). ## Eval results Here are the BLEU scores: model | fairseq | transformers -------|---------|---------- {model_name} | {scores[model_name][0]} | {scores[model_name][1]} The score is slightly below the score reported in the paper, as the researchers don't use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs. 
The score was calculated using this code: ```bash git clone https://github.com/huggingface/transformers cd transformers export PAIR={pair} export DATA_DIR=data/$PAIR export SAVE_DIR=data/$PAIR export BS=8 export NUM_BEAMS=5 mkdir -p $DATA_DIR sacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source sacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target echo $PAIR PYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS ``` ## Data Sources - [training, etc.](http://www.statmt.org/wmt16/) - [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372) ### BibTeX entry and citation info ``` @misc{{kasai2020deep, title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}}, author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}}, year={{2020}}, eprint={{2006.10369}}, archivePrefix={{arXiv}}, primaryClass={{cs.CL}} }} ``` """ model_card_dir.mkdir(parents=__SCREAMING_SNAKE_CASE , exist_ok=__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Dict = os.path.join(__SCREAMING_SNAKE_CASE , """README.md""" ) print(F"""Generating {path}""" ) with open(__SCREAMING_SNAKE_CASE , """w""" , encoding="""utf-8""" ) as f: f.write(__SCREAMING_SNAKE_CASE ) # make sure we are under the root of the project lowerCAmelCase_ = Path(__file__).resolve().parent.parent.parent lowerCAmelCase_ = repo_dir / '''model_cards''' for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]: lowerCAmelCase_ = model_cards_dir / '''allenai''' / model_name write_model_card(model_card_dir, src_lang='''en''', tgt_lang='''de''', model_name=model_name)
338
"""simple docstring""" import pandas as pd from matplotlib import pyplot as plt from sklearn.linear_model import LinearRegression # Splitting the dataset into the Training set and Test set from sklearn.model_selection import train_test_split # Fitting Polynomial Regression to the dataset from sklearn.preprocessing import PolynomialFeatures # Importing the dataset lowerCAmelCase_ = pd.read_csv( '''https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/''' '''position_salaries.csv''' ) lowerCAmelCase_ = dataset.iloc[:, 1:2].values lowerCAmelCase_ = dataset.iloc[:, 2].values lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = train_test_split(X, y, test_size=0.2, random_state=0) lowerCAmelCase_ = PolynomialFeatures(degree=4) lowerCAmelCase_ = poly_reg.fit_transform(X) lowerCAmelCase_ = LinearRegression() pol_reg.fit(X_poly, y) def lowerCamelCase_()-> str: plt.scatter(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , color="""red""" ) plt.plot(__SCREAMING_SNAKE_CASE , pol_reg.predict(poly_reg.fit_transform(__SCREAMING_SNAKE_CASE ) ) , color="""blue""" ) plt.title("""Truth or Bluff (Linear Regression)""" ) plt.xlabel("""Position level""" ) plt.ylabel("""Salary""" ) plt.show() if __name__ == "__main__": viz_polymonial() # Predicting a new result with Polymonial Regression pol_reg.predict(poly_reg.fit_transform([[5.5]])) # output should be 132148.43750003
338
1
"""simple docstring""" import math def snake_case__ ( ) ->None: """simple docstring""" __lowercase : Optional[int] = input("Enter message: " ) __lowercase : Dict = int(input(F'Enter key [2-{len(_lowerCamelCase ) - 1}]: ' ) ) __lowercase : Tuple = input("Encryption/Decryption [e/d]: " ) if mode.lower().startswith("e" ): __lowercase : List[str] = encrypt_message(_lowerCamelCase, _lowerCamelCase ) elif mode.lower().startswith("d" ): __lowercase : Union[str, Any] = decrypt_message(_lowerCamelCase, _lowerCamelCase ) # Append pipe symbol (vertical bar) to identify spaces at the end. print(F'Output:\n{text + "|"}' ) def snake_case__ ( _lowerCamelCase, _lowerCamelCase ) ->str: """simple docstring""" __lowercase : Any = [""] * key for col in range(_lowerCamelCase ): __lowercase : List[str] = col while pointer < len(_lowerCamelCase ): cipher_text[col] += message[pointer] pointer += key return "".join(_lowerCamelCase ) def snake_case__ ( _lowerCamelCase, _lowerCamelCase ) ->str: """simple docstring""" __lowercase : Optional[Any] = math.ceil(len(_lowerCamelCase ) / key ) __lowercase : Any = key __lowercase : Optional[int] = (num_cols * num_rows) - len(_lowerCamelCase ) __lowercase : List[Any] = [""] * num_cols __lowercase : Union[str, Any] = 0 __lowercase : Optional[int] = 0 for symbol in message: plain_text[col] += symbol col += 1 if ( (col == num_cols) or (col == num_cols - 1) and (row >= num_rows - num_shaded_boxes) ): __lowercase : Union[str, Any] = 0 row += 1 return "".join(_lowerCamelCase ) if __name__ == "__main__": import doctest doctest.testmod() main()
281
"""simple docstring""" import unittest from transformers.testing_utils import CaptureStdout from transformers.tools.python_interpreter import evaluate def snake_case__ ( _lowerCamelCase ) ->Dict: """simple docstring""" return x + 2 class lowerCAmelCase__ ( unittest.TestCase ): """simple docstring""" def snake_case ( self : List[str] ): __lowercase : List[Any] = "x = 3" __lowercase : Optional[int] = {} __lowercase : List[Any] = evaluate(lowercase__ , {} , state=lowercase__ ) assert result == 3 self.assertDictEqual(lowercase__ , {"x": 3} ) __lowercase : Optional[int] = "x = y" __lowercase : Union[str, Any] = {"y": 5} __lowercase : str = evaluate(lowercase__ , {} , state=lowercase__ ) # evaluate returns the value of the last assignment. assert result == 5 self.assertDictEqual(lowercase__ , {"x": 5, "y": 5} ) def snake_case ( self : Dict ): __lowercase : Dict = "y = add_two(x)" __lowercase : List[Any] = {"x": 3} __lowercase : Optional[int] = evaluate(lowercase__ , {"add_two": add_two} , state=lowercase__ ) assert result == 5 self.assertDictEqual(lowercase__ , {"x": 3, "y": 5} ) # Won't work without the tool with CaptureStdout() as out: __lowercase : Any = evaluate(lowercase__ , {} , state=lowercase__ ) assert result is None assert "tried to execute add_two" in out.out def snake_case ( self : Any ): __lowercase : Optional[Any] = "x = 3" __lowercase : List[Any] = {} __lowercase : Dict = evaluate(lowercase__ , {} , state=lowercase__ ) assert result == 3 self.assertDictEqual(lowercase__ , {"x": 3} ) def snake_case ( self : str ): __lowercase : Optional[int] = "test_dict = {'x': x, 'y': add_two(x)}" __lowercase : List[str] = {"x": 3} __lowercase : Tuple = evaluate(lowercase__ , {"add_two": add_two} , state=lowercase__ ) self.assertDictEqual(lowercase__ , {"x": 3, "y": 5} ) self.assertDictEqual(lowercase__ , {"x": 3, "test_dict": {"x": 3, "y": 5}} ) def snake_case ( self : int ): __lowercase : Tuple = "x = 3\ny = 5" __lowercase : Optional[Any] = {} __lowercase : List[Any] = 
evaluate(lowercase__ , {} , state=lowercase__ ) # evaluate returns the value of the last assignment. assert result == 5 self.assertDictEqual(lowercase__ , {"x": 3, "y": 5} ) def snake_case ( self : Any ): __lowercase : str = "text = f'This is x: {x}.'" __lowercase : Any = {"x": 3} __lowercase : List[Any] = evaluate(lowercase__ , {} , state=lowercase__ ) # evaluate returns the value of the last assignment. assert result == "This is x: 3." self.assertDictEqual(lowercase__ , {"x": 3, "text": "This is x: 3."} ) def snake_case ( self : str ): __lowercase : Optional[Any] = "if x <= 3:\n y = 2\nelse:\n y = 5" __lowercase : int = {"x": 3} __lowercase : Optional[int] = evaluate(lowercase__ , {} , state=lowercase__ ) # evaluate returns the value of the last assignment. assert result == 2 self.assertDictEqual(lowercase__ , {"x": 3, "y": 2} ) __lowercase : Optional[int] = {"x": 8} __lowercase : Union[str, Any] = evaluate(lowercase__ , {} , state=lowercase__ ) # evaluate returns the value of the last assignment. 
assert result == 5 self.assertDictEqual(lowercase__ , {"x": 8, "y": 5} ) def snake_case ( self : Dict ): __lowercase : Optional[Any] = "test_list = [x, add_two(x)]" __lowercase : str = {"x": 3} __lowercase : Any = evaluate(lowercase__ , {"add_two": add_two} , state=lowercase__ ) self.assertListEqual(lowercase__ , [3, 5] ) self.assertDictEqual(lowercase__ , {"x": 3, "test_list": [3, 5]} ) def snake_case ( self : Optional[int] ): __lowercase : str = "y = x" __lowercase : str = {"x": 3} __lowercase : int = evaluate(lowercase__ , {} , state=lowercase__ ) assert result == 3 self.assertDictEqual(lowercase__ , {"x": 3, "y": 3} ) def snake_case ( self : Optional[int] ): __lowercase : str = "test_list = [x, add_two(x)]\ntest_list[1]" __lowercase : Optional[Any] = {"x": 3} __lowercase : Any = evaluate(lowercase__ , {"add_two": add_two} , state=lowercase__ ) assert result == 5 self.assertDictEqual(lowercase__ , {"x": 3, "test_list": [3, 5]} ) __lowercase : Tuple = "test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']" __lowercase : Union[str, Any] = {"x": 3} __lowercase : Tuple = evaluate(lowercase__ , {"add_two": add_two} , state=lowercase__ ) assert result == 5 self.assertDictEqual(lowercase__ , {"x": 3, "test_dict": {"x": 3, "y": 5}} ) def snake_case ( self : Dict ): __lowercase : Dict = "x = 0\nfor i in range(3):\n x = i" __lowercase : str = {} __lowercase : int = evaluate(lowercase__ , {"range": range} , state=lowercase__ ) assert result == 2 self.assertDictEqual(lowercase__ , {"x": 2, "i": 2} )
281
1
import os
import time

import pytest

from datasets.utils.filelock import FileLock, Timeout


def test_filelock_timeout(tmpdir):
    """A second FileLock on the same path must time out while the first is held.

    `tmpdir` is the pytest fixture providing a fresh temporary directory.
    """
    lock1 = FileLock(str(tmpdir / "foo.lock"))
    lock2 = FileLock(str(tmpdir / "foo.lock"))
    timeout = 0.01

    with lock1.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lock2.acquire(timeout)
        # The failed acquire must have waited at least the requested timeout.
        assert time.time() - _start > timeout


def test_long_path(tmpdir):
    """Lock-file names longer than the OS limit are shortened to <= 255 chars
    while still ending in `.lock`, and the shortened lock still excludes others."""
    filename = "a" * 1000 + ".lock"
    lock1 = FileLock(str(tmpdir / filename))
    assert lock1._lock_file.endswith(".lock")
    assert not lock1._lock_file.endswith(filename)  # the over-long name was truncated
    assert len(os.path.basename(lock1._lock_file)) <= 255

    lock2 = FileLock(tmpdir / filename)
    with lock1.acquire():
        with pytest.raises(Timeout):
            lock2.acquire(0)
305
from collections import OrderedDict
from typing import Any, Mapping, Optional

from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging


logger = logging.get_logger(__name__)

MARIAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Helsinki-NLP/opus-mt-en-de": "https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json",
    # See all Marian models at https://huggingface.co/models?filter=marian
}


class MarianConfig(PretrainedConfig):
    """Configuration for a Marian NMT encoder-decoder model.

    Defaults mirror the Helsinki-NLP/opus-mt-en-de checkpoint. Token-level
    arguments (pad/eos/decoder-start/forced-eos ids) are forwarded to
    `PretrainedConfig.__init__`.
    """

    model_type = "marian"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=58101,
        decoder_vocab_size=None,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=58100,
        scale_embedding=False,
        pad_token_id=58100,
        eos_token_id=0,
        forced_eos_token_id=0,
        share_encoder_decoder_embeddings=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # The decoder may use its own vocabulary; fall back to the shared one.
        self.decoder_vocab_size = decoder_vocab_size or vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.share_encoder_decoder_embeddings = share_encoder_decoder_embeddings
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )


class MarianOnnxConfig(OnnxSeq2SeqConfigWithPast):
    """ONNX export configuration for Marian.

    Supports the "default"/"seq2seq-lm" tasks (encoder+decoder) and a
    "causal-lm" task (decoder-only), including past-key-value caching.
    """

    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                # With a cache the decoder is fed one token at a time.
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )
        return common_inputs

    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            # Skip OnnxConfigWithPast's override and use the plain OnnxConfig outputs.
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs

    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        """Build encoder+decoder dummy inputs, including zeroed past_key_values when enabled."""
        encoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )

            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"

            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs

    def _generate_dummy_inputs_for_causal_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        """Build decoder-only dummy inputs, including zeroed past_key_values when enabled."""
        common_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )

            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)],
                dim=1,
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs

    def _generate_dummy_inputs_for_encoder_and_decoder(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        """Tokenize a dummy batch sized for ONNX export.

        Copied from OnnxConfig.generate_dummy_inputs.
        Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        """
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )
        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        """Dispatch to the seq2seq or causal-lm dummy-input generator based on `self.task`."""
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        return common_inputs

    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            # Causal-lm caches are flattened the single-stack way.
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )

    @property
    def atol_for_validation(self) -> float:
        # Tolerance used when comparing ONNX outputs against the PyTorch reference.
        return 1e-4
305
1
import argparse

import torch
from omegaconf import OmegaConf

from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel


def convert_ldm_original(checkpoint_path, config_path, output_path):
    """Convert an original CompVis latent-diffusion checkpoint into a diffusers LDMPipeline.

    Args:
        checkpoint_path: path to the original ``.ckpt`` file (torch checkpoint with a "model" key).
        config_path: path to the matching OmegaConf YAML config.
        output_path: directory where the converted pipeline is saved.
    """
    config = OmegaConf.load(config_path)
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    keys = list(state_dict.keys())

    # extract state_dict for VQVAE (strip the "first_stage_model." prefix)
    first_stage_dict = {}
    first_stage_key = "first_stage_model."
    for key in keys:
        if key.startswith(first_stage_key):
            first_stage_dict[key.replace(first_stage_key, "")] = state_dict[key]

    # extract state_dict for UNetLDM (strip the "model.diffusion_model." prefix)
    unet_state_dict = {}
    unet_key = "model.diffusion_model."
    for key in keys:
        if key.startswith(unet_key):
            unet_state_dict[key.replace(unet_key, "")] = state_dict[key]

    vqvae_init_args = config.model.params.first_stage_config.params
    unet_init_args = config.model.params.unet_config.params

    vqvae = VQModel(**vqvae_init_args).eval()
    vqvae.load_state_dict(first_stage_dict)

    unet = UNetLDMModel(**unet_init_args).eval()
    unet.load_state_dict(unet_state_dict)

    scheduler = DDIMScheduler(
        timesteps=config.model.params.timesteps,
        beta_schedule="scaled_linear",
        beta_start=config.model.params.linear_start,
        beta_end=config.model.params.linear_end,
        # NOTE(review): the original value was lost in obfuscation; LDM conversions
        # use clip_sample=False — confirm against the upstream script.
        clip_sample=False,
    )

    pipeline = LDMPipeline(vqvae, unet, scheduler)
    pipeline.save_pretrained(output_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--checkpoint_path", type=str, required=True)
    parser.add_argument("--config_path", type=str, required=True)
    parser.add_argument("--output_path", type=str, required=True)
    args = parser.parse_args()

    convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
709
import gc
import unittest

from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
    is_pipeline_test,
    is_torch_available,
    nested_simplify,
    require_tf,
    require_torch,
    require_torch_gpu,
    slow,
)

from .test_pipelines_common import ANY


@is_pipeline_test
class FillMaskPipelineTests(unittest.TestCase):
    """Tests for the fill-mask pipeline (small/fast models plus slow large-model checks)."""

    model_mapping = MODEL_FOR_MASKED_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_MASKED_LM_MAPPING

    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        if is_torch_available():
            import torch

            torch.cuda.empty_cache()

    @require_tf
    def test_small_model_tf(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2, framework="tf")
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is grouped", "score": 2.1e-05, "token": 38015, "token_str": " grouped"},
                {"sequence": "My name is accuser", "score": 2.1e-05, "token": 25506, "token_str": " accuser"},
            ],
        )

        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {
                    "sequence": "The largest city in France is grouped",
                    "score": 2.1e-05,
                    "token": 38015,
                    "token_str": " grouped",
                },
                {
                    "sequence": "The largest city in France is accuser",
                    "score": 2.1e-05,
                    "token": 25506,
                    "token_str": " accuser",
                },
            ],
        )

        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"},
                {"sequence": "My name is Patrick", "score": 2e-05, "token": 3499, "token_str": " Patrick"},
                {"sequence": "My name is Te", "score": 1.9e-05, "token": 2941, "token_str": " Te"},
            ],
        )

    @require_torch
    def test_small_model_pt(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2, framework="pt")
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is Maul", "score": 2.2e-05, "token": 35676, "token_str": " Maul"},
                {"sequence": "My name isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"},
            ],
        )

        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {
                    "sequence": "The largest city in France is Maul",
                    "score": 2.2e-05,
                    "token": 35676,
                    "token_str": " Maul",
                },
                {"sequence": "The largest city in France isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"},
            ],
        )

        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is Patrick", "score": 2.1e-05, "token": 3499, "token_str": " Patrick"},
                {"sequence": "My name is Te", "score": 2e-05, "token": 2941, "token_str": " Te"},
                {"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"},
            ],
        )

        outputs = unmasker("My name is <mask> <mask>", top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                [
                    {
                        "score": 2.2e-05,
                        "token": 35676,
                        "token_str": " Maul",
                        "sequence": "<s>My name is Maul<mask></s>",
                    },
                    {"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name isELS<mask></s>"},
                ],
                [
                    {
                        "score": 2.2e-05,
                        "token": 35676,
                        "token_str": " Maul",
                        "sequence": "<s>My name is<mask> Maul</s>",
                    },
                    {"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name is<mask>ELS</s>"},
                ],
            ],
        )

    @require_torch_gpu
    def test_fp16_casting(self):
        pipe = pipeline("fill-mask", model="hf-internal-testing/tiny-random-distilbert", device=0, framework="pt")

        # convert model to fp16
        pipe.model.half()

        response = pipe("Paris is the [MASK] of France.")
        # We actually don't care about the result, we just want to make sure
        # it works, meaning the float16 tensor got casted back to float32
        # for postprocessing.
        self.assertIsInstance(response, list)

    @slow
    @require_torch
    def test_large_model_pt(self):
        unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2, framework="pt")
        self.run_large_test(unmasker)

    @slow
    @require_tf
    def test_large_model_tf(self):
        unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2, framework="tf")
        self.run_large_test(unmasker)

    def run_large_test(self, unmasker):
        """Shared assertions for the full-size distilroberta checkpoint."""
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"sequence": "My name is John", "score": 0.008, "token": 610, "token_str": " John"},
                {"sequence": "My name is Chris", "score": 0.007, "token": 1573, "token_str": " Chris"},
            ],
        )

        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs),
            [
                {
                    "sequence": "The largest city in France is Paris",
                    "score": 0.251,
                    "token": 2201,
                    "token_str": " Paris",
                },
                {
                    "sequence": "The largest city in France is Lyon",
                    "score": 0.214,
                    "token": 12790,
                    "token_str": " Lyon",
                },
            ],
        )

        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"sequence": "My name is Patrick", "score": 0.005, "token": 3499, "token_str": " Patrick"},
                {"sequence": "My name is Clara", "score": 0.000, "token": 13606, "token_str": " Clara"},
                {"sequence": "My name is Te", "score": 0.000, "token": 2941, "token_str": " Te"},
            ],
        )

    @require_torch
    def test_model_no_pad_pt(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", framework="pt")
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker, [])

    @require_tf
    def test_model_no_pad_tf(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", framework="tf")
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker, [])

    def get_test_pipeline(self, model, tokenizer, processor):
        if tokenizer is None or tokenizer.mask_token_id is None:
            self.skipTest("The provided tokenizer has no mask token, (probably reformer or wav2vec2)")

        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        examples = [
            f"This is another {tokenizer.mask_token} test",
        ]
        return fill_masker, examples

    def run_pipeline_test(self, fill_masker, examples):
        tokenizer = fill_masker.tokenizer
        model = fill_masker.model
        # Every prediction row has this shape; only the value types are checked here.
        any_row = {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}

        outputs = fill_masker(
            f"This is a {tokenizer.mask_token}",
        )
        self.assertEqual(outputs, [any_row] * 5)

        outputs = fill_masker([f"This is a {tokenizer.mask_token}"])
        self.assertEqual(outputs, [any_row] * 5)

        outputs = fill_masker([f"This is a {tokenizer.mask_token}", f"Another {tokenizer.mask_token} great test."])
        self.assertEqual(outputs, [[any_row] * 5, [any_row] * 5])

        with self.assertRaises(ValueError):
            fill_masker([None])
        # No mask_token is not supported
        with self.assertRaises(PipelineException):
            fill_masker("This is")

        self.run_test_top_k(model, tokenizer)
        self.run_test_targets(model, tokenizer)
        self.run_test_top_k_targets(model, tokenizer)
        self.fill_mask_with_duplicate_targets_and_top_k(model, tokenizer)
        self.fill_mask_with_multiple_masks(model, tokenizer)

    def run_test_targets(self, model, tokenizer):
        vocab = tokenizer.get_vocab()
        targets = sorted(vocab.keys())[:2]
        any_row = {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}

        # Pipeline argument
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer, targets=targets)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}")
        self.assertEqual(outputs, [any_row] * 2)
        target_ids = {vocab[el] for el in targets}
        self.assertEqual({el["token"] for el in outputs}, target_ids)
        processed_targets = [tokenizer.decode([x]) for x in target_ids]
        self.assertEqual({el["token_str"] for el in outputs}, set(processed_targets))

        # Call argument
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=targets)
        self.assertEqual(outputs, [any_row] * 2)
        target_ids = {vocab[el] for el in targets}
        self.assertEqual({el["token"] for el in outputs}, target_ids)
        processed_targets = [tokenizer.decode([x]) for x in target_ids]
        self.assertEqual({el["token_str"] for el in outputs}, set(processed_targets))

        # Score equivalence
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=targets)
        tokens = [top_mask["token_str"] for top_mask in outputs]
        scores = [top_mask["score"] for top_mask in outputs]

        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(tokens) == set(targets):
            unmasked_targets = fill_masker(f"This is a {tokenizer.mask_token}", targets=tokens)
            target_scores = [top_mask["score"] for top_mask in unmasked_targets]
            self.assertEqual(nested_simplify(scores), nested_simplify(target_scores))

        # Raises with invalid
        with self.assertRaises(ValueError):
            outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=[])
        # For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised
        if "" not in tokenizer.get_vocab():
            with self.assertRaises(ValueError):
                outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=[""])
        with self.assertRaises(ValueError):
            outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets="")

    def run_test_top_k(self, model, tokenizer):
        any_row = {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}

        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer, top_k=2)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}")
        self.assertEqual(outputs, [any_row] * 2)

        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        outputs2 = fill_masker(f"This is a {tokenizer.mask_token}", top_k=2)
        self.assertEqual(outputs2, [any_row] * 2)
        # Pipeline-level and call-level top_k must agree.
        self.assertEqual(nested_simplify(outputs), nested_simplify(outputs2))

    def run_test_top_k_targets(self, model, tokenizer):
        vocab = tokenizer.get_vocab()
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)

        # top_k=2, ntargets=3
        targets = sorted(vocab.keys())[:3]
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", top_k=2, targets=targets)

        # If we use the most probably targets, and filter differently, we should still
        # have the same results
        targets2 = [el["token_str"] for el in sorted(outputs, key=lambda x: x["score"], reverse=True)]
        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(targets2).issubset(vocab):
            outputs2 = fill_masker(f"This is a {tokenizer.mask_token}", top_k=3, targets=targets2)
            # They should yield exactly the same result
            self.assertEqual(nested_simplify(outputs), nested_simplify(outputs2))

    def fill_mask_with_duplicate_targets_and_top_k(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        vocab = tokenizer.get_vocab()
        # String duplicates + id duplicates
        targets = sorted(vocab.keys())[:3]
        targets = [targets[0], targets[1], targets[0], targets[2], targets[1]]
        outputs = fill_masker(f"My name is {tokenizer.mask_token}", targets=targets, top_k=10)

        # The target list contains duplicates, so we can't output more
        # than them
        self.assertEqual(len(outputs), 3)

    def fill_mask_with_multiple_masks(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        any_row = {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}

        outputs = fill_masker(
            f"This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}", top_k=2
        )
        # One list of top_k rows per mask position.
        self.assertEqual(outputs, [[any_row] * 2, [any_row] * 2, [any_row] * 2])
548
0
import subprocess import sys from transformers import BertConfig, BertModel, BertTokenizer, pipeline from transformers.testing_utils import TestCasePlus, require_torch class __lowercase ( _SCREAMING_SNAKE_CASE ): """simple docstring""" @require_torch def snake_case ( self ) -> str: # this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before # `transformers` is loaded, and it's too late for inside pytest - so we are changing it # while running an external program # python one-liner segments # this must be loaded before socket.socket is monkey-patched A : str = ''' from transformers import BertConfig, BertModel, BertTokenizer, pipeline ''' A : Union[str, Any] = ''' mname = "hf-internal-testing/tiny-random-bert" BertConfig.from_pretrained(mname) BertModel.from_pretrained(mname) BertTokenizer.from_pretrained(mname) pipe = pipeline(task="fill-mask", model=mname) print("success") ''' A : Dict = ''' import socket def offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn\'t access internet") socket.socket = offline_socket ''' # Force fetching the files so that we can use the cache A : int = '''hf-internal-testing/tiny-random-bert''' BertConfig.from_pretrained(__UpperCAmelCase ) BertModel.from_pretrained(__UpperCAmelCase ) BertTokenizer.from_pretrained(__UpperCAmelCase ) pipeline(task='''fill-mask''' , model=__UpperCAmelCase ) # baseline - just load from_pretrained with normal network A : Optional[int] = [sys.executable, '''-c''', '''\n'''.join([load, run, mock] )] # should succeed A : List[Any] = self.get_env() # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files A : List[str] = '''1''' A : str = subprocess.run(__UpperCAmelCase , env=__UpperCAmelCase , check=__UpperCAmelCase , capture_output=__UpperCAmelCase ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn('''success''' , result.stdout.decode() ) @require_torch def snake_case ( self ) -> List[Any]: # python one-liner segments 
# this must be loaded before socket.socket is monkey-patched A : Tuple = ''' from transformers import BertConfig, BertModel, BertTokenizer, pipeline ''' A : Any = ''' mname = "hf-internal-testing/tiny-random-bert" BertConfig.from_pretrained(mname) BertModel.from_pretrained(mname) BertTokenizer.from_pretrained(mname) pipe = pipeline(task="fill-mask", model=mname) print("success") ''' A : Any = ''' import socket def offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet") socket.socket = offline_socket ''' # Force fetching the files so that we can use the cache A : Any = '''hf-internal-testing/tiny-random-bert''' BertConfig.from_pretrained(__UpperCAmelCase ) BertModel.from_pretrained(__UpperCAmelCase ) BertTokenizer.from_pretrained(__UpperCAmelCase ) pipeline(task='''fill-mask''' , model=__UpperCAmelCase ) # baseline - just load from_pretrained with normal network A : Any = [sys.executable, '''-c''', '''\n'''.join([load, run, mock] )] # should succeed A : str = self.get_env() A : Dict = subprocess.run(__UpperCAmelCase , env=__UpperCAmelCase , check=__UpperCAmelCase , capture_output=__UpperCAmelCase ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn('''success''' , result.stdout.decode() ) @require_torch def snake_case ( self ) -> str: # this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before # `transformers` is loaded, and it's too late for inside pytest - so we are changing it # while running an external program # python one-liner segments # this must be loaded before socket.socket is monkey-patched A : str = ''' from transformers import BertConfig, BertModel, BertTokenizer ''' A : Union[str, Any] = ''' mname = "hf-internal-testing/tiny-random-bert-sharded" BertConfig.from_pretrained(mname) BertModel.from_pretrained(mname) print("success") ''' A : Optional[int] = ''' import socket def offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled") socket.socket = offline_socket ''' # 
baseline - just load from_pretrained with normal network A : Tuple = [sys.executable, '''-c''', '''\n'''.join([load, run] )] # should succeed A : Union[str, Any] = self.get_env() A : int = subprocess.run(__UpperCAmelCase , env=__UpperCAmelCase , check=__UpperCAmelCase , capture_output=__UpperCAmelCase ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn('''success''' , result.stdout.decode() ) # next emulate no network A : int = [sys.executable, '''-c''', '''\n'''.join([load, mock, run] )] # Doesn't fail anymore since the model is in the cache due to other tests, so commenting this. # env["TRANSFORMERS_OFFLINE"] = "0" # result = subprocess.run(cmd, env=env, check=False, capture_output=True) # self.assertEqual(result.returncode, 1, result.stderr) # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files A : Optional[Any] = '''1''' A : Optional[int] = subprocess.run(__UpperCAmelCase , env=__UpperCAmelCase , check=__UpperCAmelCase , capture_output=__UpperCAmelCase ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn('''success''' , result.stdout.decode() ) @require_torch def snake_case ( self ) -> Optional[int]: A : int = ''' from transformers import pipeline ''' A : Any = ''' mname = "hf-internal-testing/tiny-random-bert" pipe = pipeline(model=mname) ''' A : str = ''' import socket def offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled") socket.socket = offline_socket ''' A : Tuple = self.get_env() A : Optional[Any] = '''1''' A : List[Any] = [sys.executable, '''-c''', '''\n'''.join([load, mock, run] )] A : Optional[Any] = subprocess.run(__UpperCAmelCase , env=__UpperCAmelCase , check=__UpperCAmelCase , capture_output=__UpperCAmelCase ) self.assertEqual(result.returncode , 1 , result.stderr ) self.assertIn( '''You cannot infer task automatically within `pipeline` when using offline mode''' , result.stderr.decode().replace('''\n''' , '''''' ) , ) @require_torch def snake_case ( self ) -> 
Tuple: A : Any = ''' from transformers import AutoModel ''' A : Union[str, Any] = ''' mname = "hf-internal-testing/test_dynamic_model" AutoModel.from_pretrained(mname, trust_remote_code=True) print("success") ''' # baseline - just load from_pretrained with normal network A : Union[str, Any] = [sys.executable, '''-c''', '''\n'''.join([load, run] )] # should succeed A : List[str] = self.get_env() A : Union[str, Any] = subprocess.run(__UpperCAmelCase , env=__UpperCAmelCase , check=__UpperCAmelCase , capture_output=__UpperCAmelCase ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn('''success''' , result.stdout.decode() ) # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files A : str = '''1''' A : int = subprocess.run(__UpperCAmelCase , env=__UpperCAmelCase , check=__UpperCAmelCase , capture_output=__UpperCAmelCase ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn('''success''' , result.stdout.decode() )
542
import os import unittest from huggingface_hub.utils import are_progress_bars_disabled import transformers.models.bart.tokenization_bart from transformers import logging from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context from transformers.utils.logging import disable_progress_bar, enable_progress_bar class __lowercase ( unittest.TestCase ): """simple docstring""" def snake_case ( self ) -> Union[str, Any]: A : Dict = logging.get_logger() # the current default level is logging.WARNING A : List[Any] = logging.get_verbosity() logging.set_verbosity_error() self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() ) logging.set_verbosity_warning() self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() ) logging.set_verbosity_info() self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() ) logging.set_verbosity_debug() self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() ) # restore to the original level logging.set_verbosity(__UpperCAmelCase ) def snake_case ( self ) -> str: A : Any = logging.get_verbosity() A : Optional[Any] = logging.get_logger('''transformers.models.bart.tokenization_bart''' ) A : Tuple = '''Testing 1, 2, 3''' # should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`) if level_origin <= logging.WARNING: with CaptureLogger(__UpperCAmelCase ) as cl: logger.warning(__UpperCAmelCase ) self.assertEqual(cl.out , msg + '''\n''' ) # this is setting the level for all of `transformers.*` loggers logging.set_verbosity_error() # should not be able to log warnings with CaptureLogger(__UpperCAmelCase ) as cl: logger.warning(__UpperCAmelCase ) self.assertEqual(cl.out , '''''' ) # should be able to log warnings again logging.set_verbosity_warning() with CaptureLogger(__UpperCAmelCase ) as cl: logger.warning(__UpperCAmelCase ) self.assertEqual(cl.out , msg + '''\n''' ) # restore to the original level logging.set_verbosity(__UpperCAmelCase ) 
@mockenv(TRANSFORMERS_VERBOSITY='''error''' ) def snake_case ( self ) -> Optional[int]: # reset for the env var to take effect, next time some logger call is made transformers.utils.logging._reset_library_root_logger() # this action activates the env var A : int = logging.get_logger('''transformers.models.bart.tokenization_bart''' ) A : Any = os.getenv('''TRANSFORMERS_VERBOSITY''' , __UpperCAmelCase ) A : List[str] = logging.log_levels[env_level_str] A : Optional[int] = logging.get_verbosity() self.assertEqual( __UpperCAmelCase , __UpperCAmelCase , f'TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}' , ) # restore to the original level A : str = '''''' transformers.utils.logging._reset_library_root_logger() @mockenv(TRANSFORMERS_VERBOSITY='''super-error''' ) def snake_case ( self ) -> Optional[int]: # reset for the env var to take effect, next time some logger call is made transformers.utils.logging._reset_library_root_logger() A : str = logging.logging.getLogger() with CaptureLogger(__UpperCAmelCase ) as cl: # this action activates the env var logging.get_logger('''transformers.models.bart.tokenization_bart''' ) self.assertIn('''Unknown option TRANSFORMERS_VERBOSITY=super-error''' , cl.out ) # no need to restore as nothing was changed def snake_case ( self ) -> Optional[int]: # testing `logger.warning_advice()` transformers.utils.logging._reset_library_root_logger() A : Union[str, Any] = logging.get_logger('''transformers.models.bart.tokenization_bart''' ) A : Optional[int] = '''Testing 1, 2, 3''' with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='''1''' ): # nothing should be logged as env var disables this method with CaptureLogger(__UpperCAmelCase ) as cl: logger.warning_advice(__UpperCAmelCase ) self.assertEqual(cl.out , '''''' ) with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='''''' ): # should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset with CaptureLogger(__UpperCAmelCase ) as cl: 
logger.warning_advice(__UpperCAmelCase ) self.assertEqual(cl.out , msg + '''\n''' ) def snake_case__ ( ): disable_progress_bar() assert are_progress_bars_disabled() enable_progress_bar() assert not are_progress_bars_disabled()
542
1
import argparse import os import re lowerCamelCase : List[Any] = 'src/transformers/models/auto' # re pattern that matches mapping introductions: # SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict lowerCamelCase : List[str] = re.compile(r'[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict') # re pattern that matches identifiers in mappings lowerCamelCase : List[Any] = re.compile(r'\s*\(\s*"(\S[^"]+)"') def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase = False ) -> str: with open(lowercase ,"""r""" ,encoding="""utf-8""" ) as f: snake_case : Dict = f.read() snake_case : Dict = content.split("""\n""" ) snake_case : int = [] snake_case : Tuple = 0 while line_idx < len(lowercase ): if _re_intro_mapping.search(lines[line_idx] ) is not None: snake_case : Optional[Any] = len(re.search(R"""^(\s*)\S""" ,lines[line_idx] ).groups()[0] ) + 8 # Start of a new mapping! while not lines[line_idx].startswith(""" """ * indent + """(""" ): new_lines.append(lines[line_idx] ) line_idx += 1 snake_case : Optional[int] = [] while lines[line_idx].strip() != "]": # Blocks either fit in one line or not if lines[line_idx].strip() == "(": snake_case : Optional[int] = line_idx while not lines[line_idx].startswith(""" """ * indent + """)""" ): line_idx += 1 blocks.append("""\n""".join(lines[start_idx : line_idx + 1] ) ) else: blocks.append(lines[line_idx] ) line_idx += 1 # Sort blocks by their identifiers snake_case : Optional[int] = sorted(lowercase ,key=lambda lowercase : _re_identifier.search(lowercase ).groups()[0] ) new_lines += blocks else: new_lines.append(lines[line_idx] ) line_idx += 1 if overwrite: with open(lowercase ,"""w""" ,encoding="""utf-8""" ) as f: f.write("""\n""".join(lowercase ) ) elif "\n".join(lowercase ) != content: return True def SCREAMING_SNAKE_CASE__ ( lowercase = False ) -> str: snake_case : Dict = [os.path.join(lowercase ,lowercase ) for f in os.listdir(lowercase ) if f.endswith(""".py""" )] snake_case : List[str] = 
[sort_auto_mapping(lowercase ,overwrite=lowercase ) for fname in fnames] if not overwrite and any(lowercase ): snake_case : int = [f for f, d in zip(lowercase ,lowercase ) if d] raise ValueError( f"""The following files have auto mappings that need sorting: {", ".join(lowercase )}. Run `make style` to fix""" """ this.""" ) if __name__ == "__main__": lowerCamelCase : Optional[int] = argparse.ArgumentParser() parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.') lowerCamelCase : str = parser.parse_args() sort_all_auto_mappings(not args.check_only)
684
import os import random import sys from . import cryptomath_module as cryptomath from . import rabin_miller lowerCamelCase : List[str] = 3 def SCREAMING_SNAKE_CASE__ ( lowercase ) -> int: print("""Generating primitive root of p""" ) while True: snake_case : Optional[int] = random.randrange(3 ,lowercase ) if pow(lowercase ,2 ,lowercase ) == 1: continue if pow(lowercase ,lowercase ,lowercase ) == 1: continue return g def SCREAMING_SNAKE_CASE__ ( lowercase ) -> tuple[tuple[int, int, int, int], tuple[int, int]]: print("""Generating prime p...""" ) snake_case : Optional[int] = rabin_miller.generate_large_prime(lowercase ) # select large prime number. snake_case : Optional[int] = primitive_root(lowercase ) # one primitive root on modulo p. snake_case : Optional[Any] = random.randrange(3 ,lowercase ) # private_key -> have to be greater than 2 for safety. snake_case : Tuple = cryptomath.find_mod_inverse(pow(lowercase ,lowercase ,lowercase ) ,lowercase ) snake_case : str = (key_size, e_a, e_a, p) snake_case : Optional[Any] = (key_size, d) return public_key, private_key def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ) -> None: if os.path.exists(f"""{name}_pubkey.txt""" ) or os.path.exists(f"""{name}_privkey.txt""" ): print("""\nWARNING:""" ) print( f"""\"{name}_pubkey.txt\" or \"{name}_privkey.txt\" already exists. 
\n""" """Use a different name or delete these files and re-run this program.""" ) sys.exit() snake_case , snake_case : Optional[Any] = generate_key(lowercase ) print(f"""\nWriting public key to file {name}_pubkey.txt...""" ) with open(f"""{name}_pubkey.txt""" ,"""w""" ) as fo: fo.write(f"""{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}""" ) print(f"""Writing private key to file {name}_privkey.txt...""" ) with open(f"""{name}_privkey.txt""" ,"""w""" ) as fo: fo.write(f"""{private_key[0]},{private_key[1]}""" ) def SCREAMING_SNAKE_CASE__ ( ) -> None: print("""Making key files...""" ) make_key_files("""elgamal""" ,2048 ) print("""Key files generation successful""" ) if __name__ == "__main__": main()
684
1
import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, EulerAncestralDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionInstructPixaPixPipeline, UNetaDConditionModel, ) from diffusers.image_processor import VaeImageProcessor from diffusers.utils import floats_tensor, load_image, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class lowerCAmelCase__( __lowercase , __lowercase , __lowercase , unittest.TestCase ): '''simple docstring''' __snake_case = StableDiffusionInstructPixaPixPipeline __snake_case = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width', 'cross_attention_kwargs'} __snake_case = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS __snake_case = IMAGE_TO_IMAGE_IMAGE_PARAMS __snake_case = IMAGE_TO_IMAGE_IMAGE_PARAMS def UpperCamelCase_ ( self ) -> str: torch.manual_seed(0 ) _SCREAMING_SNAKE_CASE : Tuple = UNetaDConditionModel( block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=8 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=3_2 , ) _SCREAMING_SNAKE_CASE : Tuple = PNDMScheduler(skip_prk_steps=__lowerCamelCase ) torch.manual_seed(0 ) _SCREAMING_SNAKE_CASE : int = AutoencoderKL( block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , ) torch.manual_seed(0 ) 
_SCREAMING_SNAKE_CASE : int = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , ) _SCREAMING_SNAKE_CASE : Optional[Any] = CLIPTextModel(__lowerCamelCase ) _SCREAMING_SNAKE_CASE : Optional[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) _SCREAMING_SNAKE_CASE : List[Any] = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, } return components def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase=0 ) -> Dict: _SCREAMING_SNAKE_CASE : Union[str, Any] = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(__lowerCamelCase ) ).to(__lowerCamelCase ) _SCREAMING_SNAKE_CASE : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0] _SCREAMING_SNAKE_CASE : List[Any] = Image.fromarray(np.uinta(__lowerCamelCase ) ).convert("RGB" ) if str(__lowerCamelCase ).startswith("mps" ): _SCREAMING_SNAKE_CASE : List[str] = torch.manual_seed(__lowerCamelCase ) else: _SCREAMING_SNAKE_CASE : Tuple = torch.Generator(device=__lowerCamelCase ).manual_seed(__lowerCamelCase ) _SCREAMING_SNAKE_CASE : Tuple = { "prompt": "A painting of a squirrel eating a burger", "image": image, "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "image_guidance_scale": 1, "output_type": "numpy", } return inputs def UpperCamelCase_ ( self ) -> List[str]: _SCREAMING_SNAKE_CASE : Union[str, Any] = "cpu" # ensure determinism for the device-dependent torch.Generator _SCREAMING_SNAKE_CASE : Optional[Any] = self.get_dummy_components() _SCREAMING_SNAKE_CASE : List[str] = StableDiffusionInstructPixaPixPipeline(**__lowerCamelCase ) _SCREAMING_SNAKE_CASE : Dict = sd_pipe.to(__lowerCamelCase ) sd_pipe.set_progress_bar_config(disable=__lowerCamelCase ) _SCREAMING_SNAKE_CASE : Optional[int] = 
self.get_dummy_inputs(__lowerCamelCase ) _SCREAMING_SNAKE_CASE : Optional[int] = sd_pipe(**__lowerCamelCase ).images _SCREAMING_SNAKE_CASE : List[Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 3_2, 3_2, 3) _SCREAMING_SNAKE_CASE : int = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def UpperCamelCase_ ( self ) -> List[str]: _SCREAMING_SNAKE_CASE : int = "cpu" # ensure determinism for the device-dependent torch.Generator _SCREAMING_SNAKE_CASE : List[Any] = self.get_dummy_components() _SCREAMING_SNAKE_CASE : int = StableDiffusionInstructPixaPixPipeline(**__lowerCamelCase ) _SCREAMING_SNAKE_CASE : Dict = sd_pipe.to(__lowerCamelCase ) sd_pipe.set_progress_bar_config(disable=__lowerCamelCase ) _SCREAMING_SNAKE_CASE : List[str] = self.get_dummy_inputs(__lowerCamelCase ) _SCREAMING_SNAKE_CASE : Dict = "french fries" _SCREAMING_SNAKE_CASE : Union[str, Any] = sd_pipe(**__lowerCamelCase , negative_prompt=__lowerCamelCase ) _SCREAMING_SNAKE_CASE : Optional[int] = output.images _SCREAMING_SNAKE_CASE : Tuple = image[0, -3:, -3:, -1] assert image.shape == (1, 3_2, 3_2, 3) _SCREAMING_SNAKE_CASE : Any = np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def UpperCamelCase_ ( self ) -> int: _SCREAMING_SNAKE_CASE : Union[str, Any] = "cpu" # ensure determinism for the device-dependent torch.Generator _SCREAMING_SNAKE_CASE : List[str] = self.get_dummy_components() _SCREAMING_SNAKE_CASE : Union[str, Any] = StableDiffusionInstructPixaPixPipeline(**__lowerCamelCase ) _SCREAMING_SNAKE_CASE : Tuple = sd_pipe.to(__lowerCamelCase ) sd_pipe.set_progress_bar_config(disable=__lowerCamelCase ) _SCREAMING_SNAKE_CASE : int = self.get_dummy_inputs(__lowerCamelCase ) _SCREAMING_SNAKE_CASE : int = [inputs["prompt"]] * 2 _SCREAMING_SNAKE_CASE : Dict = np.array(inputs["image"] 
).astype(np.floataa ) / 255.0 _SCREAMING_SNAKE_CASE : Tuple = torch.from_numpy(__lowerCamelCase ).unsqueeze(0 ).to(__lowerCamelCase ) _SCREAMING_SNAKE_CASE : Tuple = image / 2 + 0.5 _SCREAMING_SNAKE_CASE : Dict = image.permute(0 , 3 , 1 , 2 ) _SCREAMING_SNAKE_CASE : List[Any] = image.repeat(2 , 1 , 1 , 1 ) _SCREAMING_SNAKE_CASE : str = sd_pipe(**__lowerCamelCase ).images _SCREAMING_SNAKE_CASE : List[Any] = image[-1, -3:, -3:, -1] assert image.shape == (2, 3_2, 3_2, 3) _SCREAMING_SNAKE_CASE : Union[str, Any] = np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def UpperCamelCase_ ( self ) -> List[Any]: _SCREAMING_SNAKE_CASE : List[Any] = "cpu" # ensure determinism for the device-dependent torch.Generator _SCREAMING_SNAKE_CASE : Tuple = self.get_dummy_components() _SCREAMING_SNAKE_CASE : Optional[int] = EulerAncestralDiscreteScheduler( beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="scaled_linear" ) _SCREAMING_SNAKE_CASE : Tuple = StableDiffusionInstructPixaPixPipeline(**__lowerCamelCase ) _SCREAMING_SNAKE_CASE : Optional[int] = sd_pipe.to(__lowerCamelCase ) sd_pipe.set_progress_bar_config(disable=__lowerCamelCase ) _SCREAMING_SNAKE_CASE : Optional[int] = self.get_dummy_inputs(__lowerCamelCase ) _SCREAMING_SNAKE_CASE : int = sd_pipe(**__lowerCamelCase ).images _SCREAMING_SNAKE_CASE : List[str] = image[0, -3:, -3:, -1] _SCREAMING_SNAKE_CASE : Tuple = [round(__lowerCamelCase , 4 ) for x in image_slice.flatten().tolist()] print(",".join([str(__lowerCamelCase ) for x in slice] ) ) assert image.shape == (1, 3_2, 3_2, 3) _SCREAMING_SNAKE_CASE : Dict = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def UpperCamelCase_ ( self ) -> List[Any]: super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) def UpperCamelCase_ ( self ) -> Dict: 
_SCREAMING_SNAKE_CASE : Any = self.get_dummy_components() _SCREAMING_SNAKE_CASE : int = StableDiffusionInstructPixaPixPipeline(**__lowerCamelCase ) _SCREAMING_SNAKE_CASE : Dict = VaeImageProcessor(do_resize=__lowerCamelCase , do_normalize=__lowerCamelCase ) _SCREAMING_SNAKE_CASE : List[Any] = pipe.to(__lowerCamelCase ) pipe.set_progress_bar_config(disable=__lowerCamelCase ) _SCREAMING_SNAKE_CASE : Tuple = pipe(**self.get_dummy_inputs_by_type(__lowerCamelCase , input_image_type="pt" ) )[0] _SCREAMING_SNAKE_CASE : Any = components["vae"] _SCREAMING_SNAKE_CASE : int = self.get_dummy_inputs_by_type(__lowerCamelCase , input_image_type="pt" ) for image_param in self.image_latents_params: if image_param in inputs.keys(): _SCREAMING_SNAKE_CASE : int = vae.encode(inputs[image_param] ).latent_dist.mode() _SCREAMING_SNAKE_CASE : Optional[int] = pipe(**__lowerCamelCase )[0] _SCREAMING_SNAKE_CASE : List[str] = np.abs(out - out_latents_inputs ).max() self.assertLess(__lowerCamelCase , 1E-4 , "passing latents as image input generate different result from passing image" ) @slow @require_torch_gpu class lowerCAmelCase__( unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self ) -> Optional[Any]: super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCamelCase_ ( self , __lowerCamelCase=0 ) -> List[str]: _SCREAMING_SNAKE_CASE : Tuple = torch.manual_seed(__lowerCamelCase ) _SCREAMING_SNAKE_CASE : Optional[Any] = load_image( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg" ) _SCREAMING_SNAKE_CASE : Dict = { "prompt": "turn him into a cyborg", "image": image, "generator": generator, "num_inference_steps": 3, "guidance_scale": 7.5, "image_guidance_scale": 1.0, "output_type": "numpy", } return inputs def UpperCamelCase_ ( self ) -> Tuple: _SCREAMING_SNAKE_CASE : Any = StableDiffusionInstructPixaPixPipeline.from_pretrained( "timbrooks/instruct-pix2pix" , safety_checker=__lowerCamelCase ) 
pipe.to(__lowerCamelCase ) pipe.set_progress_bar_config(disable=__lowerCamelCase ) pipe.enable_attention_slicing() _SCREAMING_SNAKE_CASE : List[str] = self.get_inputs() _SCREAMING_SNAKE_CASE : Tuple = pipe(**__lowerCamelCase ).images _SCREAMING_SNAKE_CASE : str = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 5_1_2, 5_1_2, 3) _SCREAMING_SNAKE_CASE : Any = np.array([0.5902, 0.6015, 0.6027, 0.5983, 0.6092, 0.6061, 0.5765, 0.5785, 0.5555] ) assert np.abs(expected_slice - image_slice ).max() < 1E-3 def UpperCamelCase_ ( self ) -> Optional[int]: _SCREAMING_SNAKE_CASE : Optional[int] = StableDiffusionInstructPixaPixPipeline.from_pretrained( "timbrooks/instruct-pix2pix" , safety_checker=__lowerCamelCase ) _SCREAMING_SNAKE_CASE : Dict = LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.to(__lowerCamelCase ) pipe.set_progress_bar_config(disable=__lowerCamelCase ) pipe.enable_attention_slicing() _SCREAMING_SNAKE_CASE : Optional[Any] = self.get_inputs() _SCREAMING_SNAKE_CASE : Tuple = pipe(**__lowerCamelCase ).images _SCREAMING_SNAKE_CASE : Tuple = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 5_1_2, 5_1_2, 3) _SCREAMING_SNAKE_CASE : Union[str, Any] = np.array([0.6578, 0.6817, 0.6972, 0.6761, 0.6856, 0.6916, 0.6428, 0.6516, 0.6301] ) assert np.abs(expected_slice - image_slice ).max() < 1E-3 def UpperCamelCase_ ( self ) -> List[str]: _SCREAMING_SNAKE_CASE : Optional[Any] = StableDiffusionInstructPixaPixPipeline.from_pretrained( "timbrooks/instruct-pix2pix" , safety_checker=__lowerCamelCase ) _SCREAMING_SNAKE_CASE : Dict = DDIMScheduler.from_config(pipe.scheduler.config ) pipe.to(__lowerCamelCase ) pipe.set_progress_bar_config(disable=__lowerCamelCase ) pipe.enable_attention_slicing() _SCREAMING_SNAKE_CASE : List[Any] = self.get_inputs() _SCREAMING_SNAKE_CASE : Union[str, Any] = pipe(**__lowerCamelCase ).images _SCREAMING_SNAKE_CASE : int = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 5_1_2, 5_1_2, 3) _SCREAMING_SNAKE_CASE : 
List[Any] = np.array([0.3828, 0.3834, 0.3818, 0.3792, 0.3865, 0.3752, 0.3792, 0.3847, 0.3753] ) assert np.abs(expected_slice - image_slice ).max() < 1E-3 def UpperCamelCase_ ( self ) -> Tuple: _SCREAMING_SNAKE_CASE : List[Any] = 0 def callback_fn(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> None: _SCREAMING_SNAKE_CASE : Tuple = True nonlocal number_of_steps number_of_steps += 1 if step == 1: _SCREAMING_SNAKE_CASE : Tuple = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 6_4, 6_4) _SCREAMING_SNAKE_CASE : Optional[int] = latents[0, -3:, -3:, -1] _SCREAMING_SNAKE_CASE : Optional[Any] = np.array([-0.2463, -0.4644, -0.9756, 1.5176, 1.4414, 0.7866, 0.9897, 0.8521, 0.7983] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2 elif step == 2: _SCREAMING_SNAKE_CASE : str = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 6_4, 6_4) _SCREAMING_SNAKE_CASE : Optional[Any] = latents[0, -3:, -3:, -1] _SCREAMING_SNAKE_CASE : str = np.array([-0.2644, -0.4626, -0.9653, 1.5176, 1.4551, 0.7686, 0.9805, 0.8452, 0.8115] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2 _SCREAMING_SNAKE_CASE : Tuple = False _SCREAMING_SNAKE_CASE : List[Any] = StableDiffusionInstructPixaPixPipeline.from_pretrained( "timbrooks/instruct-pix2pix" , safety_checker=__lowerCamelCase , torch_dtype=torch.floataa ) _SCREAMING_SNAKE_CASE : List[str] = pipe.to(__lowerCamelCase ) pipe.set_progress_bar_config(disable=__lowerCamelCase ) pipe.enable_attention_slicing() _SCREAMING_SNAKE_CASE : Optional[Any] = self.get_inputs() pipe(**__lowerCamelCase , callback=__lowerCamelCase , callback_steps=1 ) assert callback_fn.has_been_called assert number_of_steps == 3 def UpperCamelCase_ ( self ) -> List[Any]: torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() _SCREAMING_SNAKE_CASE : Optional[Any] = StableDiffusionInstructPixaPixPipeline.from_pretrained( "timbrooks/instruct-pix2pix" , 
safety_checker=__lowerCamelCase , torch_dtype=torch.floataa ) _SCREAMING_SNAKE_CASE : Union[str, Any] = pipe.to(__lowerCamelCase ) pipe.set_progress_bar_config(disable=__lowerCamelCase ) pipe.enable_attention_slicing(1 ) pipe.enable_sequential_cpu_offload() _SCREAMING_SNAKE_CASE : Dict = self.get_inputs() _SCREAMING_SNAKE_CASE : List[str] = pipe(**__lowerCamelCase ) _SCREAMING_SNAKE_CASE : int = torch.cuda.max_memory_allocated() # make sure that less than 2.2 GB is allocated assert mem_bytes < 2.2 * 1_0**9 def UpperCamelCase_ ( self ) -> int: _SCREAMING_SNAKE_CASE : Any = self.get_inputs() # resize to resolution that is divisible by 8 but not 16 or 32 _SCREAMING_SNAKE_CASE : Any = inputs["image"].resize((5_0_4, 5_0_4) ) _SCREAMING_SNAKE_CASE : Dict = "timbrooks/instruct-pix2pix" _SCREAMING_SNAKE_CASE : int = StableDiffusionInstructPixaPixPipeline.from_pretrained( __lowerCamelCase , safety_checker=__lowerCamelCase , ) pipe.to(__lowerCamelCase ) pipe.set_progress_bar_config(disable=__lowerCamelCase ) pipe.enable_attention_slicing() _SCREAMING_SNAKE_CASE : int = pipe(**__lowerCamelCase ) _SCREAMING_SNAKE_CASE : Union[str, Any] = output.images[0] _SCREAMING_SNAKE_CASE : List[Any] = image[2_5_5:2_5_8, 3_8_3:3_8_6, -1] assert image.shape == (5_0_4, 5_0_4, 3) _SCREAMING_SNAKE_CASE : Any = np.array([0.2726, 0.2529, 0.2664, 0.2655, 0.2641, 0.2642, 0.2591, 0.2649, 0.2590] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
249
import tempfile import unittest from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, ) from ...test_tokenization_common import TokenizerTesterMixin UpperCamelCase__ =get_tests_dir('fixtures/test_sentencepiece.model') if is_torch_available(): from transformers.models.plbart.modeling_plbart import shift_tokens_right UpperCamelCase__ =5_0003 UpperCamelCase__ =5_0002 @require_sentencepiece @require_tokenizers class lowerCAmelCase__( __lowercase , unittest.TestCase ): '''simple docstring''' __snake_case = PLBartTokenizer __snake_case = None __snake_case = False def UpperCamelCase_ ( self ) -> str: super().setUp() # We have a SentencePiece fixture for testing _SCREAMING_SNAKE_CASE : Union[str, Any] = PLBartTokenizer(__lowerCamelCase , language_codes="base" , keep_accents=__lowerCamelCase ) tokenizer.save_pretrained(self.tmpdirname ) def UpperCamelCase_ ( self ) -> List[str]: _SCREAMING_SNAKE_CASE : str = PLBartTokenizer(__lowerCamelCase , language_codes="base" , keep_accents=__lowerCamelCase ) _SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.tokenize("This is a test" ) self.assertListEqual(__lowerCamelCase , ["▁This", "▁is", "▁a", "▁t", "est"] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(__lowerCamelCase ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , ) _SCREAMING_SNAKE_CASE : Dict = tokenizer.tokenize("I was born in 92000, and this is falsé." 
) self.assertListEqual( __lowerCamelCase , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", ".", ] , ) _SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.convert_tokens_to_ids(__lowerCamelCase ) self.assertListEqual( __lowerCamelCase , [ value + tokenizer.fairseq_offset for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4] ] , ) _SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.convert_ids_to_tokens(__lowerCamelCase ) self.assertListEqual( __lowerCamelCase , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", ".", ] , ) _SCREAMING_SNAKE_CASE : List[str] = tokenizer.vocab_size _SCREAMING_SNAKE_CASE : List[str] = [tokenizer.convert_ids_to_tokens(__lowerCamelCase ) for x in range(end - 4 , __lowerCamelCase )] self.assertListEqual(__lowerCamelCase , ["__java__", "__python__", "__en_XX__", "<mask>"] ) _SCREAMING_SNAKE_CASE : Dict = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go" _SCREAMING_SNAKE_CASE : Any = tokenizer(__lowerCamelCase ).input_ids self.assertEqual( tokenizer.decode(__lowerCamelCase , skip_special_tokens=__lowerCamelCase , clean_up_tokenization_spaces=__lowerCamelCase ) , __lowerCamelCase , ) def UpperCamelCase_ ( self ) -> int: _SCREAMING_SNAKE_CASE : Any = PLBartTokenizer(__lowerCamelCase , language_codes="multi" , keep_accents=__lowerCamelCase ) _SCREAMING_SNAKE_CASE : Optional[int] = tokenizer.tokenize("This is a test" ) self.assertListEqual(__lowerCamelCase , ["▁This", "▁is", "▁a", "▁t", "est"] ) 
self.assertListEqual( tokenizer.convert_tokens_to_ids(__lowerCamelCase ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , ) _SCREAMING_SNAKE_CASE : List[str] = tokenizer.tokenize("I was born in 92000, and this is falsé." ) self.assertListEqual( __lowerCamelCase , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", ".", ] , ) _SCREAMING_SNAKE_CASE : List[str] = tokenizer.convert_tokens_to_ids(__lowerCamelCase ) self.assertListEqual( __lowerCamelCase , [ value + tokenizer.fairseq_offset for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4] ] , ) _SCREAMING_SNAKE_CASE : int = tokenizer.convert_ids_to_tokens(__lowerCamelCase ) self.assertListEqual( __lowerCamelCase , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", ".", ] , ) _SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.vocab_size _SCREAMING_SNAKE_CASE : Optional[int] = [tokenizer.convert_ids_to_tokens(__lowerCamelCase ) for x in range(end - 7 , __lowerCamelCase )] self.assertListEqual( __lowerCamelCase , ["__java__", "__python__", "__en_XX__", "__javascript__", "__php__", "__ruby__", "__go__"] ) _SCREAMING_SNAKE_CASE : List[Any] = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go" _SCREAMING_SNAKE_CASE : int = tokenizer(__lowerCamelCase ).input_ids self.assertEqual( tokenizer.decode(__lowerCamelCase , skip_special_tokens=__lowerCamelCase , clean_up_tokenization_spaces=__lowerCamelCase ) , __lowerCamelCase , ) @require_torch 
@require_sentencepiece
@require_tokenizers
class PLBartPythonEnIntegrationTest(unittest.TestCase):
    """Integration tests against the released ``uclanlp/plbart-python-en_XX`` checkpoint."""

    checkpoint_name = "uclanlp/plbart-python-en_XX"
    src_text = [
        "def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])",
        "def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])",
    ]
    tgt_text = [
        "Returns the maximum value of a b c.",
        "Sums the values of a b c.",
    ]
    # Ids for src_text[0]: body tokens, </s> (id 2), then the source language code.
    expected_src_tokens = [
        134, 5452, 33460, 33441, 33463, 33465, 33463, 33449, 988, 20, 33456, 19, 33456, 771,
        39, 4258, 889, 3318, 33441, 33463, 33465, 33463, 33449, 2471, 2, PYTHON_CODE,
    ]

    @classmethod
    def setUpClass(cls):
        # Load the tokenizer once per class: from_pretrained hits the network/cache.
        cls.tokenizer = PLBartTokenizer.from_pretrained(
            cls.checkpoint_name, language_codes="base", src_lang="python", tgt_lang="en_XX"
        )
        cls.pad_token_id = 1
        return cls

    def test_language_codes(self):
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__java__"], 50001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__python__"], 50002)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__en_XX__"], 50003)

    def test_python_en_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_python_en_tokenizer_decode_ignores_language_codes(self):
        # Language-code ids are special tokens and must be dropped on decode.
        self.assertIn(EN_CODE, self.tokenizer.all_special_ids)
        generated_ids = [EN_CODE, 9037, 33442, 57, 752, 153, 14, 56, 18, 9, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_python_en_tokenizer_truncation(self):
        src_text = ["def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])" * 20]
        self.assertIsInstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        # Even when truncated, the sequence ends with </s> + source language code.
        self.assertEqual(ids[-2], 2)
        self.assertEqual(ids[-1], PYTHON_CODE)
        self.assertEqual(len(ids), desired_max_length)

    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "__java__"]), [50004, 50001])

    def test_special_tokens_unaffected_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = PLBartTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)

    @require_torch
    def test_batch_fairseq_parity(self):
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        self.assertEqual(batch.input_ids[1][-2:].tolist(), [2, PYTHON_CODE])
        self.assertEqual(batch.decoder_input_ids[1][0], EN_CODE)
        self.assertEqual(batch.decoder_input_ids[1][-1], 2)
        self.assertEqual(batch.labels[1][-2:].tolist(), [2, EN_CODE])

    @require_torch
    def test_python_en_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text,
            text_target=self.tgt_text,
            padding=True,
            truncation=True,
            max_length=len(self.expected_src_tokens),
            return_tensors="pt",
        )
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 26), batch.input_ids.shape)
        self.assertEqual((2, 26), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(2, batch.decoder_input_ids[0, -1])  # EOS
        # Test that special tokens are reset after encoding.
        self.assertEqual(self.tokenizer.prefix_tokens, [])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id, PYTHON_CODE])

    def test_seq2seq_max_length(self):
        # Source and target sides honour their own max_length independently.
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt"
        )
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id)

        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)

    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="en_XX", tgt_lang="java"
        )
        self.assertEqual(
            nested_simplify(inputs),
            {
                # A, test, EOS, en_XX
                "input_ids": [[150, 242, 2, 50003]],
                "attention_mask": [[1, 1, 1, 1]],
                # java
                "forced_bos_token_id": 50001,
            },
        )
249
1
'''simple docstring'''

import logging

import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss

from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
    BERT_INPUTS_DOCSTRING,
    BERT_START_DOCSTRING,
    BertEncoder,
    BertModel,
    BertPreTrainedModel,
)

# Module-level logger.
_lowerCAmelCase : Optional[int] = logging.getLogger(__name__)


# PABEE ("patience-based early exit") BERT encoder: exposes a per-layer forward
# so the model can stop after any layer.
# NOTE(review): the base-class reference was mangled to `__lowerCamelCase`, which is
# undefined at module scope -- presumably BertEncoder (see import above); confirm.
class snake_case ( __lowerCamelCase ):
    """simple docstring"""

    # Advance the hidden states through exactly one encoder layer.
    # NOTE(review): parameter names are mangled; the body reads `current_layer` and
    # `head_mask`, so the intended signature is presumably
    # (self, hidden_states, current_layer, attention_mask=None, head_mask=None).
    def lowercase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase=None , lowerCamelCase=None ) -> int:
        """simple docstring"""
        snake_case__ : Union[str, Any] = self.layer[current_layer](lowerCamelCase , lowerCamelCase , head_mask[current_layer] )

        snake_case__ : Any = layer_outputs[0]

        return hidden_states


@add_start_docstrings(
    'The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.' , __lowerCamelCase , )
class snake_case ( __lowerCamelCase ):
    """simple docstring"""

    # NOTE(review): every method below is named `lowercase__`, so later defs shadow
    # earlier ones (only the last binding survives on the class). Originally these
    # were presumably distinct setters/reset/log helpers -- confirm upstream.
    def __init__( self , lowerCamelCase ) -> int:
        """simple docstring"""
        super().__init__(lowerCamelCase )

        snake_case__ : List[Any] = BertEncoderWithPabee(lowerCamelCase )
        self.init_weights()
        # NOTE(review): the four zero-initialisations below lost their attribute
        # targets in mangling; later code reads self.patience,
        # self.inference_instances_num and self.inference_layers_num.
        snake_case__ : List[Any] = 0
        snake_case__ : List[Any] = 0
        snake_case__ : Optional[Any] = 0
        snake_case__ : Tuple = 0

    # Setter: regression early-exit threshold (read as self.regression_threshold).
    def lowercase__ ( self , lowerCamelCase ) -> Any:
        """simple docstring"""
        snake_case__ : List[str] = threshold

    # Setter: patience, i.e. number of consecutive agreeing layers before exit.
    def lowercase__ ( self , lowerCamelCase ) -> Optional[int]:
        """simple docstring"""
        snake_case__ : List[str] = patience

    # Reset the inference statistics counters.
    def lowercase__ ( self ) -> Optional[Any]:
        """simple docstring"""
        snake_case__ : List[Any] = 0
        snake_case__ : Optional[int] = 0

    # Print the average number of executed layers and the implied speed-up.
    def lowercase__ ( self ) -> Any:
        """simple docstring"""
        snake_case__ : str = self.inference_layers_num / self.inference_instances_num
        snake_case__ : List[str] = (
            f'''*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ='''
            f''' {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***'''
        )
        print(lowerCamelCase )

    # PABEE forward: layer-by-layer in training, all layers when patience == 0,
    # otherwise early-exit once `patience` consecutive heads agree.
    # NOTE(review): the keyword parameters all share one mangled name (a
    # SyntaxError as written); the body reads input_ids, attention_mask,
    # token_type_ids, position_ids, head_mask, inputs_embeds,
    # encoder_hidden_states, encoder_attention_mask, output_dropout,
    # output_layers, regression -- confirm the original order upstream.
    @add_start_docstrings_to_model_forward(lowerCamelCase )
    def lowercase__ ( self , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=False , ) -> List[Any]:
        """simple docstring"""
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError('''You cannot specify both input_ids and inputs_embeds at the same time''' )
        elif input_ids is not None:
            snake_case__ : Any = input_ids.size()
        elif inputs_embeds is not None:
            snake_case__ : Tuple = inputs_embeds.size()[:-1]
        else:
            raise ValueError('''You have to specify either input_ids or inputs_embeds''' )

        snake_case__ : str = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            snake_case__ : Union[str, Any] = torch.ones(lowerCamelCase , device=lowerCamelCase )
        if token_type_ids is None:
            snake_case__ : List[Any] = torch.zeros(lowerCamelCase , dtype=torch.long , device=lowerCamelCase )

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        snake_case__ : torch.Tensor = self.get_extended_attention_mask(lowerCamelCase , lowerCamelCase , lowerCamelCase )

        # If a 2D ou 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if self.config.is_decoder and encoder_hidden_states is not None:
            snake_case__ ,snake_case__ ,snake_case__ : int = encoder_hidden_states.size()
            snake_case__ : str = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                snake_case__ : Optional[int] = torch.ones(lowerCamelCase , device=lowerCamelCase )
            snake_case__ : Dict = self.invert_attention_mask(lowerCamelCase )
        else:
            snake_case__ : int = None

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        snake_case__ : Union[str, Any] = self.get_head_mask(lowerCamelCase , self.config.num_hidden_layers )

        snake_case__ : List[str] = self.embeddings(
            input_ids=lowerCamelCase , position_ids=lowerCamelCase , token_type_ids=lowerCamelCase , inputs_embeds=lowerCamelCase )
        snake_case__ : List[Any] = embedding_output

        if self.training:
            # Training: run every layer and collect one prediction per layer so all
            # internal classifiers receive gradient.
            snake_case__ : Dict = []
            for i in range(self.config.num_hidden_layers ):
                snake_case__ : str = self.encoder.adaptive_forward(
                    lowerCamelCase , current_layer=lowerCamelCase , attention_mask=lowerCamelCase , head_mask=lowerCamelCase )

                snake_case__ : Any = self.pooler(lowerCamelCase )
                snake_case__ : str = output_layers[i](output_dropout(lowerCamelCase ) )
                res.append(lowerCamelCase )
        elif self.patience == 0:  # Use all layers for inference
            snake_case__ : List[str] = self.encoder(
                lowerCamelCase , attention_mask=lowerCamelCase , head_mask=lowerCamelCase , encoder_hidden_states=lowerCamelCase , encoder_attention_mask=lowerCamelCase , )
            snake_case__ : Any = self.pooler(encoder_outputs[0] )
            snake_case__ : Optional[int] = [output_layers[self.config.num_hidden_layers - 1](lowerCamelCase )]
        else:
            # Early-exit inference: stop once `patience` consecutive layers produce
            # the same prediction (or stay within the regression threshold).
            snake_case__ : List[Any] = 0
            snake_case__ : Any = None
            snake_case__ : List[Any] = 0
            for i in range(self.config.num_hidden_layers ):
                calculated_layer_num += 1
                snake_case__ : Dict = self.encoder.adaptive_forward(
                    lowerCamelCase , current_layer=lowerCamelCase , attention_mask=lowerCamelCase , head_mask=lowerCamelCase )

                snake_case__ : List[str] = self.pooler(lowerCamelCase )
                snake_case__ : Optional[int] = output_layers[i](lowerCamelCase )
                if regression:
                    snake_case__ : List[str] = logits.detach()
                    if patient_result is not None:
                        snake_case__ : List[Any] = patient_result.detach()
                    if (patient_result is not None) and torch.abs(patient_result - labels ) < self.regression_threshold:
                        patient_counter += 1
                    else:
                        snake_case__ : List[str] = 0
                else:
                    snake_case__ : List[Any] = logits.detach().argmax(dim=1 )
                    if patient_result is not None:
                        snake_case__ : Tuple = patient_result.detach().argmax(dim=1 )
                    if (patient_result is not None) and torch.all(labels.eq(lowerCamelCase ) ):
                        patient_counter += 1
                    else:
                        snake_case__ : Any = 0

                snake_case__ : Any = logits
                if patient_counter == self.patience:
                    break
            snake_case__ : str = [patient_result]
            self.inference_layers_num += calculated_layer_num
            self.inference_instances_num += 1

        return res


@add_start_docstrings(
    'Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of\n the pooled output) e.g. for GLUE tasks. ' , __lowerCamelCase , )
class snake_case ( __lowerCamelCase ):
    """simple docstring"""

    def __init__( self , lowerCamelCase ) -> Tuple:
        """simple docstring"""
        super().__init__(lowerCamelCase )
        # One linear classifier per hidden layer; read via self.classifiers below.
        snake_case__ : Dict = config.num_labels
        snake_case__ : Optional[int] = BertModelWithPabee(lowerCamelCase )
        snake_case__ : List[Any] = nn.Dropout(config.hidden_dropout_prob )
        snake_case__ : List[str] = nn.ModuleList(
            [nn.Linear(config.hidden_size , self.config.num_labels ) for _ in range(config.num_hidden_layers )] )

        self.init_weights()

    # Forward pass: delegate to the PABEE backbone, then combine the per-layer
    # losses with weights growing linearly with layer depth (ix + 1).
    @add_start_docstrings_to_model_forward(lowerCamelCase )
    def lowercase__ ( self , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , ) -> Dict:
        """simple docstring"""
        snake_case__ : Union[str, Any] = self.bert(
            input_ids=lowerCamelCase , attention_mask=lowerCamelCase , token_type_ids=lowerCamelCase , position_ids=lowerCamelCase , head_mask=lowerCamelCase , inputs_embeds=lowerCamelCase , output_dropout=self.dropout , output_layers=self.classifiers , regression=self.num_labels == 1 , )

        snake_case__ : Optional[Any] = (logits[-1],)

        if labels is not None:
            snake_case__ : Optional[Any] = None
            snake_case__ : Optional[int] = 0
            for ix, logits_item in enumerate(lowerCamelCase ):
                if self.num_labels == 1:
                    # We are doing regression
                    snake_case__ : List[Any] = MSELoss()
                    snake_case__ : Dict = loss_fct(logits_item.view(-1 ) , labels.view(-1 ) )
                else:
                    snake_case__ : List[Any] = CrossEntropyLoss()
                    snake_case__ : List[str] = loss_fct(logits_item.view(-1 , self.num_labels ) , labels.view(-1 ) )
                if total_loss is None:
                    snake_case__ : int = loss
                else:
                    total_loss += loss * (ix + 1)
                total_weights += ix + 1
            snake_case__ : int = (total_loss / total_weights,) + outputs

        return outputs
694
'''Lazy-import package init for the I-BERT model.'''

from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Submodule name -> list of public symbols it provides; consumed by _LazyModule.
_import_structure = {"configuration_ibert": ["IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "IBertConfig", "IBertOnnxConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch is unavailable: simply skip registering the modeling symbols.
    pass
else:
    _import_structure["modeling_ibert"] = [
        "IBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "IBertForMaskedLM",
        "IBertForMultipleChoice",
        "IBertForQuestionAnswering",
        "IBertForSequenceClassification",
        "IBertForTokenClassification",
        "IBertModel",
        "IBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers resolve the real imports eagerly.
    from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_ibert import (
            IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            IBertForMaskedLM,
            IBertForMultipleChoice,
            IBertForQuestionAnswering,
            IBertForSequenceClassification,
            IBertForTokenClassification,
            IBertModel,
            IBertPreTrainedModel,
        )

else:
    import sys

    # At runtime, replace this module with a lazy proxy so the heavy submodule
    # imports only happen on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
694
1
"""simple docstring""" from typing import Any, Dict, List, Optional, Tuple, Union import torch from torch import nn from torch.utils.data import DistributedSampler, RandomSampler from transformers import PreTrainedModel, Trainer, logging from transformers.integrations import is_fairscale_available from transformers.models.fsmt.configuration_fsmt import FSMTConfig from transformers.optimization import ( Adafactor, AdamW, get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) from transformers.trainer_pt_utils import get_tpu_sampler from transformers.training_args import ParallelMode from transformers.utils import is_torch_tpu_available if is_fairscale_available(): from fairscale.optim import OSS A_ = logging.get_logger(__name__) A_ = { '''linear''': get_linear_schedule_with_warmup, '''cosine''': get_cosine_schedule_with_warmup, '''cosine_w_restarts''': get_cosine_with_hard_restarts_schedule_with_warmup, '''polynomial''': get_polynomial_decay_schedule_with_warmup, '''constant''': get_constant_schedule, '''constant_w_warmup''': get_constant_schedule_with_warmup, } class lowercase( __a ): '''simple docstring''' def __init__( self: List[str], a_: Dict=None, a_: int=None, *a_: List[Any], **a_: Union[str, Any] ): '''simple docstring''' super().__init__(*a_, **a_ ) if config is None: assert isinstance(self.model, a_ ), ( "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is" f" {self.model.__class__}" ) _snake_case : Any = self.model.config else: _snake_case : int = config _snake_case : Union[str, Any] = data_args _snake_case : Union[str, Any] = self.config.tgt_vocab_size if isinstance(self.config, a_ ) else self.config.vocab_size if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss): assert 
self.config.pad_token_id is not None, ( "Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss" " calculation or doing label smoothing." ) if self.config.pad_token_id is None and self.config.eos_token_id is not None: logger.warning( f"The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for" """ padding..""" ) if self.args.label_smoothing == 0: _snake_case : Tuple = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id ) else: # dynamically import label_smoothed_nll_loss from utils import label_smoothed_nll_loss _snake_case : Dict = label_smoothed_nll_loss def UpperCamelCase_ ( self: int, a_: int ): '''simple docstring''' if self.optimizer is None: _snake_case : Optional[Any] = ["""bias""", """LayerNorm.weight"""] _snake_case : Optional[Any] = [ { """params""": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )], """weight_decay""": self.args.weight_decay, }, { """params""": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )], """weight_decay""": 0.0, }, ] _snake_case : int = Adafactor if self.args.adafactor else AdamW if self.args.adafactor: _snake_case : str = Adafactor _snake_case : List[Any] = {"""scale_parameter""": False, """relative_step""": False} else: _snake_case : Any = AdamW _snake_case : Tuple = { """betas""": (self.args.adam_betaa, self.args.adam_betaa), """eps""": self.args.adam_epsilon, } _snake_case : List[Any] = self.args.learning_rate if self.sharded_ddp: _snake_case : Dict = OSS( params=a_, optim=a_, **a_, ) else: _snake_case : Union[str, Any] = optimizer_cls(a_, **a_ ) if self.lr_scheduler is None: _snake_case : Optional[int] = self._get_lr_scheduler(a_ ) else: # ignoring --lr_scheduler logger.warning("""scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.""" ) def UpperCamelCase_ ( self: Dict, a_: List[str] ): '''simple docstring''' _snake_case : Union[str, Any] = 
arg_to_scheduler[self.args.lr_scheduler] if self.args.lr_scheduler == "constant": _snake_case : Union[str, Any] = schedule_func(self.optimizer ) elif self.args.lr_scheduler == "constant_w_warmup": _snake_case : List[Any] = schedule_func(self.optimizer, num_warmup_steps=self.args.warmup_steps ) else: _snake_case : Tuple = schedule_func( self.optimizer, num_warmup_steps=self.args.warmup_steps, num_training_steps=a_ ) return scheduler def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' if isinstance(self.train_dataset, torch.utils.data.IterableDataset ): return None elif is_torch_tpu_available(): return get_tpu_sampler(self.train_dataset ) else: if self.args.sortish_sampler: self.train_dataset.make_sortish_sampler( self.args.per_device_train_batch_size, distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED), ) return ( RandomSampler(self.train_dataset ) if self.args.local_rank == -1 else DistributedSampler(self.train_dataset ) ) def UpperCamelCase_ ( self: List[str], a_: int, a_: Optional[int], a_: str ): '''simple docstring''' if self.args.label_smoothing == 0: if self.data_args is not None and self.data_args.ignore_pad_token_for_loss: # force training to ignore pad token _snake_case : int = model(**a_, use_cache=a_ )[0] _snake_case : Union[str, Any] = self.loss_fn(logits.view(-1, logits.shape[-1] ), labels.view(-1 ) ) else: # compute usual loss via models _snake_case , _snake_case : Optional[Any] = model(**a_, labels=a_, use_cache=a_ )[:2] else: # compute label smoothed loss _snake_case : Union[str, Any] = model(**a_, use_cache=a_ )[0] _snake_case : Optional[Any] = torch.nn.functional.log_softmax(a_, dim=-1 ) _snake_case , _snake_case : List[Any] = self.loss_fn(a_, a_, self.args.label_smoothing, ignore_index=self.config.pad_token_id ) return loss, logits def UpperCamelCase_ ( self: List[str], a_: List[Any], a_: Union[str, Any] ): '''simple docstring''' _snake_case : Any = inputs.pop("""labels""" ) _snake_case , _snake_case : str = 
self._compute_loss(a_, a_, a_ ) return loss def UpperCamelCase_ ( self: Optional[int], a_: nn.Module, a_: Dict[str, Union[torch.Tensor, Any]], a_: bool, a_: Optional[List[str]] = None, ): '''simple docstring''' _snake_case : str = self._prepare_inputs(a_ ) _snake_case : List[str] = { """max_length""": self.data_args.val_max_target_length if self.data_args is not None else self.config.max_length, """num_beams""": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams, } if self.args.predict_with_generate and not self.args.prediction_loss_only: _snake_case : List[str] = self.model.generate( inputs["""input_ids"""], attention_mask=inputs["""attention_mask"""], **a_, ) # in case the batch is shorter than max length, the output should be padded if generated_tokens.shape[-1] < gen_kwargs["max_length"]: _snake_case : Union[str, Any] = self._pad_tensors_to_max_len(a_, gen_kwargs["""max_length"""] ) _snake_case : Tuple = inputs.pop("""labels""" ) with torch.no_grad(): # compute loss on predict data _snake_case , _snake_case : Dict = self._compute_loss(a_, a_, a_ ) _snake_case : int = loss.mean().detach() if self.args.prediction_loss_only: return (loss, None, None) _snake_case : Optional[Any] = generated_tokens if self.args.predict_with_generate else logits if labels.shape[-1] < gen_kwargs["max_length"]: _snake_case : Tuple = self._pad_tensors_to_max_len(a_, gen_kwargs["""max_length"""] ) return (loss, logits, labels) def UpperCamelCase_ ( self: Tuple, a_: List[str], a_: Union[str, Any] ): '''simple docstring''' _snake_case : Dict = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id if pad_token_id is None: raise ValueError( """Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be""" f" padded to `max_length`={max_length}" ) _snake_case : List[str] = pad_token_id * torch.ones( (tensor.shape[0], max_length), dtype=tensor.dtype, device=tensor.device ) 
_snake_case : Tuple = tensor return padded_tensor
609
"""simple docstring""" import os import unittest from transformers import BatchEncoding from transformers.models.bert.tokenization_bert import ( BasicTokenizer, WordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer from transformers.testing_utils import require_torch, slow from ...test_tokenization_common import TokenizerTesterMixin class lowercase( __a , unittest.TestCase ): '''simple docstring''' lowercase__ = ProphetNetTokenizer lowercase__ = False def UpperCamelCase_ ( self: Optional[int] ): '''simple docstring''' super().setUp() _snake_case : Optional[Any] = [ """[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing""", """,""", """low""", """lowest""", ] _snake_case : Optional[int] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["""vocab_file"""] ) with open(self.vocab_file, """w""", encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) ) def UpperCamelCase_ ( self: List[Any], a_: Any ): '''simple docstring''' _snake_case : Dict = """UNwant\u00E9d,running""" _snake_case : Tuple = """unwanted, running""" return input_text, output_text def UpperCamelCase_ ( self: List[Any] ): '''simple docstring''' _snake_case : int = self.tokenizer_class(self.vocab_file ) _snake_case : str = tokenizer.tokenize("""UNwant\u00E9d,running""" ) self.assertListEqual(a_, ["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(a_ ), [9, 6, 7, 12, 10, 11] ) def UpperCamelCase_ ( self: Any ): '''simple docstring''' _snake_case : Dict = BasicTokenizer() self.assertListEqual(tokenizer.tokenize("""ah\u535A\u63A8zz""" ), ["""ah""", """\u535A""", """\u63A8""", """zz"""] ) def UpperCamelCase_ ( self: Optional[int] ): '''simple docstring''' _snake_case : 
Union[str, Any] = BasicTokenizer(do_lower_case=a_ ) self.assertListEqual( tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ), ["""hello""", """!""", """how""", """are""", """you""", """?"""] ) self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ), ["""hello"""] ) def UpperCamelCase_ ( self: Any ): '''simple docstring''' _snake_case : Dict = BasicTokenizer(do_lower_case=a_, strip_accents=a_ ) self.assertListEqual( tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ), ["""hällo""", """!""", """how""", """are""", """you""", """?"""] ) self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ), ["""h\u00E9llo"""] ) def UpperCamelCase_ ( self: List[Any] ): '''simple docstring''' _snake_case : Any = BasicTokenizer(do_lower_case=a_, strip_accents=a_ ) self.assertListEqual( tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ), ["""hallo""", """!""", """how""", """are""", """you""", """?"""] ) self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ), ["""hello"""] ) def UpperCamelCase_ ( self: Any ): '''simple docstring''' _snake_case : Any = BasicTokenizer(do_lower_case=a_ ) self.assertListEqual( tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ), ["""hallo""", """!""", """how""", """are""", """you""", """?"""] ) self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ), ["""hello"""] ) def UpperCamelCase_ ( self: Union[str, Any] ): '''simple docstring''' _snake_case : int = BasicTokenizer(do_lower_case=a_ ) self.assertListEqual( tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ), ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""] ) def UpperCamelCase_ ( self: Tuple ): '''simple docstring''' _snake_case : Union[str, Any] = BasicTokenizer(do_lower_case=a_, strip_accents=a_ ) self.assertListEqual( tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? 
""" ), ["""HäLLo""", """!""", """how""", """Are""", """yoU""", """?"""] ) def UpperCamelCase_ ( self: Optional[int] ): '''simple docstring''' _snake_case : Optional[int] = BasicTokenizer(do_lower_case=a_, strip_accents=a_ ) self.assertListEqual( tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ), ["""HaLLo""", """!""", """how""", """Are""", """yoU""", """?"""] ) def UpperCamelCase_ ( self: List[Any] ): '''simple docstring''' _snake_case : str = BasicTokenizer(do_lower_case=a_, never_split=["""[UNK]"""] ) self.assertListEqual( tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? [UNK]""" ), ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?""", """[UNK]"""] ) def UpperCamelCase_ ( self: Tuple ): '''simple docstring''' _snake_case : Optional[Any] = ["""[UNK]""", """[CLS]""", """[SEP]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing"""] _snake_case : Dict = {} for i, token in enumerate(a_ ): _snake_case : List[str] = i _snake_case : Union[str, Any] = WordpieceTokenizer(vocab=a_, unk_token="""[UNK]""" ) self.assertListEqual(tokenizer.tokenize("""""" ), [] ) self.assertListEqual(tokenizer.tokenize("""unwanted running""" ), ["""un""", """##want""", """##ed""", """runn""", """##ing"""] ) self.assertListEqual(tokenizer.tokenize("""unwantedX running""" ), ["""[UNK]""", """runn""", """##ing"""] ) @require_torch def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' _snake_case : Tuple = self.tokenizer_class.from_pretrained("""microsoft/prophetnet-large-uncased""" ) _snake_case : Dict = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""] _snake_case : Optional[int] = [1_037, 2_146, 20_423, 2_005, 7_680, 7_849, 3_989, 1_012, 102] _snake_case : Tuple = tokenizer(a_, padding=a_, return_tensors="""pt""" ) self.assertIsInstance(a_, a_ ) _snake_case : Tuple = list(batch.input_ids.numpy()[0] ) self.assertListEqual(a_, a_ ) self.assertEqual((2, 9), batch.input_ids.shape ) self.assertEqual((2, 9), 
batch.attention_mask.shape ) def UpperCamelCase_ ( self: Optional[int] ): '''simple docstring''' self.assertTrue(_is_whitespace(""" """ ) ) self.assertTrue(_is_whitespace("""\t""" ) ) self.assertTrue(_is_whitespace("""\r""" ) ) self.assertTrue(_is_whitespace("""\n""" ) ) self.assertTrue(_is_whitespace("""\u00A0""" ) ) self.assertFalse(_is_whitespace("""A""" ) ) self.assertFalse(_is_whitespace("""-""" ) ) def UpperCamelCase_ ( self: Tuple ): '''simple docstring''' self.assertTrue(_is_control("""\u0005""" ) ) self.assertFalse(_is_control("""A""" ) ) self.assertFalse(_is_control(""" """ ) ) self.assertFalse(_is_control("""\t""" ) ) self.assertFalse(_is_control("""\r""" ) ) def UpperCamelCase_ ( self: Dict ): '''simple docstring''' self.assertTrue(_is_punctuation("""-""" ) ) self.assertTrue(_is_punctuation("""$""" ) ) self.assertTrue(_is_punctuation("""`""" ) ) self.assertTrue(_is_punctuation(""".""" ) ) self.assertFalse(_is_punctuation("""A""" ) ) self.assertFalse(_is_punctuation(""" """ ) ) @slow def UpperCamelCase_ ( self: List[Any] ): '''simple docstring''' _snake_case : Dict = self.tokenizer_class.from_pretrained("""microsoft/prophetnet-large-uncased""" ) _snake_case : Optional[Any] = tokenizer.encode("""sequence builders""", add_special_tokens=a_ ) _snake_case : Tuple = tokenizer.encode("""multi-sequence build""", add_special_tokens=a_ ) _snake_case : Optional[Any] = tokenizer.build_inputs_with_special_tokens(a_ ) _snake_case : Dict = tokenizer.build_inputs_with_special_tokens(a_, a_ ) assert encoded_sentence == text + [102] assert encoded_pair == text + [102] + text_a + [102]
609
1
"""TF-IDF helpers: term frequency, document frequency, IDF and TF-IDF."""
import string
from math import log10  # base-10 log, matching the "log10(0) is undefined" errors below


def term_frequency(term: str, document: str) -> int:
    """Return how many times `term` occurs in `document` (case-insensitive).

    Punctuation and newlines are stripped before whitespace tokenization.
    """
    document_without_punctuation = document.translate(
        str.maketrans("", "", string.punctuation)
    ).replace("\n", "")
    tokenize_document = document_without_punctuation.split(" ")  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()])


def document_frequency(term: str, corpus: str) -> tuple[int, int]:
    """Return (number of documents containing `term`, total number of documents).

    `corpus` is one document per line; matching is case-insensitive substring match.
    """
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("", "", string.punctuation)
    )  # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split("\n")
    term = term.lower()
    return (len([doc for doc in docs if term in doc]), len(docs))


def inverse_document_frequency(df: int, n: int, smoothing: bool = False) -> float:
    """Return the IDF of a term, rounded to 3 decimals.

    df: number of documents containing the term; n: total documents.
    With smoothing, returns 1 + log10(n / (1 + df)) so df == 0 is safe.

    Raises:
        ZeroDivisionError: if df == 0 without smoothing.
        ValueError: if n == 0 (log10(0) is undefined).
    """
    if smoothing:
        if n == 0:
            raise ValueError("log10(0) is undefined.")
        return round(1 + log10(n / (1 + df)), 3)
    if df == 0:
        raise ZeroDivisionError("df must be > 0")
    elif n == 0:
        raise ValueError("log10(0) is undefined.")
    return round(log10(n / df), 3)


def tf_idf(tf: int, idf: int) -> float:
    """Return the TF-IDF score tf * idf, rounded to 3 decimals."""
    return round(tf * idf, 3)
702
import argparse
import os

import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType

########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC in any of the
# following settings (with the same script): single CPU/GPU, multi-GPU,
# TPU, fp16/fp32. New additions from the base script are marked
# with "# New Code #" tags.
#
# https://github.com/huggingface/accelerate/tree/main/examples
########################################################################

MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """Build the MRPC train/eval dataloaders, tokenized with bert-base-cased.

    Returns (train_dataloader, eval_dataloader).
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels
    # by the models of the transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )
    return train_dataloader, eval_dataloader


# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811


def training_function(config, args):
    """Train/evaluate BERT on MRPC with optional experiment tracking."""
    # For testing only: shrink the run.
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize Accelerator
    # New Code #
    # We pass in "all" to `log_with` to grab all available trackers in the environment.
    # Note: a custom `Tracker` class instance may be passed in the list as well.
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir
        )
    else:
        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    set_seed(seed)

    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    # Instantiate the model (we build the model here so that the seed also controls
    # new weight initialization).
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True`
    # (default value). Placing it before optimizer creation is required for TPU.
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything. There is no specific order to remember; we just unpack the
    # objects in the same order we gave them to the prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # New Code #
    # We need to initialize the trackers we use. Overall configurations can also be stored.
    if args.with_tracking:
        run = os.path.split(__file__)[-1].split(".")[0]
        accelerator.init_trackers(run, config)

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        # New Code #
        # For our tracking example, we will log the total loss of each epoch
        if args.with_tracking:
            total_loss = 0
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            # New Code #
            if args.with_tracking:
                total_loss += loss.detach().float()
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True` (the default).
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)

        # New Code #
        # To actually log, we call `Accelerator.log`; the values passed can be of
        # `str`, `int`, `float` or `dict` of `str` to `float`/`int`.
        if args.with_tracking:
            accelerator.log(
                {
                    "accuracy": eval_metric["accuracy"],
                    "f1": eval_metric["f1"],
                    "train_loss": total_loss.item() / len(train_dataloader),
                    "epoch": epoch,
                },
                step=epoch,
            )

    # New Code #
    # When a run is finished, you should call `accelerator.end_training()`
    # to close all of the open trackers
    if args.with_tracking:
        accelerator.end_training()


def main():
    """Parse CLI arguments and launch training."""
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    parser.add_argument(
        "--with_tracking",
        action="store_true",
        help="Whether to load in all available experiment trackers from the environment and use them for logging.",
    )
    parser.add_argument(
        "--project_dir",
        type=str,
        default="logs",
        help="Location on where to store experiment tracking logs` and relevent project information",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
584
0
"""Convert a BigBird TensorFlow checkpoint to a PyTorch model directory."""
import argparse

from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging

logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, big_bird_config_file, pytorch_dump_path, is_trivia_qa):
    """Build a BigBird model from a config, load TF weights, and save it.

    Args:
        tf_checkpoint_path: Path to the TensorFlow checkpoint.
        big_bird_config_file: JSON config describing the architecture.
        pytorch_dump_path: Output directory for the converted PyTorch model.
        is_trivia_qa: If True, build a question-answering head instead of pretraining heads.
    """
    # Initialise PyTorch model
    config = BigBirdConfig.from_json_file(big_bird_config_file)
    print(f"Building PyTorch model from configuration: {config}")

    if is_trivia_qa:
        model = BigBirdForQuestionAnswering(config)
    else:
        model = BigBirdForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_big_bird(model, tf_checkpoint_path, is_trivia_qa=is_trivia_qa)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--big_bird_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained BERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--is_trivia_qa", action="store_true", help="Whether to convert a model with a trivia_qa head."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(
        args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
    )
413
from __future__ import annotations

# Sample input and its expected result ("next strictly greater element to the
# right, or -1"), also used by the __main__ benchmark below.
arr = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
expect = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]


def next_greatest_element_slow(arr: list[float]) -> list[float]:
    """O(n^2) brute force: for each element, scan rightwards for the first greater one."""
    result = []
    arr_size = len(arr)
    for i in range(arr_size):
        next_element: float = -1
        for j in range(i + 1, arr_size):
            if arr[i] < arr[j]:
                next_element = arr[j]
                break
        result.append(next_element)
    return result


def next_greatest_element_fast(arr: list[float]) -> list[float]:
    """Like the slow version but iterates with enumerate/slices instead of indices."""
    result = []
    for i, outer in enumerate(arr):
        next_element: float = -1
        for inner in arr[i + 1 :]:
            if outer < inner:
                next_element = inner
                break
        result.append(next_element)
    return result


def next_greatest_element(arr: list[float]) -> list[float]:
    """O(n) monotonic-stack solution, scanning from the right.

    The stack keeps candidates in decreasing order; elements <= the current
    value can never be a "next greater" answer for anything to the left.
    """
    arr_size = len(arr)
    stack: list[float] = []
    result: list[float] = [-1] * arr_size

    for index in reversed(range(arr_size)):
        if stack:
            # Pop candidates that are not strictly greater than arr[index].
            while stack[-1] <= arr[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            result[index] = stack[-1]
        stack.append(arr[index])
    return result


if __name__ == "__main__":
    from doctest import testmod
    from timeit import timeit

    testmod()
    print(next_greatest_element_slow(arr))
    print(next_greatest_element_fast(arr))
    print(next_greatest_element(arr))

    setup = (
        "from __main__ import arr, next_greatest_element_slow, "
        "next_greatest_element_fast, next_greatest_element"
    )
    print(
        "next_greatest_element_slow():",
        timeit("next_greatest_element_slow(arr)", setup=setup),
    )
    print(
        "next_greatest_element_fast():",
        timeit("next_greatest_element_fast(arr)", setup=setup),
    )
    print(
        "    next_greatest_element():",
        timeit("next_greatest_element(arr)", setup=setup),
    )
307
0
"""Tests for the NLLB tokenizer (slow and fast variants)."""
import shutil
import tempfile
import unittest

from transformers import (
    SPIECE_UNDERLINE,
    AddedToken,
    BatchEncoding,
    NllbTokenizer,
    NllbTokenizerFast,
    is_torch_available,
)
from transformers.testing_utils import (
    get_tests_dir,
    nested_simplify,
    require_sentencepiece,
    require_tokenizers,
    require_torch,
)

from ...test_tokenization_common import TokenizerTesterMixin

SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")

if is_torch_available():
    # NLLB reuses M2M100's shift_tokens_right (the original import referenced a
    # non-existent module name).
    from transformers.models.m2m_100.modeling_m2m_100 import shift_tokens_right

EN_CODE = 256047
RO_CODE = 256145


@require_sentencepiece
@require_tokenizers
class NllbTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Common-mixin tests against a small SentencePiece fixture."""

    # Hooks consumed by TokenizerTesterMixin.
    tokenizer_class = NllbTokenizer
    rust_tokenizer_class = NllbTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    from_pretrained_kwargs = {}

    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = NllbTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_full_tokenizer(self):
        tokenizer = NllbTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        # Characters missing from the fixture vocab come back as <unk>.
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )

    def test_save_pretrained(self):
        self.tokenizers_list = [(self.rust_tokenizer_class, "hf-internal-testing/tiny-random-nllb", {})]
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

    @require_torch
    def test_prepare_seq2seq_batch(self):
        if not self.test_seq2seq:
            return

        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Longer text that will definitely require truncation.
                src_text = [
                    " UN Chief Says There Is No Military Solution in Syria",
                    " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for"
                    " Syria is that 'there is no military solution' to the nearly five-year conflict and more weapons"
                    " will only worsen the violence and misery for millions of people.",
                ]
                tgt_text = [
                    "Şeful ONU declară că nu există o soluţie militară în Siria",
                    "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al"
                    " Rusiei pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi"
                    " că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
                ]
                try:
                    batch = tokenizer.prepare_seq2seq_batch(
                        src_texts=src_text,
                        tgt_texts=tgt_text,
                        max_length=3,
                        max_target_length=10,
                        return_tensors="pt",
                        src_lang="eng_Latn",
                        tgt_lang="ron_Latn",
                    )
                except NotImplementedError:
                    return
                self.assertEqual(batch.input_ids.shape[1], 3)
                self.assertEqual(batch.labels.shape[1], 10)
                # max_target_length will default to max_length if not specified
                batch = tokenizer.prepare_seq2seq_batch(
                    src_text, tgt_texts=tgt_text, max_length=3, return_tensors="pt"
                )
                self.assertEqual(batch.input_ids.shape[1], 3)
                self.assertEqual(batch.labels.shape[1], 3)

                batch_encoder_only = tokenizer.prepare_seq2seq_batch(
                    src_texts=src_text, max_length=3, max_target_length=10, return_tensors="pt"
                )
                self.assertEqual(batch_encoder_only.input_ids.shape[1], 3)
                self.assertEqual(batch_encoder_only.attention_mask.shape[1], 3)
                self.assertNotIn("decoder_input_ids", batch_encoder_only)

    @unittest.skip("Unfortunately way too slow to build a BPE with SentencePiece.")
    def test_save_slow_from_fast_and_reload_fast(self):
        pass

    def test_special_tokens_initialization(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                added_tokens = [AddedToken("<special>", lstrip=True)]

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, additional_special_tokens=added_tokens, **kwargs
                )
                r_output = tokenizer_r.encode("Hey this is a <special> token")

                special_token_id = tokenizer_r.encode("<special>", add_special_tokens=False)[0]

                self.assertTrue(special_token_id in r_output)

                if self.test_slow_tokenizer:
                    tokenizer_cr = self.rust_tokenizer_class.from_pretrained(
                        pretrained_name,
                        additional_special_tokens=added_tokens,
                        **kwargs,  # , from_slow=True <- TODO(review): confirm against upstream
                    )
                    tokenizer_p = self.tokenizer_class.from_pretrained(
                        pretrained_name, additional_special_tokens=added_tokens, **kwargs
                    )

                    p_output = tokenizer_p.encode("Hey this is a <special> token")
                    cr_output = tokenizer_cr.encode("Hey this is a <special> token")

                    self.assertEqual(p_output, r_output)
                    self.assertEqual(cr_output, r_output)
                    self.assertTrue(special_token_id in p_output)
                    self.assertTrue(special_token_id in cr_output)


@require_torch
@require_sentencepiece
@require_tokenizers
class NllbDistilledIntegrationTest(unittest.TestCase):
    """Integration tests against the published distilled-600M checkpoint."""

    checkpoint_name = "facebook/nllb-200-distilled-600M"
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
        ' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.',
    ]
    tgt_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
        'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'
        ' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
        " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
    ]
    expected_src_tokens = [
        256047,
        16297,
        134408,
        8165,
        248066,
        14734,
        950,
        1135,
        105721,
        3573,
        83,
        27352,
        108,
        49486,
        2,
    ]

    @classmethod
    def setUpClass(cls):
        cls.tokenizer = NllbTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="eng_Latn", tgt_lang="ron_Latn"
        )
        cls.pad_token_id = 1
        return cls

    def test_language_codes(self):
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ace_Arab"], 256001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ace_Latn"], 256002)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["fra_Latn"], 256057)

    def test_enro_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_enro_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
        # fmt: off
        generated_ids = [RO_CODE, 4254, 98068, 112923, 39072, 3909, 713, 102767, 26, 17314, 35642, 14683, 33118, 2022, 66987, 2, 256047]
        # fmt: on
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_enro_tokenizer_truncation(self):
        src_text = ["this is gunna be a long sentence " * 20]
        assert isinstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[-1], 2)
        self.assertEqual(ids[0], EN_CODE)
        self.assertEqual(len(ids), desired_max_length)

    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]), [256203, 3])

    def test_special_tokens_unaffacted_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = NllbTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)

    @require_torch
    def test_enro_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text,
            text_target=self.tgt_text,
            padding=True,
            truncation=True,
            max_length=len(self.expected_src_tokens),
            return_tensors="pt",
        )
        batch["decoder_input_ids"] = shift_tokens_right(
            batch["labels"], self.tokenizer.pad_token_id, self.tokenizer.lang_code_to_id["ron_Latn"]
        )

        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 15), batch.input_ids.shape)
        self.assertEqual((2, 15), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(RO_CODE, batch.decoder_input_ids[0, 0])  # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [EN_CODE])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])

    def test_seq2seq_max_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt"
        )
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(
            labels,
            self.tokenizer.pad_token_id,
            decoder_start_token_id=self.tokenizer.lang_code_to_id[self.tokenizer.tgt_lang],
        )

        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)

    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="eng_Latn", tgt_lang="fra_Latn"
        )
        self.assertEqual(
            nested_simplify(inputs),
            {
                # A, test, EOS, en_XX
                "input_ids": [[256047, 70, 7356, 2]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 256057,
            },
        )

    @require_torch
    def test_legacy_behaviour(self):
        # Legacy mode appends the language code at the end ...
        self.tokenizer.legacy_behaviour = True
        inputs = self.tokenizer(
            "UN Chief says there is no military solution in Syria", src_lang="eng_Latn", tgt_lang="fra_Latn"
        )
        self.assertEqual(
            inputs.input_ids, [16297, 134408, 25653, 6370, 248, 254, 103929, 94995, 108, 49486, 2, 256047]
        )

        # ... while the new behaviour prefixes it.
        self.tokenizer.legacy_behaviour = False
        inputs = self.tokenizer(
            "UN Chief says there is no military solution in Syria", src_lang="eng_Latn", tgt_lang="fra_Latn"
        )
        self.assertEqual(
            inputs.input_ids, [256047, 16297, 134408, 25653, 6370, 248, 254, 103929, 94995, 108, 49486, 2]
        )
709
"""Tests for the GLPN image processor.

Reconstructed from a mangled original in which class, attribute and method
names were obfuscated into colliding identifiers (``lowerCamelCase__``,
``lowercase__``, ``A__``), breaking every cross-reference:
- the tester class was instantiated as ``GLPNImageProcessingTester`` but
  defined under another name (NameError),
- the mixin-required ``image_processing_class`` attribute was lost,
- all test methods shared one name, so only the last one actually ran.
"""

import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import GLPNImageProcessor


class GLPNImageProcessingTester(unittest.TestCase):
    """Holds the processor hyper-parameters and builds the kwargs dict used by the tests."""

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size_divisor=32,
        do_rescale=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale

    def prepare_image_processor_dict(self):
        # Keyword arguments accepted by GLPNImageProcessor.__init__.
        return {
            "do_resize": self.do_resize,
            "size_divisor": self.size_divisor,
            "do_rescale": self.do_rescale,
        }


@require_torch
@require_vision
class GLPNImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    # Read by ImageProcessingSavingTestMixin to know which class to exercise.
    image_processing_class = GLPNImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = GLPNImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))
        self.assertTrue(hasattr(image_processing, "resample"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))

    def test_batch_feature(self):
        # GLPN has no batch-feature/padding logic to test.
        pass

    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)
211
0
__lowerCamelCase : Tuple = [0, 2, 4, 6, 8] __lowerCamelCase : Optional[int] = [1, 3, 5, 7, 9] def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_, snake_case_, snake_case_ ) -> int: if remaining_length == 0: if digits[0] == 0 or digits[-1] == 0: return 0 for i in range(length // 2 - 1, -1, -1 ): remainder += digits[i] + digits[length - i - 1] if remainder % 2 == 0: return 0 remainder //= 1_0 return 1 if remaining_length == 1: if remainder % 2 == 0: return 0 A__ : List[Any] =0 for digit in range(1_0 ): A__ : str =digit result += reversible_numbers( 0, (remainder + 2 * digit) // 1_0, snake_case_, snake_case_ ) return result A__ : Any =0 for digita in range(1_0 ): A__ : Any =digita if (remainder + digita) % 2 == 0: A__ : Optional[Any] =ODD_DIGITS else: A__ : int =EVEN_DIGITS for digita in other_parity_digits: A__ : Any =digita result += reversible_numbers( remaining_length - 2, (remainder + digita + digita) // 1_0, snake_case_, snake_case_, ) return result def SCREAMING_SNAKE_CASE__ ( snake_case_ = 9 ) -> int: A__ : List[str] =0 for length in range(1, max_power + 1 ): result += reversible_numbers(snake_case_, 0, [0] * length, snake_case_ ) return result if __name__ == "__main__": print(F"{solution() = }")
416
"""Tests for the NeZha model.

Reconstructed from a mangled original that did not even parse: tuple-unpack
targets carried type annotations (``( (A__), (A__) ) : str = ...`` is a
SyntaxError), the three classes were all named ``a``, and the mixin base
classes were the undefined name ``UpperCamelCase_``. Identifiers are restored
to the conventional transformers test names; all tensor values, shapes and
hyper-parameters are kept exactly as in the source.
"""

import os
import tempfile
import unittest

from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        MODEL_FOR_PRETRAINING_MAPPING,
        NezhaForMaskedLM,
        NezhaForMultipleChoice,
        NezhaForNextSentencePrediction,
        NezhaForPreTraining,
        NezhaForQuestionAnswering,
        NezhaForSequenceClassification,
        NezhaForTokenClassification,
        NezhaModel,
    )
    from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST


class NezhaModelTester:
    """Builds tiny NeZha configs/inputs and runs one forward check per head."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=128,
        # NOTE(review): the obfuscated source had an extra default of 32 between
        # max_position_embeddings and type_vocab_size whose assignment was lost;
        # NeZha's relative-position window fits here — confirm against upstream.
        max_relative_position=32,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.max_relative_position = max_relative_position
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return NezhaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = NezhaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = NezhaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = NezhaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = NezhaForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        # Each input is repeated once per choice: (batch, num_choices, seq_len).
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class NezhaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            NezhaModel,
            NezhaForMaskedLM,
            NezhaForMultipleChoice,
            NezhaForNextSentencePrediction,
            NezhaForPreTraining,
            NezhaForQuestionAnswering,
            NezhaForSequenceClassification,
            NezhaForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": NezhaModel,
            "fill-mask": NezhaForMaskedLM,
            "question-answering": NezhaForQuestionAnswering,
            "text-classification": NezhaForSequenceClassification,
            "token-classification": NezhaForTokenClassification,
            "zero-shot": NezhaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    # NOTE(review): the obfuscated source had a third truthy class flag here;
    # upstream NeZha tests set fx_compatible — confirm against upstream.
    fx_compatible = True

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = NezhaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=NezhaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_model_as_decoder_with_default_input_mask(self):
        # The decoder path must also work when no attention mask is provided.
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()

        input_mask = None

        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NezhaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # NezhaForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == NezhaForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "bert.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "bert.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))


@require_torch
class NezhaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_nezha_model(self):
        model = NezhaModel.from_pretrained("sijunhe/nezha-cn-base")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[0.0685, 0.2441, 0.1102], [0.0600, 0.1906, 0.1349], [0.0221, 0.0819, 0.0586]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))

    @slow
    def test_inference_nezha_masked_lm(self):
        model = NezhaForMaskedLM.from_pretrained("sijunhe/nezha-cn-base")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        attention_mask = torch.tensor([[1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 6, 21128))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-2.7939, -1.7902, -2.2189], [-2.8585, -1.8908, -2.3723], [-2.6499, -1.7750, -2.2558]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
416
1
"""Convert original Segment-Anything (SAM) checkpoints to the HF format.

Reconstructed from a mangled original in which every call argument had been
replaced by the undefined placeholder ``_snake_case`` and ``replace_keys``
read an undefined ``model_state_dict`` — the script could not run at all.
Behavior (key remapping, config selection, reference IoU scores) is kept
exactly as in the source.
"""

import argparse
import re

import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import (
    SamConfig,
    SamImageProcessor,
    SamModel,
    SamProcessor,
    SamVisionConfig,
)

# Substring renames applied to every original checkpoint key.
KEYS_TO_MODIFY_MAPPING = {
    "iou_prediction_head.layers.0": "iou_prediction_head.proj_in",
    "iou_prediction_head.layers.1": "iou_prediction_head.layers.0",
    "iou_prediction_head.layers.2": "iou_prediction_head.proj_out",
    "mask_decoder.output_upscaling.0": "mask_decoder.upscale_conv1",
    "mask_decoder.output_upscaling.1": "mask_decoder.upscale_layer_norm",
    "mask_decoder.output_upscaling.3": "mask_decoder.upscale_conv2",
    "mask_downscaling.0": "mask_embed.conv1",
    "mask_downscaling.1": "mask_embed.layer_norm1",
    "mask_downscaling.3": "mask_embed.conv2",
    "mask_downscaling.4": "mask_embed.layer_norm2",
    "mask_downscaling.6": "mask_embed.conv3",
    "point_embeddings": "point_embed",
    "pe_layer.positional_encoding_gaussian_matrix": "shared_embedding.positional_embedding",
    "image_encoder": "vision_encoder",
    "neck.0": "neck.conv1",
    "neck.1": "neck.layer_norm1",
    "neck.2": "neck.conv2",
    "neck.3": "neck.layer_norm2",
    "patch_embed.proj": "patch_embed.projection",
    ".norm": ".layer_norm",
    "blocks": "layers",
}


def replace_keys(state_dict):
    """Return a new state dict with original SAM keys renamed to HF names.

    Drops the normalization buffers (handled by the image processor) and
    renames the hypernetwork MLP layers to proj_in / layers.0 / proj_out.
    """
    model_state_dict = {}
    state_dict.pop("pixel_mean", None)
    state_dict.pop("pixel_std", None)

    output_hypernetworks_mlps_pattern = r".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"

    for key, value in state_dict.items():
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)

        if re.match(output_hypernetworks_mlps_pattern, key):
            layer_nb = int(re.match(output_hypernetworks_mlps_pattern, key).group(2))
            if layer_nb == 0:
                key = key.replace("layers.0", "proj_in")
            elif layer_nb == 1:
                key = key.replace("layers.1", "layers.0")
            elif layer_nb == 2:
                key = key.replace("layers.2", "proj_out")

        model_state_dict[key] = value

    # The shared positional embedding is duplicated under a second module name.
    model_state_dict["shared_image_embedding.positional_embedding"] = model_state_dict[
        "prompt_encoder.shared_embedding.positional_embedding"
    ]

    return model_state_dict


def convert_sam_checkpoint(model_name, pytorch_dump_folder, push_to_hub, model_hub_id="ybelkada/segment-anything"):
    """Download the original checkpoint, convert it, and sanity-check outputs on GPU."""
    checkpoint_path = hf_hub_download(model_hub_id, f"checkpoints/{model_name}.pth")

    if "sam_vit_b" in model_name:
        config = SamConfig()
    elif "sam_vit_l" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1024,
            num_hidden_layers=24,
            num_attention_heads=16,
            global_attn_indexes=[5, 11, 17, 23],
        )
        config = SamConfig(
            vision_config=vision_config,
        )
    elif "sam_vit_h" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1280,
            num_hidden_layers=32,
            num_attention_heads=16,
            global_attn_indexes=[7, 15, 23, 31],
        )
        config = SamConfig(
            vision_config=vision_config,
        )

    state_dict = torch.load(checkpoint_path, map_location="cpu")
    state_dict = replace_keys(state_dict)

    image_processor = SamImageProcessor()
    processor = SamProcessor(image_processor=image_processor)
    hf_model = SamModel(config)
    hf_model.load_state_dict(state_dict)
    hf_model = hf_model.to("cuda")

    img_url = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")

    input_points = [[[400, 650]]]
    input_labels = [[1]]

    inputs = processor(images=np.array(raw_image), return_tensors="pt").to("cuda")

    with torch.no_grad():
        output = hf_model(**inputs)
    scores = output.iou_scores.squeeze()

    # Reference IoU scores recorded from the original implementation.
    if model_name == "sam_vit_h_4b8939":
        assert scores[-1].item() == 0.579890251159668

        inputs = processor(
            images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt"
        ).to("cuda")

        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()

        assert scores[-1].item() == 0.9712603092193604

        input_boxes = ((75, 275, 1725, 850),)

        inputs = processor(images=np.array(raw_image), input_boxes=input_boxes, return_tensors="pt").to("cuda")

        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()

        assert scores[-1].item() == 0.8686015605926514

        # Test with 2 points and 1 image.
        input_points = [[[400, 650], [800, 650]]]
        input_labels = [[1, 1]]

        inputs = processor(
            images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt"
        ).to("cuda")

        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()

        assert scores[-1].item() == 0.9936047792434692


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = ["sam_vit_b_01ec64", "sam_vit_h_4b8939", "sam_vit_l_0b3195"]
    parser.add_argument(
        "--model_name",
        default="sam_vit_h_4b8939",
        choices=choices,
        type=str,
        help="Path to hf config.json of model to convert",
    )
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model and processor to the hub after converting",
    )
    parser.add_argument(
        "--model_hub_id",
        default="ybelkada/segment-anything",
        choices=choices,
        type=str,
        help="Path to hf config.json of model to convert",
    )

    args = parser.parse_args()

    convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
711
"""simple docstring""" import argparse from collections import defaultdict import yaml a__ : List[str] = """docs/source/en/_toctree.yml""" def A__ ( __lowerCamelCase ): """simple docstring""" _lowerCAmelCase = defaultdict(__lowerCamelCase ) for doc in model_doc: counts[doc["local"]] += 1 _lowerCAmelCase = [key for key, value in counts.items() if value > 1] _lowerCAmelCase = [] for duplicate_key in duplicates: _lowerCAmelCase = list({doc['title'] for doc in model_doc if doc['local'] == duplicate_key} ) if len(__lowerCamelCase ) > 1: raise ValueError( F'''{duplicate_key} is present several times in the documentation table of content at ''' '`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the ' 'others.' ) # Only add this once new_doc.append({'local': duplicate_key, 'title': titles[0]} ) # Add none duplicate-keys new_doc.extend([doc for doc in model_doc if counts[doc['local']] == 1] ) # Sort return sorted(__lowerCamelCase, key=lambda __lowerCamelCase : s["title"].lower() ) def A__ ( __lowerCamelCase=False ): """simple docstring""" with open(__lowerCamelCase, encoding='utf-8' ) as f: _lowerCAmelCase = yaml.safe_load(f.read() ) # Get to the API doc _lowerCAmelCase = 0 while content[api_idx]["title"] != "API": api_idx += 1 _lowerCAmelCase = content[api_idx]['sections'] # Then to the model doc _lowerCAmelCase = 0 while api_doc[model_idx]["title"] != "Models": model_idx += 1 _lowerCAmelCase = api_doc[model_idx]['sections'] _lowerCAmelCase = [(idx, section) for idx, section in enumerate(__lowerCamelCase ) if 'sections' in section] _lowerCAmelCase = False for idx, modality_doc in modalities_docs: _lowerCAmelCase = modality_doc['sections'] _lowerCAmelCase = clean_model_doc_toc(__lowerCamelCase ) if old_modality_doc != new_modality_doc: _lowerCAmelCase = True if overwrite: _lowerCAmelCase = new_modality_doc if diff: if overwrite: _lowerCAmelCase = model_doc _lowerCAmelCase = api_doc with open(__lowerCamelCase, 'w', encoding='utf-8' ) 
as f: f.write(yaml.dump(__lowerCamelCase, allow_unicode=__lowerCamelCase ) ) else: raise ValueError( 'The model doc part of the table of content is not properly sorted, run `make style` to fix this.' ) if __name__ == "__main__": a__ : Dict = argparse.ArgumentParser() parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""") a__ : str = parser.parse_args() check_model_doc(args.fix_and_overwrite)
309
0
'''simple docstring''' from transformers import DistilBertTokenizer, DistilBertTokenizerFast from transformers.testing_utils import require_tokenizers, slow from ..bert.test_tokenization_bert import BertTokenizationTest @require_tokenizers class lowerCamelCase ( lowercase_ ): '''simple docstring''' __snake_case = DistilBertTokenizer __snake_case = DistilBertTokenizerFast __snake_case = True @slow def lowercase__ ( self : Dict ) -> Tuple: '''simple docstring''' A__ : List[Any] =DistilBertTokenizer.from_pretrained("""distilbert-base-uncased""" ) A__ : Optional[int] =tokenizer.encode("""sequence builders""" , add_special_tokens=lowerCAmelCase_ ) A__ : Optional[int] =tokenizer.encode("""multi-sequence build""" , add_special_tokens=lowerCAmelCase_ ) A__ : Tuple =tokenizer.build_inputs_with_special_tokens(lowerCAmelCase_ ) A__ : Optional[Any] =tokenizer.build_inputs_with_special_tokens(lowerCAmelCase_ , lowerCAmelCase_ ) assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [ tokenizer.sep_token_id ]
215
'''simple docstring''' __snake_case : Optional[Any] = 8.314462 # Unit - J mol-1 K-1 def __lowerCamelCase ( __snake_case : float, __snake_case : float, __snake_case : float ) -> float: """simple docstring""" if moles < 0 or kelvin < 0 or volume < 0: raise ValueError("""Invalid inputs. Enter positive value.""" ) return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume def __lowerCamelCase ( __snake_case : float, __snake_case : float, __snake_case : float ) -> float: """simple docstring""" if moles < 0 or kelvin < 0 or pressure < 0: raise ValueError("""Invalid inputs. Enter positive value.""" ) return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure if __name__ == "__main__": from doctest import testmod testmod()
215
1
"""simple docstring""" import math import unittest from transformers import BioGptConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification, BioGptModel, BioGptTokenizer, ) from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST class A_ : """simple docstring""" def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=32 , __UpperCAmelCase=5 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=5_12 , __UpperCAmelCase=16 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=3 , __UpperCAmelCase=4 , __UpperCAmelCase=None , ) -> Dict: a : Dict = parent a : Tuple = batch_size a : Dict = seq_length a : List[Any] = is_training a : Any = use_input_mask a : List[Any] = use_token_type_ids a : Dict = use_labels a : Optional[int] = vocab_size a : int = hidden_size a : Union[str, Any] = num_hidden_layers a : int = num_attention_heads a : Any = intermediate_size a : Tuple = hidden_act a : str = hidden_dropout_prob a : str = attention_probs_dropout_prob a : str = max_position_embeddings a : str = type_vocab_size a : Any = type_sequence_label_size a : Optional[int] = initializer_range a : Union[str, Any] = num_labels a : List[str] = num_choices a : Optional[Any] = scope def lowercase_ ( self ) -> Union[str, Any]: a : Tuple = ids_tensor([self.batch_size, 
self.seq_length] , self.vocab_size ) a : Optional[Any] = None if self.use_input_mask: a : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] ) a : Optional[Any] = None if self.use_token_type_ids: a : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) a : List[str] = None a : List[Any] = None a : Optional[Any] = None if self.use_labels: a : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) a : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) a : int = ids_tensor([self.batch_size] , self.num_choices ) a : Dict = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowercase_ ( self ) -> Tuple: return BioGptConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , ) def lowercase_ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Any: a : Optional[int] = BioGptModel(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() a : Optional[int] = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase ) a : int = model(__UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowercase_ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) 
-> Dict: a : Any = BioGptForCausalLM(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() a : Optional[Any] = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowercase_ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , *__UpperCAmelCase ) -> List[str]: a : Any = BioGptModel(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() # create attention mask a : Tuple = torch.ones(input_ids.shape , dtype=torch.long , device=__UpperCAmelCase ) a : Tuple = self.seq_length // 2 a : str = 0 # first forward pass a , a : Any = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase ).to_tuple() # create hypothetical next token and extent to next_input_ids a : Union[str, Any] = ids_tensor((self.batch_size, 1) , config.vocab_size ) # change a random masked slice from input_ids a : List[str] = ids_tensor((1,) , __UpperCAmelCase ).item() + 1 a : int = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 ) a : Tuple = random_other_next_tokens # append to next input_ids and attn_mask a : Any = torch.cat([input_ids, next_tokens] , dim=-1 ) a : Optional[Any] = torch.cat( [attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=__UpperCAmelCase )] , dim=1 , ) # get two different outputs a : Any = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase )['last_hidden_state'] a : Union[str, Any] = model(__UpperCAmelCase , past_key_values=__UpperCAmelCase , attention_mask=__UpperCAmelCase )['last_hidden_state'] # select random slice a : List[Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item() a : Optional[int] = output_from_no_past[:, -1, random_slice_idx].detach() a : Dict = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice 
self.parent.assertTrue(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-3 ) ) def lowercase_ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , *__UpperCAmelCase ) -> Tuple: a : List[str] = BioGptModel(config=__UpperCAmelCase ).to(__UpperCAmelCase ).eval() a : List[Any] = torch.ones(input_ids.shape , dtype=torch.long , device=__UpperCAmelCase ) # first forward pass a : Optional[Any] = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , use_cache=__UpperCAmelCase ) a , a : List[str] = outputs.to_tuple() # create hypothetical multiple next token and extent to next_input_ids a : List[Any] = ids_tensor((self.batch_size, 3) , config.vocab_size ) a : Optional[Any] = ids_tensor((self.batch_size, 3) , 2 ) # append to next input_ids and a : List[Any] = torch.cat([input_ids, next_tokens] , dim=-1 ) a : str = torch.cat([attention_mask, next_attn_mask] , dim=-1 ) a : List[str] = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase )['last_hidden_state'] a : Union[str, Any] = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , past_key_values=__UpperCAmelCase )[ 'last_hidden_state' ] # select random slice a : Tuple = ids_tensor((1,) , output_from_past.shape[-1] ).item() a : str = output_from_no_past[:, -3:, random_slice_idx].detach() a : List[str] = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-3 ) ) def lowercase_ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , *__UpperCAmelCase , __UpperCAmelCase=False ) -> List[Any]: a : Union[str, Any] = BioGptForCausalLM(__UpperCAmelCase ) model.to(__UpperCAmelCase ) if gradient_checkpointing: model.gradient_checkpointing_enable() a : str = model(__UpperCAmelCase , labels=__UpperCAmelCase ) 
self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) result.loss.backward() def lowercase_ ( self , __UpperCAmelCase , *__UpperCAmelCase ) -> List[str]: a : Optional[int] = BioGptModel(__UpperCAmelCase ) a : str = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers ) for key in model.state_dict().keys(): if "c_proj" in key and "weight" in key: self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.001 ) self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.01 ) def lowercase_ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , *__UpperCAmelCase ) -> Any: a : List[str] = self.num_labels a : Any = BioGptForTokenClassification(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() a : Dict = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowercase_ ( self ) -> List[str]: a : Any = self.prepare_config_and_inputs() ( ( a ) , ( a ) , ( a ) , ( a ) , ( a ) , ( a ) , ( a ) , ) : Optional[int] = config_and_inputs a : Tuple = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class A_ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ): """simple docstring""" lowercase : List[str] = ( (BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification) if is_torch_available() else () ) lowercase : Any = (BioGptForCausalLM,) if is_torch_available() else () lowercase : int = ( { "feature-extraction": BioGptModel, "text-classification": BioGptForSequenceClassification, "text-generation": BioGptForCausalLM, "token-classification": BioGptForTokenClassification, "zero-shot": 
BioGptForSequenceClassification, } if is_torch_available() else {} ) lowercase : List[str] = False def lowercase_ ( self ) -> List[Any]: a : str = BioGptModelTester(self ) a : str = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=37 ) def lowercase_ ( self ) -> Tuple: self.config_tester.run_common_tests() def lowercase_ ( self ) -> int: a : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCAmelCase ) def lowercase_ ( self ) -> List[str]: a : Any = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: a : Tuple = type self.model_tester.create_and_check_model(*__UpperCAmelCase ) def lowercase_ ( self ) -> Optional[Any]: a : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_model_attention_mask_past(*__UpperCAmelCase ) def lowercase_ ( self ) -> Optional[int]: a : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_forward_and_backwards(*__UpperCAmelCase , gradient_checkpointing=__UpperCAmelCase ) def lowercase_ ( self ) -> Optional[int]: a : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_model_past_large_inputs(*__UpperCAmelCase ) def lowercase_ ( self ) -> Any: a : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_weight_initialization(*__UpperCAmelCase ) def lowercase_ ( self ) -> Optional[Any]: a : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_for_token_classification(*__UpperCAmelCase ) @slow def lowercase_ ( self ) -> Optional[int]: a : Optional[int] = BioGptForCausalLM.from_pretrained('microsoft/biogpt' ) model.to(__UpperCAmelCase ) a : Tuple = BioGptTokenizer.from_pretrained('microsoft/biogpt' ) a : List[str] = 'left' # Define PAD Token = EOS Token = 50256 a : Any = 
tokenizer.eos_token a : Any = model.config.eos_token_id # use different length sentences to test batching a : List[str] = [ 'Hello, my dog is a little', 'Today, I', ] a : Any = tokenizer(__UpperCAmelCase , return_tensors='pt' , padding=__UpperCAmelCase ) a : Dict = inputs['input_ids'].to(__UpperCAmelCase ) a : List[str] = model.generate( input_ids=__UpperCAmelCase , attention_mask=inputs['attention_mask'].to(__UpperCAmelCase ) , ) a : List[str] = tokenizer(sentences[0] , return_tensors='pt' ).input_ids.to(__UpperCAmelCase ) a : Union[str, Any] = model.generate(input_ids=__UpperCAmelCase ) a : str = inputs_non_padded.shape[-1] - inputs['attention_mask'][-1].long().sum().cpu().item() a : Dict = tokenizer(sentences[1] , return_tensors='pt' ).input_ids.to(__UpperCAmelCase ) a : str = model.generate(input_ids=__UpperCAmelCase , max_length=model.config.max_length - num_paddings ) a : List[Any] = tokenizer.batch_decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase ) a : Optional[int] = tokenizer.decode(output_non_padded[0] , skip_special_tokens=__UpperCAmelCase ) a : Optional[Any] = tokenizer.decode(output_padded[0] , skip_special_tokens=__UpperCAmelCase ) a : Any = [ 'Hello, my dog is a little bit bigger than a little bit.', 'Today, I have a good idea of how to use the information', ] self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase ) self.assertListEqual(__UpperCAmelCase , [non_padded_sentence, padded_sentence] ) @slow def lowercase_ ( self ) -> Any: for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: a : Optional[Any] = BioGptModel.from_pretrained(__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) def lowercase_ ( self ) -> Union[str, Any]: a , a : int = self.model_tester.prepare_config_and_inputs_for_common() a : str = 3 a : Tuple = input_dict['input_ids'] a : Tuple = input_ids.ne(1 ).to(__UpperCAmelCase ) a : int = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) a : Optional[Any] = 
BioGptForSequenceClassification(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() a : Tuple = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def lowercase_ ( self ) -> Union[str, Any]: a , a : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() a : str = 3 a : Tuple = 'multi_label_classification' a : Optional[int] = input_dict['input_ids'] a : Tuple = input_ids.ne(1 ).to(__UpperCAmelCase ) a : Dict = ids_tensor( [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float ) a : int = BioGptForSequenceClassification(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() a : Tuple = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) @require_torch class A_ ( unittest.TestCase ): """simple docstring""" @slow def lowercase_ ( self ) -> Optional[Any]: a : Optional[int] = BioGptForCausalLM.from_pretrained('microsoft/biogpt' ) a : Any = torch.tensor([[2, 48_05, 9, 6_56, 21]] ) a : int = model(__UpperCAmelCase )[0] a : List[Any] = 4_23_84 a : str = torch.Size((1, 5, vocab_size) ) self.assertEqual(output.shape , __UpperCAmelCase ) a : Any = torch.tensor( [[[-9.5236, -9.8918, 10.4557], [-11.0469, -9.6423, 8.1022], [-8.8664, -7.8826, 5.5325]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=1E-4 ) ) @slow def lowercase_ ( self ) -> List[Any]: a : Optional[Any] = BioGptTokenizer.from_pretrained('microsoft/biogpt' ) a : Any = BioGptForCausalLM.from_pretrained('microsoft/biogpt' ) model.to(__UpperCAmelCase ) torch.manual_seed(0 ) a : Any = tokenizer('COVID-19 is' , return_tensors='pt' ).to(__UpperCAmelCase ) a : Union[str, Any] = model.generate( **__UpperCAmelCase , min_length=1_00 , 
max_length=10_24 , num_beams=5 , early_stopping=__UpperCAmelCase , ) a : Any = tokenizer.decode(output_ids[0] , skip_special_tokens=__UpperCAmelCase ) a : Union[str, Any] = ( 'COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the' ' causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and' ' territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),' ' and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and' ' more than 800,000 deaths.' ) self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )
509
"""Generic processor pairing an auto-resolved image processor and tokenizer."""
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class A_(ProcessorMixin):
    """Wraps an ``AutoImageProcessor`` and an ``AutoTokenizer`` behind a
    single ``__call__`` that accepts text, images, or both.
    """

    # Names consumed by ProcessorMixin to save/load the two sub-processors.
    # In the corrupted chunk all three were bound to one mangled name, so the
    # mixin's save/load machinery could not find them.
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)
        # NOTE(review): the original line bound self.image_processor to a
        # throwaway local; upstream processors typically set
        # ``self.current_processor = self.image_processor`` here — confirm
        # against the canonical source before relying on that attribute.

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        """Tokenize ``text`` and/or preprocess ``images``.

        Returns a ``BatchEncoding`` holding ``input_ids``/``attention_mask``
        (when text is given) and ``pixel_values`` (when images are given).

        Raises:
            ValueError: if neither ``text`` nor ``images`` is provided.
        """
        if text is None and images is None:
            raise ValueError('You have to specify either text or images. Both cannot be none.')
        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None and images is not None:
            # Merge image features into the text encoding.  The corrupted chunk
            # assigned pixel_values to a throwaway local, silently dropping
            # them from the returned encoding.
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            # Images only: wrap the image features in a BatchEncoding.
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        # Union of tokenizer and image-processor model inputs.
        return ["input_ids", "attention_mask", "pixel_values"]
509
1
"""simple docstring""" from __future__ import annotations import unittest import numpy as np from transformers import LayoutLMConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.layoutlm.modeling_tf_layoutlm import ( TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFLayoutLMForMaskedLM, TFLayoutLMForQuestionAnswering, TFLayoutLMForSequenceClassification, TFLayoutLMForTokenClassification, TFLayoutLMModel, ) class _UpperCAmelCase : """simple docstring""" def __init__( self , _lowercase , _lowercase=13 , _lowercase=7 , _lowercase=True , _lowercase=True , _lowercase=True , _lowercase=True , _lowercase=99 , _lowercase=32 , _lowercase=2 , _lowercase=4 , _lowercase=37 , _lowercase="gelu" , _lowercase=0.1 , _lowercase=0.1 , _lowercase=512 , _lowercase=16 , _lowercase=2 , _lowercase=0.02 , _lowercase=3 , _lowercase=4 , _lowercase=None , _lowercase=1000 , ) -> int: _lowerCamelCase : List[Any] = parent _lowerCamelCase : Union[str, Any] = batch_size _lowerCamelCase : int = seq_length _lowerCamelCase : List[str] = is_training _lowerCamelCase : List[Any] = use_input_mask _lowerCamelCase : Dict = use_token_type_ids _lowerCamelCase : Optional[int] = use_labels _lowerCamelCase : Optional[Any] = vocab_size _lowerCamelCase : Any = hidden_size _lowerCamelCase : Dict = num_hidden_layers _lowerCamelCase : str = num_attention_heads _lowerCamelCase : Tuple = intermediate_size _lowerCamelCase : List[Any] = hidden_act _lowerCamelCase : Optional[Any] = hidden_dropout_prob _lowerCamelCase : Tuple = attention_probs_dropout_prob _lowerCamelCase : List[str] = max_position_embeddings _lowerCamelCase : Union[str, Any] = type_vocab_size _lowerCamelCase : Union[str, Any] = type_sequence_label_size 
_lowerCamelCase : Tuple = initializer_range _lowerCamelCase : Optional[Any] = num_labels _lowerCamelCase : Dict = num_choices _lowerCamelCase : Union[str, Any] = scope _lowerCamelCase : List[Any] = range_bbox def a__ ( self ) -> str: _lowerCamelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) # convert bbox to numpy since TF does not support item assignment _lowerCamelCase : Dict = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ).numpy() # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: _lowerCamelCase : Tuple = bbox[i, j, 3] _lowerCamelCase : Optional[int] = bbox[i, j, 1] _lowerCamelCase : Union[str, Any] = t if bbox[i, j, 2] < bbox[i, j, 0]: _lowerCamelCase : List[str] = bbox[i, j, 2] _lowerCamelCase : str = bbox[i, j, 0] _lowerCamelCase : Optional[Any] = t _lowerCamelCase : Optional[int] = tf.convert_to_tensor(_lowercase ) _lowerCamelCase : Any = None if self.use_input_mask: _lowerCamelCase : int = random_attention_mask([self.batch_size, self.seq_length] ) _lowerCamelCase : List[Any] = None if self.use_token_type_ids: _lowerCamelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _lowerCamelCase : Tuple = None _lowerCamelCase : str = None _lowerCamelCase : Tuple = None if self.use_labels: _lowerCamelCase : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _lowerCamelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _lowerCamelCase : Any = ids_tensor([self.batch_size] , self.num_choices ) _lowerCamelCase : int = LayoutLMConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , 
attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def a__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> int: _lowerCamelCase : Union[str, Any] = TFLayoutLMModel(config=_lowercase ) _lowerCamelCase : List[Any] = model(_lowercase , _lowercase , attention_mask=_lowercase , token_type_ids=_lowercase ) _lowerCamelCase : Tuple = model(_lowercase , _lowercase , token_type_ids=_lowercase ) _lowerCamelCase : Optional[int] = model(_lowercase , _lowercase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def a__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> Dict: _lowerCamelCase : str = TFLayoutLMForMaskedLM(config=_lowercase ) _lowerCamelCase : Tuple = model(_lowercase , _lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , labels=_lowercase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def a__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> Union[str, Any]: _lowerCamelCase : Union[str, Any] = self.num_labels _lowerCamelCase : Any = TFLayoutLMForSequenceClassification(config=_lowercase ) _lowerCamelCase : Any = model(_lowercase , _lowercase , attention_mask=_lowercase , token_type_ids=_lowercase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def a__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase 
) -> Optional[Any]: _lowerCamelCase : int = self.num_labels _lowerCamelCase : Any = TFLayoutLMForTokenClassification(config=_lowercase ) _lowerCamelCase : int = model(_lowercase , _lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , labels=_lowercase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def a__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> Union[str, Any]: _lowerCamelCase : int = TFLayoutLMForQuestionAnswering(config=_lowercase ) _lowerCamelCase : Optional[Any] = model(_lowercase , _lowercase , attention_mask=_lowercase , token_type_ids=_lowercase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def a__ ( self ) -> Tuple: _lowerCamelCase : List[Any] = self.prepare_config_and_inputs() ( ( _lowerCamelCase ), ( _lowerCamelCase ), ( _lowerCamelCase ), ( _lowerCamelCase ), ( _lowerCamelCase ), ( _lowerCamelCase ), ( _lowerCamelCase ), ( _lowerCamelCase ), ) : str = config_and_inputs _lowerCamelCase : str = { '''input_ids''': input_ids, '''bbox''': bbox, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask, } return config, inputs_dict @require_tf class _UpperCAmelCase ( a_ , a_ , unittest.TestCase ): """simple docstring""" __snake_case = ( ( TFLayoutLMModel, TFLayoutLMForMaskedLM, TFLayoutLMForTokenClassification, TFLayoutLMForSequenceClassification, TFLayoutLMForQuestionAnswering, ) if is_tf_available() else () ) __snake_case = ( { """feature-extraction""": TFLayoutLMModel, """fill-mask""": TFLayoutLMForMaskedLM, """text-classification""": TFLayoutLMForSequenceClassification, """token-classification""": TFLayoutLMForTokenClassification, """zero-shot""": TFLayoutLMForSequenceClassification, } if is_tf_available() else {} ) __snake_case = False __snake_case = True __snake_case = 
10 def a__ ( self ) -> Optional[int]: _lowerCamelCase : Tuple = TFLayoutLMModelTester(self ) _lowerCamelCase : Tuple = ConfigTester(self , config_class=_lowercase , hidden_size=37 ) def a__ ( self ) -> Optional[int]: self.config_tester.run_common_tests() def a__ ( self ) -> Tuple: _lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_lowercase ) def a__ ( self ) -> int: _lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*_lowercase ) def a__ ( self ) -> List[str]: _lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*_lowercase ) def a__ ( self ) -> Optional[Any]: _lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*_lowercase ) def a__ ( self ) -> str: _lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*_lowercase ) @slow def a__ ( self ) -> Optional[int]: for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowerCamelCase : List[str] = TFLayoutLMModel.from_pretrained(_lowercase ) self.assertIsNotNone(_lowercase ) @unittest.skip('''Onnx compliancy broke with TF 2.10''' ) def a__ ( self ) -> List[str]: pass def UpperCamelCase ( ) ->int: # Here we prepare a batch of 2 sequences to test a LayoutLM forward pass on: # fmt: off _lowerCamelCase : List[str] = tf.convert_to_tensor([[101,1019,1014,1016,1037,1_2849,4747,1004,1_4246,2278,5439,4524,5002,2930,2193,2930,4341,3208,1005,1055,2171,2848,1_1300,3531,102],[101,4070,4034,7020,1024,3058,1015,1013,2861,1013,6070,1_9274,2772,6205,2_7814,1_6147,1_6147,4343,2047,1_0283,1_0969,1_4389,1012,2338,102]] ) # noqa: E231 _lowerCamelCase : Any = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],] ) # noqa: E231 _lowerCamelCase : Any = tf.convert_to_tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1000,1000,1000,1000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1000,1000,1000,1000]]] ) # noqa: E231 _lowerCamelCase : Optional[Any] = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]] ) # noqa: E231 # these are sequence labels (i.e. 
at the token level) _lowerCamelCase : int = tf.convert_to_tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]] ) # noqa: E231 # fmt: on return input_ids, attention_mask, bbox, token_type_ids, labels @require_tf class _UpperCAmelCase ( unittest.TestCase ): """simple docstring""" @slow def a__ ( self ) -> Optional[int]: _lowerCamelCase : List[Any] = TFLayoutLMModel.from_pretrained('''microsoft/layoutlm-base-uncased''' ) _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : int = prepare_layoutlm_batch_inputs() # forward pass _lowerCamelCase : Any = model(input_ids=_lowercase , bbox=_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase ) # test the sequence output on [0, :3, :3] _lowerCamelCase : str = tf.convert_to_tensor( [[0.1785, -0.1947, -0.0425], [-0.3254, -0.2807, 0.2553], [-0.5391, -0.3322, 0.3364]] , ) self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , _lowercase , atol=1E-3 ) ) # test the pooled output on [1, :3] _lowerCamelCase : Dict = tf.convert_to_tensor([-0.6580, -0.0214, 0.8552] ) self.assertTrue(np.allclose(outputs.pooler_output[1, :3] , _lowercase , atol=1E-3 ) ) @slow def a__ ( self ) -> Optional[Any]: # initialize model with randomly initialized sequence classification head _lowerCamelCase : int = TFLayoutLMForSequenceClassification.from_pretrained('''microsoft/layoutlm-base-uncased''' , num_labels=2 ) _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Tuple = prepare_layoutlm_batch_inputs() # forward pass _lowerCamelCase : str = model( input_ids=_lowercase , bbox=_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , labels=tf.convert_to_tensor([1, 1] ) , ) # test whether we get a loss as a scalar _lowerCamelCase : List[Any] = outputs.loss _lowerCamelCase : Optional[Any] = (2,) self.assertEqual(loss.shape , 
_lowercase ) # test the shape of the logits _lowerCamelCase : int = outputs.logits _lowerCamelCase : Optional[int] = (2, 2) self.assertEqual(logits.shape , _lowercase ) @slow def a__ ( self ) -> str: # initialize model with randomly initialized token classification head _lowerCamelCase : Optional[int] = TFLayoutLMForTokenClassification.from_pretrained('''microsoft/layoutlm-base-uncased''' , num_labels=13 ) _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : str = prepare_layoutlm_batch_inputs() # forward pass _lowerCamelCase : Tuple = model( input_ids=_lowercase , bbox=_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , labels=_lowercase ) # test the shape of the logits _lowerCamelCase : List[str] = outputs.logits _lowerCamelCase : str = tf.convert_to_tensor((2, 25, 13) ) self.assertEqual(logits.shape , _lowercase ) @slow def a__ ( self ) -> Union[str, Any]: # initialize model with randomly initialized token classification head _lowerCamelCase : Tuple = TFLayoutLMForQuestionAnswering.from_pretrained('''microsoft/layoutlm-base-uncased''' ) _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Optional[Any] = prepare_layoutlm_batch_inputs() # forward pass _lowerCamelCase : Optional[int] = model(input_ids=_lowercase , bbox=_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase ) # test the shape of the logits _lowerCamelCase : Optional[Any] = tf.convert_to_tensor((2, 25) ) self.assertEqual(outputs.start_logits.shape , _lowercase ) self.assertEqual(outputs.end_logits.shape , _lowercase )
434
"""Word error rate (WER) metric, backed by the ``jiwer`` package.

Fixes restored from the mangled original: the three documentation constants
must be named ``_CITATION`` / ``_DESCRIPTION`` / ``_KWARGS_DESCRIPTION``
(the decorator and ``_info`` reference those names), and ``_compute`` needs
distinct parameter names and properly bound accumulator variables.
"""
from jiwer import compute_measures

import datasets


_CITATION = '\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n'

_DESCRIPTION = '\\nWord error rate (WER) is a common metric of the performance of an automatic speech recognition system.\n\nThe general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.\n\nThis problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.\n\nWord error rate can then be computed as:\n\nWER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct words,\nN is the number of words in the reference (N=S+D+C).\n\nThis value indicates the average number of errors per reference word. The lower the value, the better the\nperformance of the ASR system with a WER of 0 being a perfect score.\n'

_KWARGS_DESCRIPTION = '\nCompute WER score of transcribed segments against references.\n\nArgs:\n    references: List of references for each speech input.\n    predictions: List of transcriptions to score.\n    concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.\n\nReturns:\n    (float): the word error rate\n\nExamples:\n\n    >>> predictions = ["this is the prediction", "there is an other sample"]\n    >>> references = ["this is the reference", "there is another one"]\n    >>> wer = datasets.load_metric("wer")\n    >>> wer_score = wer.compute(predictions=predictions, references=references)\n    >>> print(wer_score)\n    0.5\n'


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class WER(datasets.Metric):
    """Word error rate metric: (S + D + I) / (S + D + C), computed via jiwer."""

    def _info(self):
        # Metric metadata consumed by the `datasets` loading machinery.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/jitsi/jiwer/"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/Word_error_rate",
            ],
        )

    def _compute(self, predictions=None, references=None, concatenate_texts=False):
        """Return the corpus-level WER of `predictions` against `references`.

        When `concatenate_texts` is True, jiwer scores all texts in one call;
        otherwise error counts are accumulated pair by pair, which avoids
        building one huge string but yields the same weighted average.
        """
        if concatenate_texts:
            # jiwer expects ground truth first, hypothesis second.
            return compute_measures(references, predictions)["wer"]
        incorrect = 0  # S + D + I, summed over all pairs
        total = 0  # S + D + C (reference length), summed over all pairs
        for prediction, reference in zip(predictions, references):
            measures = compute_measures(reference, prediction)
            incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
            total += measures["substitutions"] + measures["deletions"] + measures["hits"]
        return incorrect / total
434
1
# TensorFlow MBart model tests: a config/inputs builder, an input-dict helper,
# a mixin-based model test class, and a slow en->ro translation integration test.
# NOTE(review): identifier mangling has damaged this module in several places
# (duplicate parameter names, never-bound locals); flags are left inline below.
from __future__ import annotations

import unittest

from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import TFAutoModelForSeqaSeqLM, TFMBartForConditionalGeneration, TFMBartModel


@require_tf
class lowerCamelCase__:
    # Helper that builds a tiny MBart config plus random inputs for the TF tests.
    # NOTE(review): the three class attributes below all share one mangled name, so
    # only the last assignment ('gelu') survives; methods later read self.config_cls
    # and self.config_updates, which are never bound here — confirm against the
    # unmangled upstream tester.
    UpperCAmelCase__ : List[str] = MBartConfig
    UpperCAmelCase__ : str = {}
    UpperCAmelCase__ : int = 'gelu'

    # NOTE(review): every parameter shares the mangled name UpperCamelCase_, which
    # is a duplicate-argument SyntaxError, and the body assigns to one mangled
    # local while referencing the intended names (parent, batch_size, ...).
    def __init__( self: List[str] , UpperCamelCase_: List[str] , UpperCamelCase_: str=13 , UpperCamelCase_: Union[str, Any]=7 , UpperCamelCase_: Dict=True , UpperCamelCase_: Union[str, Any]=False , UpperCamelCase_: str=99 , UpperCamelCase_: Optional[Any]=32 , UpperCamelCase_: Any=2 , UpperCamelCase_: List[str]=4 , UpperCamelCase_: Union[str, Any]=37 , UpperCamelCase_: List[str]=0.1 , UpperCamelCase_: str=0.1 , UpperCamelCase_: Tuple=20 , UpperCamelCase_: Optional[Any]=2 , UpperCamelCase_: List[Any]=1 , UpperCamelCase_: Dict=0 , ):
        __lowerCamelCase = parent
        __lowerCamelCase = batch_size
        __lowerCamelCase = seq_length
        __lowerCamelCase = is_training
        __lowerCamelCase = use_labels
        __lowerCamelCase = vocab_size
        __lowerCamelCase = hidden_size
        __lowerCamelCase = num_hidden_layers
        __lowerCamelCase = num_attention_heads
        __lowerCamelCase = intermediate_size
        __lowerCamelCase = hidden_dropout_prob
        __lowerCamelCase = attention_probs_dropout_prob
        __lowerCamelCase = max_position_embeddings
        __lowerCamelCase = eos_token_id
        __lowerCamelCase = pad_token_id
        __lowerCamelCase = bos_token_id

    def lowerCAmelCase__ ( self: Union[str, Any] ):
        # Builds (config, inputs_dict): random token ids with a forced EOS column,
        # plus random decoder ids, fed through prepare_mbart_inputs_dict.
        __lowerCamelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
        __lowerCamelCase = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
        # NOTE(review): input_ids / eos_tensor below are never bound (mangled assignments).
        __lowerCamelCase = tf.concat([input_ids, eos_tensor] , axis=1 )
        __lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        __lowerCamelCase = self.config_cls(
            vocab_size=self.vocab_size ,
            d_model=self.hidden_size ,
            encoder_layers=self.num_hidden_layers ,
            decoder_layers=self.num_hidden_layers ,
            encoder_attention_heads=self.num_attention_heads ,
            decoder_attention_heads=self.num_attention_heads ,
            encoder_ffn_dim=self.intermediate_size ,
            decoder_ffn_dim=self.intermediate_size ,
            dropout=self.hidden_dropout_prob ,
            attention_dropout=self.attention_probs_dropout_prob ,
            max_position_embeddings=self.max_position_embeddings ,
            eos_token_ids=[2] ,
            bos_token_id=self.bos_token_id ,
            pad_token_id=self.pad_token_id ,
            decoder_start_token_id=self.pad_token_id ,
            **self.config_updates ,
        )
        __lowerCamelCase = prepare_mbart_inputs_dict(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
        return config, inputs_dict

    # NOTE(review): duplicate parameter names again (SyntaxError), and
    # past_key_values below is never bound — the cache-consistency checks of the
    # upstream test appear truncated/mangled here.
    def lowerCAmelCase__ ( self: Optional[int] , UpperCamelCase_: int , UpperCamelCase_: Optional[int] ):
        __lowerCamelCase = TFMBartModel(config=UpperCamelCase_ ).get_decoder()
        __lowerCamelCase = inputs_dict["""input_ids"""]

        __lowerCamelCase = input_ids[:1, :]
        __lowerCamelCase = inputs_dict["""attention_mask"""][:1, :]
        __lowerCamelCase = inputs_dict["""head_mask"""]
        __lowerCamelCase = 1

        # first forward pass
        __lowerCamelCase = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , head_mask=UpperCamelCase_ , use_cache=UpperCamelCase_ )

        __lowerCamelCase, __lowerCamelCase = outputs.to_tuple()
        __lowerCamelCase = past_key_values[1]


# Fills in default attention/head masks for an MBart forward call.
# NOTE(review): all parameters share the mangled name A__ (duplicate-argument
# SyntaxError); the body references the intended names (config, input_ids, ...).
def lowerCamelCase__ ( A__ : Optional[int] , A__ : Dict , A__ : Dict , A__ : Tuple=None , A__ : List[Any]=None , A__ : List[str]=None , A__ : Any=None , A__ : Optional[Any]=None , ):
    '''simple docstring'''
    if attention_mask is None:
        # attend to everything that is not padding
        __lowerCamelCase = tf.cast(tf.math.not_equal(A__ , config.pad_token_id ) , tf.inta )
    if decoder_attention_mask is None:
        # always attend to the first decoder token, then mask padding
        __lowerCamelCase = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
            ] ,
            axis=-1 ,
        )
    if head_mask is None:
        __lowerCamelCase = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        __lowerCamelCase = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        __lowerCamelCase = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }


@require_tf
class lowerCamelCase__( __lowerCamelCase , __lowerCamelCase , unittest.TestCase):
    # Common model tests for TF MBart.
    # NOTE(review): the two mixin base classes were mangled to the undefined name
    # __lowerCamelCase — presumably TFModelTesterMixin and PipelineTesterMixin.
    UpperCAmelCase__ : int = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
    UpperCAmelCase__ : List[str] = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
    UpperCAmelCase__ : Union[str, Any] = (
        {
            'conversational': TFMBartForConditionalGeneration,
            'feature-extraction': TFMBartModel,
            'summarization': TFMBartForConditionalGeneration,
            'text2text-generation': TFMBartForConditionalGeneration,
            'translation': TFMBartForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    UpperCAmelCase__ : Any = True
    UpperCAmelCase__ : Union[str, Any] = False
    UpperCAmelCase__ : Union[str, Any] = False

    # Skip every pipeline test except feature extraction.
    # NOTE(review): parameters are mangled, so pipeline_test_casse_name below is unbound.
    def lowerCAmelCase__ ( self: Optional[Any] , UpperCamelCase_: Tuple , UpperCamelCase_: Dict , UpperCamelCase_: int , UpperCamelCase_: Dict , UpperCamelCase_: str ):
        if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
            # Exception encountered when calling layer '...'
            return True

        return False

    def lowerCAmelCase__ ( self: Any ):
        # NOTE(review): TFMBartModelTester is not defined in this mangled module,
        # and UpperCamelCase_ is unbound here — upstream passes the config class.
        __lowerCamelCase = TFMBartModelTester(self )
        __lowerCamelCase = ConfigTester(self , config_class=UpperCamelCase_ )

    def lowerCAmelCase__ ( self: Dict ):
        self.config_tester.run_common_tests()

    def lowerCAmelCase__ ( self: Optional[int] ):
        # NOTE(review): UpperCamelCase_ is unbound — upstream unpacks the prepared inputs.
        __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*UpperCamelCase_ )


@require_sentencepiece
@require_tokenizers
@require_tf
class lowerCamelCase__( unittest.TestCase):
    # Slow integration test: en->ro translation with facebook/mbart-large-en-ro.
    UpperCAmelCase__ : Any = [
        ' UN Chief Says There Is No Military Solution in Syria',
    ]
    UpperCAmelCase__ : Tuple = [
        'Şeful ONU declară că nu există o soluţie militară în Siria',
    ]
    UpperCAmelCase__ : str = 'facebook/mbart-large-en-ro'

    @cached_property
    def lowerCAmelCase__ ( self: Tuple ):
        return AutoTokenizer.from_pretrained(self.model_name )

    @cached_property
    def lowerCAmelCase__ ( self: List[Any] ):
        __lowerCamelCase = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
        return model

    def lowerCAmelCase__ ( self: Union[str, Any] , **UpperCamelCase_: Union[str, Any] ):
        # Compare generated translations to the expected Romanian text.
        __lowerCamelCase = self.translate_src_text(**UpperCamelCase_ )
        self.assertListEqual(self.expected_text , UpperCamelCase_ )

    def lowerCAmelCase__ ( self: Optional[int] , **UpperCamelCase_: List[Any] ):
        # Tokenize, generate with beam search, and decode.
        __lowerCamelCase = self.tokenizer(self.src_text , **UpperCamelCase_ , return_tensors="""tf""" )
        __lowerCamelCase = self.model.generate(
            model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 )
        __lowerCamelCase = self.tokenizer.batch_decode(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ )
        return generated_words

    @slow
    def lowerCAmelCase__ ( self: List[Any] ):
        self._assert_generated_batch_equal_expected()
704
def check_cycle(graph: dict) -> bool:
    """Return True if the directed graph contains a cycle.

    `graph` is an adjacency mapping: node -> iterable of successor nodes.
    Every node reachable as a successor must also appear as a key.

    Fix: the original obfuscation gave both functions the same name (the
    helper shadowed this entry point), collapsed `visited`/`rec_stk` into one
    local, and called an undefined `depth_first_search` with wrong arguments.
    """
    visited: set = set()
    # To detect a back edge, keep track of vertices currently in the recursion stack
    rec_stk: set = set()
    return any(
        node not in visited and depth_first_search(graph, node, visited, rec_stk)
        for node in graph
    )


def depth_first_search(graph: dict, vertex: int, visited: set, rec_stk: set) -> bool:
    """DFS helper: return True if a back edge is reachable from `vertex`.

    A back edge exists when a successor is already on the current recursion
    stack (`rec_stk`), which is exactly a directed cycle.
    """
    visited.add(vertex)
    rec_stk.add(vertex)

    for node in graph[vertex]:
        if node not in visited:
            if depth_first_search(graph, node, visited, rec_stk):
                return True
        elif node in rec_stk:
            return True

    # The node needs to be removed from recursion stack before function ends
    rec_stk.remove(vertex)
    return False


if __name__ == "__main__":
    from doctest import testmod

    testmod()
80
0
# Fine-tunes a pretrained audio model (default wav2vec2-base) for audio
# classification with the Hugging Face Trainer.
# NOTE(review): identifier mangling has damaged this script in several places
# (both dataclasses named __a, duplicate function parameters, locals collapsed
# into one name, main defined as __magic_name__); flags are left inline.
'''simple docstring'''
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional

import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset

import transformers
from transformers import (
    AutoConfig,
    AutoFeatureExtractor,
    AutoModelForAudioClassification,
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version


a = logging.getLogger(__name__)

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")

require_version("datasets>=1.14.0", "To fix: pip install -r examples/pytorch/audio-classification/requirements.txt")


# Randomly crops a waveform to at most `max_length` seconds.
# NOTE(review): all three parameters share the mangled name __UpperCAmelCase
# (duplicate-argument SyntaxError); the body references the intended names
# (wav, sample_rate, max_length) which are unbound here.
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = 16000 ) -> Tuple:
    '''simple docstring'''
    __SCREAMING_SNAKE_CASE = int(round(sample_rate * max_length ) )
    if len(__UpperCAmelCase ) <= sample_length:
        return wav
    __SCREAMING_SNAKE_CASE = randint(0 , len(__UpperCAmelCase ) - sample_length - 1 )
    return wav[random_offset : random_offset + sample_length]


@dataclass
class __a :
    # Data-side CLI arguments (dataset name, splits, column names, subsampling).
    # NOTE(review): every field shares one mangled name __UpperCamelCase, and
    # default=_snake_case refers to an undefined name (upstream uses None).
    __UpperCamelCase : Optional[str] = field(default=_snake_case, metadata={'help': 'Name of a dataset from the datasets package'} )
    __UpperCamelCase : Optional[str] = field(
        default=_snake_case, metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
    __UpperCamelCase : Optional[str] = field(
        default=_snake_case, metadata={'help': 'A file containing the training audio paths and labels.'} )
    __UpperCamelCase : Optional[str] = field(
        default=_snake_case, metadata={'help': 'A file containing the validation audio paths and labels.'} )
    __UpperCamelCase : str = field(
        default='train', metadata={
            'help': 'The name of the training data set split to use (via the datasets library). Defaults to \'train\''
        }, )
    __UpperCamelCase : str = field(
        default='validation', metadata={
            'help': (
                'The name of the training data set split to use (via the datasets library). Defaults to \'validation\''
            )
        }, )
    __UpperCamelCase : str = field(
        default='audio', metadata={'help': 'The name of the dataset column containing the audio data. Defaults to \'audio\''}, )
    __UpperCamelCase : str = field(
        default='label', metadata={'help': 'The name of the dataset column containing the labels. Defaults to \'label\''} )
    __UpperCamelCase : Optional[int] = field(
        default=_snake_case, metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of training examples to this '
                'value if set.'
            )
        }, )
    __UpperCamelCase : Optional[int] = field(
        default=_snake_case, metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
                'value if set.'
            )
        }, )
    __UpperCamelCase : float = field(
        default=20, metadata={'help': 'Audio clips will be randomly cut to this length during training if the value is set.'}, )


@dataclass
class __a :
    # Model-side CLI arguments. NOTE(review): this class reuses the mangled name
    # __a, so it shadows the data-arguments dataclass above.
    __UpperCamelCase : str = field(
        default='facebook/wav2vec2-base', metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'}, )
    __UpperCamelCase : Optional[str] = field(
        default=_snake_case, metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
    __UpperCamelCase : Optional[str] = field(
        default=_snake_case, metadata={'help': 'Where do you want to store the pretrained models downloaded from the Hub'} )
    __UpperCamelCase : str = field(
        default='main', metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'}, )
    __UpperCamelCase : Optional[str] = field(
        default=_snake_case, metadata={'help': 'Name or path of preprocessor config.'} )
    __UpperCamelCase : bool = field(
        default=_snake_case, metadata={'help': 'Whether to freeze the feature encoder layers of the model.'} )
    __UpperCamelCase : bool = field(
        default=_snake_case, metadata={'help': 'Whether to generate an attention mask in the feature extractor.'} )
    __UpperCamelCase : bool = field(
        default=_snake_case, metadata={
            'help': (
                'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
                'with private models).'
            )
        }, )
    __UpperCamelCase : Optional[bool] = field(
        default=_snake_case, metadata={'help': 'Whether to freeze the feature extractor layers of the model.'} )
    __UpperCamelCase : bool = field(
        default=_snake_case, metadata={'help': 'Will enable to load a pretrained model whose head dimensions are different.'}, )

    # Post-init validation of the deprecated --freeze_feature_extractor flag.
    # NOTE(review): lowerCamelCase in the warn() call is an unbound mangled name.
    def UpperCAmelCase__ ( self : Dict ):
        '''simple docstring'''
        if not self.freeze_feature_extractor and self.freeze_feature_encoder:
            warnings.warn(
                """The argument `--freeze_feature_extractor` is deprecated and """
                """will be removed in a future version. Use `--freeze_feature_encoder`"""
                """instead. Setting `freeze_feature_encoder==True`.""" ,lowerCamelCase ,)
        if self.freeze_feature_extractor and not self.freeze_feature_encoder:
            raise ValueError(
                """The argument `--freeze_feature_extractor` is deprecated and """
                """should not be used in combination with `--freeze_feature_encoder`."""
                """Only make use of `--freeze_feature_encoder`.""" )


# Script entry point. NOTE(review): mangled to __magic_name__ even though the
# __main__ guard below calls main(); nearly all locals are collapsed into
# __SCREAMING_SNAKE_CASE while later statements read the intended names
# (parser, model_args, data_args, training_args, raw_datasets, ...).
def __magic_name__ ( ) -> int:
    '''simple docstring'''
    __SCREAMING_SNAKE_CASE = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("""run_audio_classification""" , __UpperCAmelCase , __UpperCAmelCase )

    # Setup logging
    logging.basicConfig(
        format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    __SCREAMING_SNAKE_CASE = training_args.get_process_log_level()
    logger.setLevel(__UpperCAmelCase )
    transformers.utils.logging.set_verbosity(__UpperCAmelCase )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} """
        + f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
    logger.info(f"""Training/evaluation parameters {training_args}""" )

    # Set seed before initializing model.
    set_seed(training_args.seed )

    # Detecting last checkpoint.
    __SCREAMING_SNAKE_CASE = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        __SCREAMING_SNAKE_CASE = get_last_checkpoint(training_args.output_dir )
        if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
            raise ValueError(
                f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
                """Use --overwrite_output_dir to train from scratch.""" )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
                """the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )

    # Initialize our dataset and prepare it for the audio classification task.
    __SCREAMING_SNAKE_CASE = DatasetDict()
    __SCREAMING_SNAKE_CASE = load_dataset(
        data_args.dataset_name , data_args.dataset_config_name , split=data_args.train_split_name , use_auth_token=True if model_args.use_auth_token else None , )
    __SCREAMING_SNAKE_CASE = load_dataset(
        data_args.dataset_name , data_args.dataset_config_name , split=data_args.eval_split_name , use_auth_token=True if model_args.use_auth_token else None , )

    if data_args.audio_column_name not in raw_datasets["train"].column_names:
        raise ValueError(
            f"""--audio_column_name {data_args.audio_column_name} not found in dataset '{data_args.dataset_name}'. """
            """Make sure to set `--audio_column_name` to the correct audio column - one of """
            f"""{', '.join(raw_datasets['train'].column_names )}.""" )

    if data_args.label_column_name not in raw_datasets["train"].column_names:
        raise ValueError(
            f"""--label_column_name {data_args.label_column_name} not found in dataset '{data_args.dataset_name}'. """
            """Make sure to set `--label_column_name` to the correct text column - one of """
            f"""{', '.join(raw_datasets['train'].column_names )}.""" )

    # Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
    # transformer outputs in the classifier, but it doesn't always lead to better accuracy
    __SCREAMING_SNAKE_CASE = AutoFeatureExtractor.from_pretrained(
        model_args.feature_extractor_name or model_args.model_name_or_path , return_attention_mask=model_args.attention_mask , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )

    # `datasets` takes care of automatically loading and resampling the audio,
    # so we just need to set the correct target sampling rate.
    __SCREAMING_SNAKE_CASE = raw_datasets.cast_column(
        data_args.audio_column_name , datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) )

    __SCREAMING_SNAKE_CASE = feature_extractor.model_input_names[0]

    def train_transforms(__UpperCAmelCase ):
        # Randomly crop each training clip, then run the feature extractor.
        __SCREAMING_SNAKE_CASE = []
        for audio in batch[data_args.audio_column_name]:
            __SCREAMING_SNAKE_CASE = random_subsample(
                audio["""array"""] , max_length=data_args.max_length_seconds , sample_rate=feature_extractor.sampling_rate )
            subsampled_wavs.append(__UpperCAmelCase )
        __SCREAMING_SNAKE_CASE = feature_extractor(__UpperCAmelCase , sampling_rate=feature_extractor.sampling_rate )
        __SCREAMING_SNAKE_CASE = {model_input_name: inputs.get(__UpperCAmelCase )}
        __SCREAMING_SNAKE_CASE = list(batch[data_args.label_column_name] )

        return output_batch

    def val_transforms(__UpperCAmelCase ):
        # Full-length clips for evaluation (no random crop).
        __SCREAMING_SNAKE_CASE = [audio["""array"""] for audio in batch[data_args.audio_column_name]]
        __SCREAMING_SNAKE_CASE = feature_extractor(__UpperCAmelCase , sampling_rate=feature_extractor.sampling_rate )
        __SCREAMING_SNAKE_CASE = {model_input_name: inputs.get(__UpperCAmelCase )}
        __SCREAMING_SNAKE_CASE = list(batch[data_args.label_column_name] )

        return output_batch

    # Prepare label mappings.
    # We'll include these in the model's config to get human readable labels in the Inference API.
    __SCREAMING_SNAKE_CASE = raw_datasets["""train"""].features[data_args.label_column_name].names
    __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = {}, {}
    for i, label in enumerate(__UpperCAmelCase ):
        __SCREAMING_SNAKE_CASE = str(__UpperCAmelCase )
        __SCREAMING_SNAKE_CASE = label

    # Load the accuracy metric from the datasets package
    __SCREAMING_SNAKE_CASE = evaluate.load("""accuracy""" )

    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
    # `predictions` and `label_ids` fields) and has to return a dictionary string to float.
    def compute_metrics(__UpperCAmelCase ):
        __SCREAMING_SNAKE_CASE = np.argmax(eval_pred.predictions , axis=1 )
        return metric.compute(predictions=__UpperCAmelCase , references=eval_pred.label_ids )

    __SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path , num_labels=len(__UpperCAmelCase ) , labelaid=__UpperCAmelCase , idalabel=__UpperCAmelCase , finetuning_task="""audio-classification""" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    __SCREAMING_SNAKE_CASE = AutoModelForAudioClassification.from_pretrained(
        model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=__UpperCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )

    # freeze the convolutional waveform encoder
    if model_args.freeze_feature_encoder:
        model.freeze_feature_encoder()

    if training_args.do_train:
        if data_args.max_train_samples is not None:
            __SCREAMING_SNAKE_CASE = (
                raw_datasets["""train"""].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
            )
        # Set the training transforms
        raw_datasets["train"].set_transform(__UpperCAmelCase , output_all_columns=__UpperCAmelCase )

    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            __SCREAMING_SNAKE_CASE = (
                raw_datasets["""eval"""].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
            )
        # Set the validation transforms
        raw_datasets["eval"].set_transform(__UpperCAmelCase , output_all_columns=__UpperCAmelCase )

    # Initialize our trainer
    __SCREAMING_SNAKE_CASE = Trainer(
        model=__UpperCAmelCase , args=__UpperCAmelCase , train_dataset=raw_datasets["""train"""] if training_args.do_train else None , eval_dataset=raw_datasets["""eval"""] if training_args.do_eval else None , compute_metrics=__UpperCAmelCase , tokenizer=__UpperCAmelCase , )

    # Training
    if training_args.do_train:
        __SCREAMING_SNAKE_CASE = None
        if training_args.resume_from_checkpoint is not None:
            __SCREAMING_SNAKE_CASE = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            __SCREAMING_SNAKE_CASE = last_checkpoint
        __SCREAMING_SNAKE_CASE = trainer.train(resume_from_checkpoint=__UpperCAmelCase )
        trainer.save_model()
        trainer.log_metrics("""train""" , train_result.metrics )
        trainer.save_metrics("""train""" , train_result.metrics )
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        __SCREAMING_SNAKE_CASE = trainer.evaluate()
        trainer.log_metrics("""eval""" , __UpperCAmelCase )
        trainer.save_metrics("""eval""" , __UpperCAmelCase )

    # Write model card and (optionally) push to hub
    __SCREAMING_SNAKE_CASE = {
        """finetuned_from""": model_args.model_name_or_path,
        """tasks""": """audio-classification""",
        """dataset""": data_args.dataset_name,
        """tags""": ["""audio-classification"""],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**__UpperCAmelCase )
    else:
        trainer.create_model_card(**__UpperCAmelCase )


if __name__ == "__main__":
    main()
109
# Lazy-import package init for the RoBERTa-PreLayerNorm model: registers the
# available symbols per backend (torch / TF / Flax) and defers real imports
# until attribute access via _LazyModule.
# NOTE(review): mangling rebinds _A for every backend list where upstream adds
# entries to _import_structure; the final _LazyModule call references
# _import_structure, which is never defined here — confirm against upstream.
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


_A = {
    """configuration_roberta_prelayernorm""": [
        """ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP""",
        """RobertaPreLayerNormConfig""",
        """RobertaPreLayerNormOnnxConfig""",
    ],
}

# PyTorch symbols, only when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _A = [
        """ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """RobertaPreLayerNormForCausalLM""",
        """RobertaPreLayerNormForMaskedLM""",
        """RobertaPreLayerNormForMultipleChoice""",
        """RobertaPreLayerNormForQuestionAnswering""",
        """RobertaPreLayerNormForSequenceClassification""",
        """RobertaPreLayerNormForTokenClassification""",
        """RobertaPreLayerNormModel""",
        """RobertaPreLayerNormPreTrainedModel""",
    ]

# TensorFlow symbols, only when TF is installed.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _A = [
        """TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """TFRobertaPreLayerNormForCausalLM""",
        """TFRobertaPreLayerNormForMaskedLM""",
        """TFRobertaPreLayerNormForMultipleChoice""",
        """TFRobertaPreLayerNormForQuestionAnswering""",
        """TFRobertaPreLayerNormForSequenceClassification""",
        """TFRobertaPreLayerNormForTokenClassification""",
        """TFRobertaPreLayerNormMainLayer""",
        """TFRobertaPreLayerNormModel""",
        """TFRobertaPreLayerNormPreTrainedModel""",
    ]

# Flax symbols, only when Flax is installed.
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _A = [
        """FlaxRobertaPreLayerNormForCausalLM""",
        """FlaxRobertaPreLayerNormForMaskedLM""",
        """FlaxRobertaPreLayerNormForMultipleChoice""",
        """FlaxRobertaPreLayerNormForQuestionAnswering""",
        """FlaxRobertaPreLayerNormForSequenceClassification""",
        """FlaxRobertaPreLayerNormForTokenClassification""",
        """FlaxRobertaPreLayerNormModel""",
        """FlaxRobertaPreLayerNormPreTrainedModel""",
    ]


# Under static type checking, perform the real imports so checkers see symbols.
if TYPE_CHECKING:
    from .configuration_roberta_prelayernorm import (
        ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
        RobertaPreLayerNormConfig,
        RobertaPreLayerNormOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roberta_prelayernorm import (
            ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
            RobertaPreLayerNormForCausalLM,
            RobertaPreLayerNormForMaskedLM,
            RobertaPreLayerNormForMultipleChoice,
            RobertaPreLayerNormForQuestionAnswering,
            RobertaPreLayerNormForSequenceClassification,
            RobertaPreLayerNormForTokenClassification,
            RobertaPreLayerNormModel,
            RobertaPreLayerNormPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_roberta_prelayernorm import (
            TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFRobertaPreLayerNormForCausalLM,
            TFRobertaPreLayerNormForMaskedLM,
            TFRobertaPreLayerNormForMultipleChoice,
            TFRobertaPreLayerNormForQuestionAnswering,
            TFRobertaPreLayerNormForSequenceClassification,
            TFRobertaPreLayerNormForTokenClassification,
            TFRobertaPreLayerNormMainLayer,
            TFRobertaPreLayerNormModel,
            TFRobertaPreLayerNormPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_roberta_prelayernorm import (
            FlaxRobertaPreLayerNormForCausalLM,
            FlaxRobertaPreLayerNormForMaskedLM,
            FlaxRobertaPreLayerNormForMultipleChoice,
            FlaxRobertaPreLayerNormForQuestionAnswering,
            FlaxRobertaPreLayerNormForSequenceClassification,
            FlaxRobertaPreLayerNormForTokenClassification,
            FlaxRobertaPreLayerNormModel,
            FlaxRobertaPreLayerNormPreTrainedModel,
        )

else:
    # At runtime, replace this module with a lazy proxy.
    import sys

    _A = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
258
0
import string


def decrypt(message: str) -> list[str]:
    """Brute-force a Caesar cipher: print every candidate decryption.

    Tries all 26 shift keys on the uppercase alphabet; characters outside
    A-Z are passed through unchanged. Also returns the 26 candidate strings
    (index = key), a backward-compatible addition — the mangled original
    returned None.

    Fix: the obfuscated source named both functions `_UpperCAmelCase` (the
    second shadowed this one) and left `message`/`translated`/`num` unbound.
    """
    translations = []
    for key in range(len(string.ascii_uppercase)):
        translated = ""
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol) - key
                if num < 0:
                    # wrap around the alphabet
                    num = num + len(string.ascii_uppercase)
                translated = translated + string.ascii_uppercase[num]
            else:
                # non-alphabet symbols are copied verbatim
                translated = translated + symbol
        print(f"Decryption using Key #{key}: {translated}")
        translations.append(translated)
    return translations


def main() -> None:
    """Prompt for an encrypted message and show all 26 decryptions."""
    message = input("Encrypted message: ")
    message = message.upper()
    decrypt(message)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
682
import math
from collections.abc import Callable


def intersection(function: Callable[[float], float], x0: float, x1: float) -> float:
    """Find a root of ``function`` with the secant method, starting from x0, x1.

    Iterates until two successive estimates differ by less than 1e-5.

    Raises:
        ZeroDivisionError: if the two current points (or their function
            values) coincide, making the secant slope undefined.
    """
    x_n = x0
    x_n1 = x1
    while True:
        if x_n == x_n1 or function(x_n1) == function(x_n):
            raise ZeroDivisionError("float division by zero, could not find root")
        # secant update: x_{n+2} = x_{n+1} - f(x_{n+1}) / slope
        x_n2 = x_n1 - (
            function(x_n1) / ((function(x_n1) - function(x_n)) / (x_n1 - x_n))
        )
        # converged when successive estimates agree to 1e-5
        # (original compared a value against itself, which is always 0)
        if abs(x_n2 - x_n1) < 10**-5:
            return x_n2
        x_n = x_n1
        x_n1 = x_n2


def f(x: float) -> float:
    """Demo polynomial: f(x) = x^3 - 2x - 5 (real root near 2.0946)."""
    return math.pow(x, 3) - (2 * x) - 5


if __name__ == "__main__":
    print(intersection(f, 3, 3.5))
682
1
# NOTE(review): identifiers in this block are machine-mangled — every method is
# named `lowerCAmelCase_`, every local is bound to `__A`, and the base class
# `_lowercase` plus the typing names (List, Any, Optional, Union, Dict, Tuple)
# are never imported here. Consequently many names read below (`config`,
# `kwargs`, `sample`, `residual`, `scheduler`, `new_scheduler`, `output`,
# `new_output`, `num_inference_steps`, `model`, `dummy_past_residuals`) are
# undefined as written. Comments describe the apparent intent only; code is
# left byte-identical pending a proper de-mangling pass.
import tempfile

import torch

from diffusers import PNDMScheduler

from .test_schedulers import SchedulerCommonTest


class lowerCamelCase_ ( _lowercase ):
    # Scheduler classes under test and default forward kwargs — presumably the
    # SchedulerCommonTest convention (`scheduler_classes`,
    # `forward_default_kwargs`); TODO confirm, base class is undefined here.
    _lowercase : str = (PNDMScheduler,)
    _lowercase : List[Any] = (('''num_inference_steps''', 50),)

    def lowerCAmelCase_ ( self : str , **__A : Optional[int] ):
        # Build a default PNDM scheduler config dict, then apply keyword
        # overrides. NOTE(review): the dict is bound to `__A` but `config` is
        # what gets updated/returned — mangled local name.
        __A : List[str] = {
            """num_train_timesteps""": 1000,
            """beta_start""": 0.0_0_0_1,
            """beta_end""": 0.0_2,
            """beta_schedule""": """linear""",
        }
        config.update(**__A )
        return config

    def lowerCAmelCase_ ( self : List[str] , __A : str=0 , **__A : List[str] ):
        # Save/reload round-trip: configure a scheduler, persist it with
        # save_config, reload with from_pretrained, and assert that
        # step_prk / step_plms outputs agree to 1e-5.
        __A : Tuple = dict(self.forward_default_kwargs )
        __A : str = kwargs.pop("""num_inference_steps""" , __A )
        __A : str = self.dummy_sample
        __A : Any = 0.1 * sample
        __A : List[str] = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
        for scheduler_class in self.scheduler_classes:
            __A : Dict = self.get_scheduler_config(**__A )
            __A : Optional[Any] = scheduler_class(**__A )
            scheduler.set_timesteps(__A )
            # copy over dummy past residuals
            __A : Union[str, Any] = dummy_past_residuals[:]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(__A )
                __A : str = scheduler_class.from_pretrained(__A )
                new_scheduler.set_timesteps(__A )
                # copy over dummy past residuals
                __A : Any = dummy_past_residuals[:]
            __A : List[str] = scheduler.step_prk(__A , __A , __A , **__A ).prev_sample
            __A : int = new_scheduler.step_prk(__A , __A , __A , **__A ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
            __A : int = scheduler.step_plms(__A , __A , __A , **__A ).prev_sample
            __A : List[str] = new_scheduler.step_plms(__A , __A , __A , **__A ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"

    def lowerCAmelCase_ ( self : str ):
        # Intentionally a no-op — presumably overrides/disables a base-class
        # check that does not apply to PNDM; TODO confirm.
        pass

    def lowerCAmelCase_ ( self : Optional[int] , __A : Dict=0 , **__A : str ):
        # Same save/reload round-trip as above, but using the default config
        # (timesteps set, and residuals copied, only after set_timesteps).
        __A : List[str] = dict(self.forward_default_kwargs )
        __A : Dict = kwargs.pop("""num_inference_steps""" , __A )
        __A : int = self.dummy_sample
        __A : Tuple = 0.1 * sample
        __A : Optional[int] = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
        for scheduler_class in self.scheduler_classes:
            __A : Any = self.get_scheduler_config()
            __A : Union[str, Any] = scheduler_class(**__A )
            scheduler.set_timesteps(__A )
            # copy over dummy past residuals (must be after setting timesteps)
            __A : Dict = dummy_past_residuals[:]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(__A )
                __A : List[str] = scheduler_class.from_pretrained(__A )
                # copy over dummy past residuals
                new_scheduler.set_timesteps(__A )
                # copy over dummy past residual (must be after setting timesteps)
                __A : List[Any] = dummy_past_residuals[:]
            __A : List[str] = scheduler.step_prk(__A , __A , __A , **__A ).prev_sample
            __A : Any = new_scheduler.step_prk(__A , __A , __A , **__A ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
            __A : Tuple = scheduler.step_plms(__A , __A , __A , **__A ).prev_sample
            __A : Tuple = new_scheduler.step_plms(__A , __A , __A , **__A ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"

    def lowerCAmelCase_ ( self : Union[str, Any] , **__A : Optional[Any] ):
        # Full denoising loop helper: runs the dummy model through all PRK
        # timesteps, then all PLMS timesteps, and returns the final sample.
        __A : int = self.scheduler_classes[0]
        __A : List[Any] = self.get_scheduler_config(**__A )
        __A : List[Any] = scheduler_class(**__A )
        __A : Optional[int] = 10
        __A : Optional[Any] = self.dummy_model()
        __A : Optional[Any] = self.dummy_sample_deter
        scheduler.set_timesteps(__A )
        for i, t in enumerate(scheduler.prk_timesteps ):
            __A : Tuple = model(__A , __A )
            __A : List[str] = scheduler.step_prk(__A , __A , __A ).prev_sample
        for i, t in enumerate(scheduler.plms_timesteps ):
            __A : Optional[Any] = model(__A , __A )
            __A : Tuple = scheduler.step_plms(__A , __A , __A ).prev_sample
        return sample

    def lowerCAmelCase_ ( self : str ):
        # Shape check: step_prk / step_plms at timesteps 0 and 1 must preserve
        # the sample's shape.
        __A : Tuple = dict(self.forward_default_kwargs )
        __A : Optional[int] = kwargs.pop("""num_inference_steps""" , __A )
        for scheduler_class in self.scheduler_classes:
            __A : str = self.get_scheduler_config()
            __A : List[str] = scheduler_class(**__A )
            __A : Any = self.dummy_sample
            __A : str = 0.1 * sample
            if num_inference_steps is not None and hasattr(__A , """set_timesteps""" ):
                scheduler.set_timesteps(__A )
            elif num_inference_steps is not None and not hasattr(__A , """set_timesteps""" ):
                __A : Union[str, Any] = num_inference_steps
            # copy over dummy past residuals (must be done after set_timesteps)
            __A : Optional[int] = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
            __A : Any = dummy_past_residuals[:]
            __A : Any = scheduler.step_prk(__A , 0 , __A , **__A ).prev_sample
            __A : Tuple = scheduler.step_prk(__A , 1 , __A , **__A ).prev_sample
            self.assertEqual(output_a.shape , sample.shape )
            self.assertEqual(output_a.shape , output_a.shape )
            __A : Dict = scheduler.step_plms(__A , 0 , __A , **__A ).prev_sample
            __A : List[Any] = scheduler.step_plms(__A , 1 , __A , **__A ).prev_sample
            self.assertEqual(output_a.shape , sample.shape )
            self.assertEqual(output_a.shape , output_a.shape )

    def lowerCAmelCase_ ( self : Optional[int] ):
        # Sweep num_train_timesteps values through the common config check.
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=__A )

    def lowerCAmelCase_ ( self : Optional[int] ):
        # Sweep steps_offset, then pin the exact timestep schedule produced
        # with steps_offset=1 and 10 inference steps.
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=__A )
        __A : int = self.scheduler_classes[0]
        __A : Any = self.get_scheduler_config(steps_offset=1 )
        __A : List[str] = scheduler_class(**__A )
        scheduler.set_timesteps(10 )
        assert torch.equal(
            scheduler.timesteps ,
            torch.LongTensor(
                [901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1] ) , )

    def lowerCAmelCase_ ( self : Any ):
        # Sweep (beta_start, beta_end) pairs.
        for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1] , [0.0_0_2, 0.0_2] ):
            self.check_over_configs(beta_start=__A , beta_end=__A )

    def lowerCAmelCase_ ( self : str ):
        # Sweep supported beta schedules.
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=__A )

    def lowerCAmelCase_ ( self : List[str] ):
        # Sweep supported prediction types.
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=__A )

    def lowerCAmelCase_ ( self : Optional[Any] ):
        # Sweep individual timesteps through the forward check.
        for t in [1, 5, 10]:
            self.check_over_forward(time_step=__A )

    def lowerCAmelCase_ ( self : Any ):
        # Sweep (timestep, num_inference_steps) pairs through the forward check.
        for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ):
            self.check_over_forward(num_inference_steps=__A )

    def lowerCAmelCase_ ( self : Optional[Any] ):
        # earlier version of set_timesteps() caused an error indexing alpha's with inference steps as power of 3
        __A : Dict = 27
        for scheduler_class in self.scheduler_classes:
            __A : Tuple = self.dummy_sample
            __A : List[str] = 0.1 * sample
            __A : List[Any] = self.get_scheduler_config()
            __A : int = scheduler_class(**__A )
            scheduler.set_timesteps(__A )
            # before power of 3 fix, would error on first step, so we only need to do two
            for i, t in enumerate(scheduler.prk_timesteps[:2] ):
                __A : Any = scheduler.step_prk(__A , __A , __A ).prev_sample

    def lowerCAmelCase_ ( self : List[Any] ):
        # step_plms without a prior set_timesteps() must raise.
        with self.assertRaises(__A ):
            __A : Optional[int] = self.scheduler_classes[0]
            __A : int = self.get_scheduler_config()
            __A : Tuple = scheduler_class(**__A )
            scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample

    def lowerCAmelCase_ ( self : Dict ):
        # Regression check on the full loop output statistics (epsilon pred).
        __A : Dict = self.full_loop()
        __A : int = torch.sum(torch.abs(__A ) )
        __A : str = torch.mean(torch.abs(__A ) )
        assert abs(result_sum.item() - 1_9_8.1_3_1_8 ) < 1e-2
        assert abs(result_mean.item() - 0.2_5_8_0 ) < 1e-3

    def lowerCAmelCase_ ( self : Optional[Any] ):
        # Regression check on the full loop output statistics (v-prediction).
        __A : Union[str, Any] = self.full_loop(prediction_type="""v_prediction""" )
        __A : Tuple = torch.sum(torch.abs(__A ) )
        __A : List[Any] = torch.mean(torch.abs(__A ) )
        assert abs(result_sum.item() - 6_7.3_9_8_6 ) < 1e-2
        assert abs(result_mean.item() - 0.0_8_7_8 ) < 1e-3

    def lowerCAmelCase_ ( self : str ):
        # We specify different beta, so that the first alpha is 0.99
        __A : Optional[Any] = self.full_loop(set_alpha_to_one=__A , beta_start=0.0_1 )
        __A : List[Any] = torch.sum(torch.abs(__A ) )
        __A : Optional[int] = torch.mean(torch.abs(__A ) )
        assert abs(result_sum.item() - 2_3_0.0_3_9_9 ) < 1e-2
        assert abs(result_mean.item() - 0.2_9_9_5 ) < 1e-3

    def lowerCAmelCase_ ( self : Optional[int] ):
        # We specify different beta, so that the first alpha is 0.99
        __A : Optional[int] = self.full_loop(set_alpha_to_one=__A , beta_start=0.0_1 )
        __A : Optional[int] = torch.sum(torch.abs(__A ) )
        __A : List[Any] = torch.mean(torch.abs(__A ) )
        assert abs(result_sum.item() - 1_8_6.9_4_8_2 ) < 1e-2
        assert abs(result_mean.item() - 0.2_4_3_4 ) < 1e-3
17
'''Tests for datasets' Apache Beam based dataset builders.

NOTE(review): identifiers here are machine-mangled — all three classes share
the name `UpperCamelCase__`, both helper functions share `lowerCamelCase__`,
method/parameter names are `a__`/`lowerCAmelCase`, and locals are bound to
`UpperCAmelCase`. As a result several names referenced below are undefined as
written (`DummyBeamDataset`, `NestedBeamDataset`, `get_test_dummy_examples`,
`get_test_nested_examples`, `pipeline`, `builder`, `dset`,
`expected_num_examples`, the base `lowerCAmelCase`). Code is kept
byte-identical pending a proper de-mangling pass; comments describe the
apparent intent.
'''

import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch

import datasets
import datasets.config

from .utils import require_beam


class UpperCamelCase__( datasets.BeamBasedBuilder ):
    # Minimal beam builder over flat {"content": str} examples
    # (presumably the original `DummyBeamDataset` — TODO confirm).
    def a__( self : List[str] )-> int:
        """Declare the dataset's flat string feature schema."""
        return datasets.DatasetInfo(
            features=datasets.Features({'''content''': datasets.Value('''string''' )} ) , supervised_keys=lowerCAmelCase , )

    def a__( self : Union[str, Any] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Optional[Any] )-> int:
        """Expose a single TRAIN split fed by the dummy examples."""
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''examples''': get_test_dummy_examples()} )]

    def a__( self : int , lowerCAmelCase : Dict , lowerCAmelCase : List[str] )-> int:
        """Build the beam PCollection from the in-memory examples."""
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(lowerCAmelCase )


class UpperCamelCase__( datasets.BeamBasedBuilder ):
    # Beam builder over nested {"a": {"b": [str]}} examples
    # (presumably the original `NestedBeamDataset` — TODO confirm).
    def a__( self : Optional[int] )-> str:
        """Declare the dataset's nested sequence feature schema."""
        return datasets.DatasetInfo(
            features=datasets.Features({'''a''': datasets.Sequence({'''b''': datasets.Value('''string''' )} )} ) , supervised_keys=lowerCAmelCase , )

    def a__( self : Tuple , lowerCAmelCase : Dict , lowerCAmelCase : List[str] )-> List[Any]:
        """Expose a single TRAIN split fed by the nested examples."""
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''examples''': get_test_nested_examples()} )
        ]

    def a__( self : List[str] , lowerCAmelCase : List[str] , lowerCAmelCase : List[Any] )-> Optional[int]:
        """Build the beam PCollection from the in-memory examples."""
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(lowerCAmelCase )


def lowerCamelCase__ ( ):
    '''Return three flat (key, {"content": ...}) fixture examples.'''
    return [(i, {"content": content}) for i, content in enumerate(['''foo''', '''bar''', '''foobar'''] )]


def lowerCamelCase__ ( ):
    '''Return three nested (key, {"a": {"b": [...]}}) fixture examples.'''
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(['''foo''', '''bar''', '''foobar'''] )]


class UpperCamelCase__( lowerCAmelCase ):
    # Test suite driving the two builders above end-to-end with the beam
    # DirectRunner. NOTE(review): base class `lowerCAmelCase` is undefined
    # here — presumably `TestCase` from the import above.
    @require_beam
    def a__( self : Optional[int] )-> List[Any]:
        """download_and_prepare writes a single train arrow file + info json
        and the resulting dataset round-trips the fixture examples."""
        UpperCAmelCase = len(get_test_dummy_examples() )
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            UpperCAmelCase = DummyBeamDataset(cache_dir=lowerCAmelCase , beam_runner='''DirectRunner''' )
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(lowerCAmelCase , builder.name , '''default''' , '''0.0.0''' , F"""{builder.name}-train.arrow""" ) ) )
            self.assertDictEqual(builder.info.features , datasets.Features({'''content''': datasets.Value('''string''' )} ) )
            UpperCAmelCase = builder.as_dataset()
            self.assertEqual(dset['''train'''].num_rows , lowerCAmelCase )
            self.assertEqual(dset['''train'''].info.splits['''train'''].num_examples , lowerCAmelCase )
            self.assertDictEqual(dset['''train'''][0] , get_test_dummy_examples()[0][1] )
            self.assertDictEqual(
                dset['''train'''][expected_num_examples - 1] , get_test_dummy_examples()[expected_num_examples - 1][1] )
            self.assertTrue(
                os.path.exists(os.path.join(lowerCAmelCase , builder.name , '''default''' , '''0.0.0''' , '''dataset_info.json''' ) ) )
            del dset

    @require_beam
    def a__( self : Optional[Any] )-> Tuple:
        """Sharded prepare: patch WriteToParquet to force num_shards=2 and
        check both shards exist and all elements survive (order not kept)."""
        import apache_beam as beam

        UpperCAmelCase = beam.io.parquetio.WriteToParquet
        UpperCAmelCase = len(get_test_dummy_examples() )
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            UpperCAmelCase = DummyBeamDataset(cache_dir=lowerCAmelCase , beam_runner='''DirectRunner''' )
            with patch('''apache_beam.io.parquetio.WriteToParquet''' ) as write_parquet_mock:
                UpperCAmelCase = partial(lowerCAmelCase , num_shards=2 )
                builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        lowerCAmelCase , builder.name , '''default''' , '''0.0.0''' , F"""{builder.name}-train-00000-of-00002.arrow""" ) ) )
            # NOTE(review): both existence checks test shard 00000 — the
            # second presumably intended 00001; verify against upstream.
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        lowerCAmelCase , builder.name , '''default''' , '''0.0.0''' , F"""{builder.name}-train-00000-of-00002.arrow""" ) ) )
            self.assertDictEqual(builder.info.features , datasets.Features({'''content''': datasets.Value('''string''' )} ) )
            UpperCAmelCase = builder.as_dataset()
            self.assertEqual(dset['''train'''].num_rows , lowerCAmelCase )
            self.assertEqual(dset['''train'''].info.splits['''train'''].num_examples , lowerCAmelCase )
            # Order is not preserved when sharding, so we just check that all the elements are there
            self.assertListEqual(sorted(dset['''train''']['''content'''] ) , sorted(['''foo''', '''bar''', '''foobar'''] ) )
            self.assertTrue(
                os.path.exists(os.path.join(lowerCAmelCase , builder.name , '''default''' , '''0.0.0''' , '''dataset_info.json''' ) ) )
            del dset

    @require_beam
    def a__( self : Union[str, Any] )-> Any:
        """Preparing without a beam_runner must raise MissingBeamOptions."""
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            UpperCAmelCase = DummyBeamDataset(cache_dir=lowerCAmelCase )
            self.assertRaises(datasets.builder.MissingBeamOptions , builder.download_and_prepare )

    @require_beam
    def a__( self : str )-> int:
        """Same end-to-end check as the first test, for nested features."""
        UpperCAmelCase = len(get_test_nested_examples() )
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            UpperCAmelCase = NestedBeamDataset(cache_dir=lowerCAmelCase , beam_runner='''DirectRunner''' )
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(lowerCAmelCase , builder.name , '''default''' , '''0.0.0''' , F"""{builder.name}-train.arrow""" ) ) )
            self.assertDictEqual(
                builder.info.features , datasets.Features({'''a''': datasets.Sequence({'''b''': datasets.Value('''string''' )} )} ) )
            UpperCAmelCase = builder.as_dataset()
            self.assertEqual(dset['''train'''].num_rows , lowerCAmelCase )
            self.assertEqual(dset['''train'''].info.splits['''train'''].num_examples , lowerCAmelCase )
            self.assertDictEqual(dset['''train'''][0] , get_test_nested_examples()[0][1] )
            self.assertDictEqual(
                dset['''train'''][expected_num_examples - 1] , get_test_nested_examples()[expected_num_examples - 1][1] )
            self.assertTrue(
                os.path.exists(os.path.join(lowerCAmelCase , builder.name , '''default''' , '''0.0.0''' , '''dataset_info.json''' ) ) )
            del dset
210
0
import numpy as np


def power_iteration(
    input_matrix: np.ndarray,
    vector: np.ndarray,
    error_tol: float = 1e-12,
    max_iterations: int = 100,
) -> tuple[float, np.ndarray]:
    """Find the largest-magnitude eigenvalue and its eigenvector by power iteration.

    Args:
        input_matrix: square, real-symmetric or complex-Hermitian matrix.
        vector: initial guess (any vector of matching length).
        error_tol: stop once the relative eigenvalue change is below this.
        max_iterations: hard iteration cap.

    Returns:
        (largest eigenvalue, corresponding unit eigenvector).
    """
    assert np.shape(input_matrix)[0] == np.shape(input_matrix)[1]  # square
    assert np.shape(input_matrix)[0] == np.shape(vector)[0]  # dims match
    # Inputs must be either both complex or both real.
    assert np.iscomplexobj(input_matrix) == np.iscomplexobj(vector)
    is_complex = np.iscomplexobj(input_matrix)
    if is_complex:
        # Ensure complex input_matrix is Hermitian
        assert np.array_equal(input_matrix, input_matrix.conj().T)

    # Iterate until the eigenvalue estimate stabilizes or we hit the cap.
    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1e12
    while not convergence:
        # Multiply matrix by the vector, then normalize the result.
        w = np.dot(input_matrix, vector)
        vector = w / np.linalg.norm(w)
        # Rayleigh quotient (vector is already normalized, so no denominator).
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h, np.dot(input_matrix, vector))
        # Relative change of the eigenvalue estimate.
        error = np.abs(lambda_ - lambda_previous) / lambda_
        iterations += 1
        if error <= error_tol or iterations >= max_iterations:
            convergence = True
        lambda_previous = lambda_

    if is_complex:
        # Hermitian matrices have real eigenvalues; drop the ~0 imaginary part.
        lambda_ = np.real(lambda_)
    return lambda_, vector


def test_power_iteration() -> None:
    """Compare power_iteration against numpy.linalg.eigh on real and complex cases."""
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]])
    real_vector = np.array([41, 4, 20])
    complex_input_matrix = real_input_matrix.astype(np.complex128)
    imag_matrix = np.triu(1j * complex_input_matrix, 1)
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T  # makes the matrix Hermitian
    complex_vector = np.array([41, 4, 20]).astype(np.complex128)
    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector
        # Our implementation.
        eigen_value, eigen_vector = power_iteration(input_matrix, vector)
        # Numpy reference: eigh handles symmetric/Hermitian matrices and
        # returns eigenvalues in ascending order.
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix)
        eigen_value_max = eigen_values[-1]
        eigen_vector_max = eigen_vectors[:, -1]
        assert np.abs(eigen_value - eigen_value_max) <= 1e-6
        # Eigenvectors are unique only up to sign, so compare magnitudes.
        assert np.linalg.norm(np.abs(eigen_vector) - np.abs(eigen_vector_max)) <= 1e-6


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    test_power_iteration()
719
'''simple docstring''' from collections.abc import Callable import numpy as np def _A ( A__ , A__ , A__ , A__ , A__ ): """simple docstring""" __lowercase = int(np.ceil((x_end - xa) / step_size ) ) __lowercase = np.zeros((n + 1,) ) __lowercase = ya __lowercase = xa for k in range(A__ ): __lowercase = y[k] + step_size * ode_func(A__ , y[k] ) x += step_size return y if __name__ == "__main__": import doctest doctest.testmod()
624
0
"""simple docstring""" import argparse import requests import torch # pip3 install salesforce-lavis # I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis from lavis.models import load_model_and_preprocess from PIL import Image from transformers import ( AutoTokenizer, BlipaConfig, BlipaForConditionalGeneration, BlipaProcessor, BlipaVisionConfig, BlipImageProcessor, OPTConfig, TaConfig, ) from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD def __UpperCAmelCase ( ) -> Optional[int]: """simple docstring""" _lowerCAmelCase = """https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png""" _lowerCAmelCase = Image.open(requests.get(snake_case_ , stream=snake_case_ ).raw ).convert("""RGB""" ) return image def __UpperCAmelCase ( snake_case_ : int ) -> Optional[Any]: """simple docstring""" _lowerCAmelCase = [] # fmt: off # vision encoder rename_keys.append(("""visual_encoder.cls_token""", """vision_model.embeddings.class_embedding""") ) rename_keys.append(("""visual_encoder.pos_embed""", """vision_model.embeddings.position_embedding""") ) rename_keys.append(("""visual_encoder.patch_embed.proj.weight""", """vision_model.embeddings.patch_embedding.weight""") ) rename_keys.append(("""visual_encoder.patch_embed.proj.bias""", """vision_model.embeddings.patch_embedding.bias""") ) rename_keys.append(("""ln_vision.weight""", """vision_model.post_layernorm.weight""") ) rename_keys.append(("""ln_vision.bias""", """vision_model.post_layernorm.bias""") ) for i in range(config.vision_config.num_hidden_layers ): rename_keys.append((F"""visual_encoder.blocks.{i}.norm1.weight""", F"""vision_model.encoder.layers.{i}.layer_norm1.weight""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.norm1.bias""", F"""vision_model.encoder.layers.{i}.layer_norm1.bias""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.norm2.weight""", 
F"""vision_model.encoder.layers.{i}.layer_norm2.weight""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.norm2.bias""", F"""vision_model.encoder.layers.{i}.layer_norm2.bias""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.attn.qkv.weight""", F"""vision_model.encoder.layers.{i}.self_attn.qkv.weight""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.attn.proj.weight""", F"""vision_model.encoder.layers.{i}.self_attn.projection.weight""",) ) rename_keys.append((F"""visual_encoder.blocks.{i}.attn.proj.bias""", F"""vision_model.encoder.layers.{i}.self_attn.projection.bias""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc1.weight""", F"""vision_model.encoder.layers.{i}.mlp.fc1.weight""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc1.bias""", F"""vision_model.encoder.layers.{i}.mlp.fc1.bias""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc2.weight""", F"""vision_model.encoder.layers.{i}.mlp.fc2.weight""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc2.bias""", F"""vision_model.encoder.layers.{i}.mlp.fc2.bias""") ) # QFormer rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.weight""", """qformer.layernorm.weight""") ) rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.bias""", """qformer.layernorm.bias""") ) # fmt: on return rename_keys def __UpperCAmelCase ( snake_case_ : str , snake_case_ : str , snake_case_ : Optional[int] ) -> Tuple: """simple docstring""" _lowerCAmelCase = dct.pop(snake_case_ ) _lowerCAmelCase = val def __UpperCAmelCase ( snake_case_ : Optional[Any] , snake_case_ : Dict ) -> Any: """simple docstring""" for i in range(config.vision_config.num_hidden_layers ): # read in original q and v biases _lowerCAmelCase = state_dict.pop(F"""visual_encoder.blocks.{i}.attn.q_bias""" ) _lowerCAmelCase = state_dict.pop(F"""visual_encoder.blocks.{i}.attn.v_bias""" ) # next, set bias in the state dict _lowerCAmelCase = torch.cat((q_bias, torch.zeros_like(snake_case_ , 
requires_grad=snake_case_ ), v_bias) ) _lowerCAmelCase = qkv_bias def __UpperCAmelCase ( snake_case_ : int , snake_case_ : List[Any] ) -> str: """simple docstring""" _lowerCAmelCase = 364 if """coco""" in model_name else 224 _lowerCAmelCase = BlipaVisionConfig(image_size=snake_case_ ).to_dict() # make sure the models have proper bos_token_id and eos_token_id set (important for generation) # seems like flan-T5 models don't have bos_token_id properly set? if "opt-2.7b" in model_name: _lowerCAmelCase = OPTConfig.from_pretrained("""facebook/opt-2.7b""" , eos_token_id=snake_case_ ).to_dict() elif "opt-6.7b" in model_name: _lowerCAmelCase = OPTConfig.from_pretrained("""facebook/opt-6.7b""" , eos_token_id=snake_case_ ).to_dict() elif "t5-xl" in model_name: _lowerCAmelCase = TaConfig.from_pretrained("""google/flan-t5-xl""" , dense_act_fn="""gelu""" , bos_token_id=1 ).to_dict() elif "t5-xxl" in model_name: _lowerCAmelCase = TaConfig.from_pretrained("""google/flan-t5-xxl""" , dense_act_fn="""gelu""" , bos_token_id=1 ).to_dict() _lowerCAmelCase = BlipaConfig(vision_config=snake_case_ , text_config=snake_case_ ) return config, image_size @torch.no_grad() def __UpperCAmelCase ( snake_case_ : str , snake_case_ : Optional[int]=None , snake_case_ : Optional[int]=False ) -> Tuple: """simple docstring""" _lowerCAmelCase = ( AutoTokenizer.from_pretrained("""facebook/opt-2.7b""" ) if """opt""" in model_name else AutoTokenizer.from_pretrained("""google/flan-t5-xl""" ) ) _lowerCAmelCase = tokenizer("""\n""" , add_special_tokens=snake_case_ ).input_ids[0] _lowerCAmelCase , _lowerCAmelCase = get_blipa_config(snake_case_ , eos_token_id=snake_case_ ) _lowerCAmelCase = BlipaForConditionalGeneration(snake_case_ ).eval() _lowerCAmelCase = { """blip2-opt-2.7b""": ("""blip2_opt""", """pretrain_opt2.7b"""), """blip2-opt-6.7b""": ("""blip2_opt""", """pretrain_opt6.7b"""), """blip2-opt-2.7b-coco""": ("""blip2_opt""", """caption_coco_opt2.7b"""), """blip2-opt-6.7b-coco""": ("""blip2_opt""", 
"""caption_coco_opt6.7b"""), """blip2-flan-t5-xl""": ("""blip2_t5""", """pretrain_flant5xl"""), """blip2-flan-t5-xl-coco""": ("""blip2_t5""", """caption_coco_flant5xl"""), """blip2-flan-t5-xxl""": ("""blip2_t5""", """pretrain_flant5xxl"""), } _lowerCAmelCase , _lowerCAmelCase = model_name_to_original[model_name] # load original model print("""Loading original model...""" ) _lowerCAmelCase = """cuda""" if torch.cuda.is_available() else """cpu""" _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = load_model_and_preprocess( name=snake_case_ , model_type=snake_case_ , is_eval=snake_case_ , device=snake_case_ ) original_model.eval() print("""Done!""" ) # update state dict keys _lowerCAmelCase = original_model.state_dict() _lowerCAmelCase = create_rename_keys(snake_case_ ) for src, dest in rename_keys: rename_key(snake_case_ , snake_case_ , snake_case_ ) # some keys can be renamed efficiently for key, val in state_dict.copy().items(): _lowerCAmelCase = state_dict.pop(snake_case_ ) if key.startswith("""Qformer.bert""" ): _lowerCAmelCase = key.replace("""Qformer.bert""" , """qformer""" ) if "attention.self" in key: _lowerCAmelCase = key.replace("""self""" , """attention""" ) if "opt_proj" in key: _lowerCAmelCase = key.replace("""opt_proj""" , """language_projection""" ) if "t5_proj" in key: _lowerCAmelCase = key.replace("""t5_proj""" , """language_projection""" ) if key.startswith("""opt""" ): _lowerCAmelCase = key.replace("""opt""" , """language""" ) if key.startswith("""t5""" ): _lowerCAmelCase = key.replace("""t5""" , """language""" ) _lowerCAmelCase = val # read in qv biases read_in_q_v_bias(snake_case_ , snake_case_ ) _lowerCAmelCase , _lowerCAmelCase = hf_model.load_state_dict(snake_case_ , strict=snake_case_ ) assert len(snake_case_ ) == 0 assert unexpected_keys == ["qformer.embeddings.position_ids"] _lowerCAmelCase = load_demo_image() _lowerCAmelCase = vis_processors["""eval"""](snake_case_ ).unsqueeze(0 ).to(snake_case_ ) _lowerCAmelCase = tokenizer(["""\n"""] 
, return_tensors="""pt""" ).input_ids.to(snake_case_ ) # create processor _lowerCAmelCase = BlipImageProcessor( size={"""height""": image_size, """width""": image_size} , image_mean=snake_case_ , image_std=snake_case_ ) _lowerCAmelCase = BlipaProcessor(image_processor=snake_case_ , tokenizer=snake_case_ ) _lowerCAmelCase = processor(images=snake_case_ , return_tensors="""pt""" ).pixel_values.to(snake_case_ ) # make sure processor creates exact same pixel values assert torch.allclose(snake_case_ , snake_case_ ) original_model.to(snake_case_ ) hf_model.to(snake_case_ ) with torch.no_grad(): if "opt" in model_name: _lowerCAmelCase = original_model({"""image""": original_pixel_values, """text_input""": [""""""]} ).logits _lowerCAmelCase = hf_model(snake_case_ , snake_case_ ).logits else: _lowerCAmelCase = original_model( {"""image""": original_pixel_values, """text_input""": ["""\n"""], """text_output""": ["""\n"""]} ).logits _lowerCAmelCase = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -100 ) _lowerCAmelCase = hf_model(snake_case_ , snake_case_ , labels=snake_case_ ).logits assert original_logits.shape == logits.shape print("""First values of original logits:""" , original_logits[0, :3, :3] ) print("""First values of HF logits:""" , logits[0, :3, :3] ) # assert values if model_name == "blip2-flan-t5-xl": _lowerCAmelCase = torch.tensor( [[-4_1.5_8_5_0, -4.4_4_4_0, -8.9_9_2_2], [-4_7.4_3_2_2, -5.9_1_4_3, -1.7_3_4_0]] , device=snake_case_ ) assert torch.allclose(logits[0, :3, :3] , snake_case_ , atol=1e-4 ) elif model_name == "blip2-flan-t5-xl-coco": _lowerCAmelCase = torch.tensor( [[-5_7.0_1_0_9, -9.8_9_6_7, -1_2.6_2_8_0], [-6_8.6_5_7_8, -1_2.7_1_9_1, -1_0.5_0_6_5]] , device=snake_case_ ) else: # cast to same type _lowerCAmelCase = logits.dtype assert torch.allclose(original_logits.to(snake_case_ ) , snake_case_ , atol=1e-2 ) print("""Looks ok!""" ) print("""Generating a caption...""" ) _lowerCAmelCase = """""" _lowerCAmelCase = tokenizer(snake_case_ , 
return_tensors="""pt""" ).input_ids.to(snake_case_ ) _lowerCAmelCase = original_model.generate({"""image""": original_pixel_values} ) _lowerCAmelCase = hf_model.generate( snake_case_ , snake_case_ , do_sample=snake_case_ , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , ) print("""Original generation:""" , snake_case_ ) _lowerCAmelCase = input_ids.shape[1] _lowerCAmelCase = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=snake_case_ ) _lowerCAmelCase = [text.strip() for text in output_text] print("""HF generation:""" , snake_case_ ) if pytorch_dump_folder_path is not None: processor.save_pretrained(snake_case_ ) hf_model.save_pretrained(snake_case_ ) if push_to_hub: processor.push_to_hub(F"""nielsr/{model_name}""" ) hf_model.push_to_hub(F"""nielsr/{model_name}""" ) if __name__ == "__main__": SCREAMING_SNAKE_CASE : Dict = argparse.ArgumentParser() SCREAMING_SNAKE_CASE : str = [ '''blip2-opt-2.7b''', '''blip2-opt-6.7b''', '''blip2-opt-2.7b-coco''', '''blip2-opt-6.7b-coco''', '''blip2-flan-t5-xl''', '''blip2-flan-t5-xl-coco''', '''blip2-flan-t5-xxl''', ] parser.add_argument( '''--model_name''', default='''blip2-opt-2.7b''', choices=choices, type=str, help='''Path to hf config.json of model to convert''', ) parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether to push the model and processor to the hub after converting''', ) SCREAMING_SNAKE_CASE : Dict = parser.parse_args() convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
156
"""Convert original PoolFormer checkpoints to the HuggingFace Transformers format."""

import argparse
import json
from collections import OrderedDict
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def replace_key_with_offset(key, offset, original_name, new_name):
    """Rename `key` by subtracting `offset` from the block number that precedes `original_name`.

    The original checkpoint numbers blocks globally (embeddings included); the HF
    model numbers blocks per stage, hence the offset correction.
    """
    to_find = original_name.split(".")[0]
    key_list = key.split(".")
    # The block number sits two components before `original_name`, the layer number one before.
    orig_block_num = int(key_list[key_list.index(to_find) - 2])
    layer_num = int(key_list[key_list.index(to_find) - 1])
    new_block_num = orig_block_num - offset
    key = key.replace(
        f"{orig_block_num}.{layer_num}.{original_name}", f"block.{new_block_num}.{layer_num}.{new_name}"
    )
    return key


def rename_keys(state_dict):
    """Map original PoolFormer state-dict keys onto the HF naming scheme."""
    new_state_dict = OrderedDict()
    total_embed_found, patch_emb_offset = 0, 0
    for key, value in state_dict.items():
        if key.startswith("network"):
            key = key.replace("network", "poolformer.encoder")
        if "proj" in key:
            # Works for the first embedding as well as the internal embedding layers.
            if key.endswith("bias") and "patch_embed" not in key:
                patch_emb_offset += 1
            to_replace = key[: key.find("proj")]
            key = key.replace(to_replace, f"patch_embeddings.{total_embed_found}.")
            key = key.replace("proj", "projection")
            if key.endswith("bias"):
                total_embed_found += 1
        if "patch_embeddings" in key:
            key = "poolformer.encoder." + key
        if "mlp.fc1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc1", "output.conv1")
        if "mlp.fc2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc2", "output.conv2")
        if "norm1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm1", "before_norm")
        if "norm2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm2", "after_norm")
        if "layer_scale_1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_1", "layer_scale_1")
        if "layer_scale_2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_2", "layer_scale_2")
        if "head" in key:
            key = key.replace("head", "classifier")
        new_state_dict[key] = value
    return new_state_dict


def prepare_img():
    """Download the standard COCO cats image used to verify the conversion."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image


@torch.no_grad()
def convert_poolformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
    """Copy/rename the weights of `checkpoint_path` into an HF PoolFormer model,
    verify the logits on a test image, and save model + image processor."""
    config = PoolFormerConfig()

    # set attributes based on model_name
    repo_id = "huggingface/label-files"
    size = model_name[-3:]
    config.num_labels = 1000
    filename = "imagenet-1k-id2label.json"
    expected_shape = (1, 1000)

    # set config attributes
    idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    idalabel = {int(k): v for k, v in idalabel.items()}
    config.idalabel = idalabel
    config.id2label = idalabel
    config.label2id = {v: k for k, v in idalabel.items()}
    if size == "s12":
        config.depths = [2, 2, 6, 2]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s24":
        config.depths = [4, 4, 12, 4]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.9
    elif size == "m36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    elif size == "m48":
        config.depths = [8, 8, 24, 8]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    else:
        raise ValueError(f"Size {size} not supported")

    # load image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)

    # Prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info(f"Converting model {model_name}...")

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))

    # rename keys
    state_dict = rename_keys(state_dict)

    # create HuggingFace model and load state dict
    model = PoolFormerForImageClassification(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Define image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)
    pixel_values = image_processor(images=prepare_img(), return_tensors="pt").pixel_values

    # forward pass
    outputs = model(pixel_values)
    logits = outputs.logits

    # define expected logit slices for different models
    if size == "s12":
        expected_slice = torch.tensor([-0.3045, -0.6758, -0.4869])
    elif size == "s24":
        expected_slice = torch.tensor([0.4402, -0.1374, -0.8045])
    elif size == "s36":
        expected_slice = torch.tensor([-0.6080, -0.5133, -0.5898])
    elif size == "m36":
        expected_slice = torch.tensor([0.3952, 0.2263, -1.2668])
    elif size == "m48":
        expected_slice = torch.tensor([0.1167, -0.0656, -0.3423])
    else:
        raise ValueError(f"Size {size} not supported")

    # verify logits
    assert logits.shape == expected_shape
    assert torch.allclose(logits[0, :3], expected_slice, atol=1e-2)

    # finally, save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_name",
        default="poolformer_s12",
        type=str,
        help="Name of the model you'd like to convert.",
    )
    parser.add_argument(
        "--checkpoint_path", default=None, type=str, help="Path to the original PyTorch checkpoint (.pth file)."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    args = parser.parse_args()
    convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
156
1
"""Base85 (b85, RFC 1924 alphabet) string codec built on the standard library."""

import base64


def base85_encode(string: str) -> bytes:
    """Encode a text string as Base85 bytes (UTF-8 is used for the byte form)."""
    # b85encode operates on bytes, so encode the text first.
    return base64.b85encode(string.encode("utf-8"))


def base85_decode(a85encoded: bytes) -> str:
    """Decode Base85 bytes back into the original text string."""
    return base64.b85decode(a85encoded).decode("utf-8")


if __name__ == "__main__":
    test = "Hello World!"
    encoded = base85_encode(test)
    print(encoded)
    decoded = base85_decode(encoded)
    print(decoded)
479
# Lazy import structure for the GPTBigCode model, following the standard
# transformers sub-package pattern: symbols are only imported when accessed.
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)


# Maps submodule name -> public symbols it provides.
_import_structure = {
    "configuration_gpt_bigcode": ["GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTBigCodeConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Modeling code requires torch; skip registering it when torch is absent.
    pass
else:
    _import_structure["modeling_gpt_bigcode"] = [
        "GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTBigCodeForSequenceClassification",
        "GPTBigCodeForTokenClassification",
        "GPTBigCodeForCausalLM",
        "GPTBigCodeModel",
        "GPTBigCodePreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers get the real imports.
    from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_bigcode import (
            GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTBigCodeForCausalLM,
            GPTBigCodeForSequenceClassification,
            GPTBigCodeForTokenClassification,
            GPTBigCodeModel,
            GPTBigCodePreTrainedModel,
        )
else:
    import sys

    # At runtime, replace this module with a lazy proxy that imports on attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
479
1
"""Tests for the XLMProphetNet SentencePiece tokenizer."""

import unittest

from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")


@require_sentencepiece
class XLMProphetNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    # Attributes consumed by TokenizerTesterMixin.
    tokenizer_class = XLMProphetNetTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        """Token <-> id round-trips through the private conversion hooks."""
        token = "[PAD]"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "[PAD]")
        self.assertEqual(vocab_keys[1], "[CLS]")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 1012)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1012)

    def test_full_tokenizer(self):
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        # -9 maps onto the unk token after the fairseq offset is applied.
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4]
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "[UNK]",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "[UNK]",
                ".",
            ],
        )

    @cached_property
    def big_tokenizer(self):
        # Real pretrained tokenizer, only used by @slow tests.
        return XLMProphetNetTokenizer.from_pretrained("microsoft/xprophetnet-large-wiki100-cased")

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [35389, 6672, 49, 2]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenizer_integration(self):
        # Padded runs are expressed with list multiplication; values are identical
        # to the reference encoding (sequence length 105).
        expected_encoding = {
            "input_ids": [
                [11073, 82783, 18, 26, 82783, 549, 51540, 248, 17209, 1301, 217, 20, 215186, 1325, 147, 17209, 1301,
                 217, 20, 56370, 53, 122020, 20, 16477, 27, 87355, 4548, 20, 4728, 78392, 17, 159969, 18, 26, 24491,
                 629, 15, 538, 22704, 5439, 15, 2788, 24491, 9885, 15, 43534, 605, 15, 814, 18403, 33200, 29, 15,
                 43534, 24458, 12410, 111, 24966, 83669, 9637, 144068, 26, 850, 22346, 27, 147, 24966, 83669, 83490,
                 26, 39113, 735, 27, 689, 656, 2800, 1339, 4600, 53, 122020, 115785, 34, 816, 1339, 46887, 18, 147,
                 53905, 1951, 42238, 41170, 17732, 834, 436, 15, 27523, 98733, 217, 147, 5542, 4981, 930, 17347, 16,
                 2],
                [20091, 629, 94, 82786, 58, 490, 20, 1528, 84, 53905, 344, 80592, 110128, 18822, 5267, 1306, 62,
                 152537, 308, 7997, 401, 124427, 549, 35442, 225, 109, 15055, 25748, 147, 7119, 43712, 34, 767,
                 135366, 18, 16, 2] + [0] * 68,
                [592, 63784, 119466, 17, 147808, 88214, 18, 656, 81, 32, 3296, 10280, 16, 2] + [0] * 91,
            ],
            "attention_mask": [
                [1] * 105,
                [1] * 37 + [0] * 68,
                [1] * 14 + [0] * 91,
            ],
        }

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="microsoft/xprophetnet-large-wiki100-cased",
            revision="1acad1643ddd54a44df6a1b797ada8373685d90e",
        )
449
"""simple docstring""" from __future__ import annotations from collections.abc import Sequence from typing import Literal def _A ( _a : str , _a : str ): """simple docstring""" A = list(_a ) A = list(_a ) A = 0 for i in range(len(_a ) ): if lista[i] != lista[i]: count += 1 A = """_""" if count > 1: return False else: return "".join(_a ) def _A ( _a : list[str] ): """simple docstring""" A = [] while True: A = ["""$"""] * len(_a ) A = [] for i in range(len(_a ) ): for j in range(i + 1 , len(_a ) ): A = compare_string(binary[i] , binary[j] ) if k is False: A = """*""" A = """*""" temp.append("""X""" ) for i in range(len(_a ) ): if checka[i] == "$": pi.append(binary[i] ) if len(_a ) == 0: return pi A = list(set(_a ) ) def _A ( _a : int , _a : Sequence[float] ): """simple docstring""" A = [] for minterm in minterms: A = """""" for _ in range(_a ): A = str(minterm % 2 ) + string minterm //= 2 temp.append(_a ) return temp def _A ( _a : str , _a : str , _a : int ): """simple docstring""" A = list(_a ) A = list(_a ) A = 0 for i in range(len(_a ) ): if lista[i] != lista[i]: count_n += 1 return count_n == count def _A ( _a : list[list[int]] , _a : list[str] ): """simple docstring""" A = [] A = [0] * len(_a ) for i in range(len(chart[0] ) ): A = 0 A = -1 for j in range(len(_a ) ): if chart[j][i] == 1: count += 1 A = j if count == 1: A = 1 for i in range(len(_a ) ): if select[i] == 1: for j in range(len(chart[0] ) ): if chart[i][j] == 1: for k in range(len(_a ) ): A = 0 temp.append(prime_implicants[i] ) while True: A = 0 A = -1 A = 0 for i in range(len(_a ) ): A = chart[i].count(1 ) if count_n > max_n: A = count_n A = i if max_n == 0: return temp temp.append(prime_implicants[rem] ) for i in range(len(chart[0] ) ): if chart[rem][i] == 1: for j in range(len(_a ) ): A = 0 def _A ( _a : list[str] , _a : list[str] ): """simple docstring""" A = [[0 for x in range(len(_a ) )] for x in range(len(_a ) )] for i in range(len(_a ) ): A = prime_implicants[i].count("""_""" ) for j in 
range(len(_a ) ): if is_for_table(prime_implicants[i] , binary[j] , _a ): A = 1 return chart def _A ( ): """simple docstring""" A = int(input("""Enter the no. of variables\n""" ) ) A = [ float(_a ) for x in input( """Enter the decimal representation of Minterms 'Spaces Separated'\n""" ).split() ] A = decimal_to_binary(_a , _a ) A = check(_a ) print("""Prime Implicants are:""" ) print(_a ) A = prime_implicant_chart(_a , _a ) A = selection(_a , _a ) print("""Essential Prime Implicants are:""" ) print(_a ) if __name__ == "__main__": import doctest doctest.testmod() main()
617
0
"""torch.hub entry points: thin wrappers around the transformers Auto* classes."""

import os
import sys


# Make the in-repo `src` layout importable when loaded through torch.hub.
SRC_DIR = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)


from transformers import (
    AutoConfig,
    AutoModel,
    AutoModelForCausalLM,
    AutoModelForMaskedLM,
    AutoModelForQuestionAnswering,
    AutoModelForSequenceClassification,
    AutoTokenizer,
    add_start_docstrings,
)


# Packages torch.hub should install before loading these entry points.
dependencies = [
    "torch",
    "numpy",
    "tokenizers",
    "filelock",
    "requests",
    "tqdm",
    "regex",
    "sentencepiece",
    "sacremoses",
    "importlib_metadata",
    "huggingface_hub",
]


@add_start_docstrings(AutoConfig.__doc__)
def config(*args, **kwargs):
    # Delegates to AutoConfig; full docstring is attached by the decorator.
    return AutoConfig.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoTokenizer.__doc__)
def tokenizer(*args, **kwargs):
    return AutoTokenizer.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModel.__doc__)
def model(*args, **kwargs):
    return AutoModel.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForCausalLM.__doc__)
def modelForCausalLM(*args, **kwargs):
    return AutoModelForCausalLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForMaskedLM.__doc__)
def modelForMaskedLM(*args, **kwargs):
    return AutoModelForMaskedLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForSequenceClassification.__doc__)
def modelForSequenceClassification(*args, **kwargs):
    return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForQuestionAnswering.__doc__)
def modelForQuestionAnswering(*args, **kwargs):
    return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs)
458
def hex_to_bin(hex_num: str) -> int:
    """Convert a hexadecimal string to an int whose decimal digits spell the
    binary representation, e.g. "AC" -> 10101100 and "-124" -> -100100100.

    Raises ValueError for an empty string or a non-hexadecimal value.
    """
    hex_num = hex_num.strip()
    if not hex_num:
        raise ValueError("No value was passed to the function")
    is_negative = hex_num[0] == "-"
    if is_negative:
        hex_num = hex_num[1:]
    try:
        int_num = int(hex_num, 16)
    except ValueError:
        raise ValueError("Invalid value was passed to the function")
    if int_num == 0:
        # Explicit zero case: the digit-building loop below would otherwise
        # produce an empty string and int("") would raise.
        return 0
    bin_str = ""
    while int_num > 0:
        bin_str = str(int_num % 2) + bin_str
        int_num >>= 1
    return int(("-" + bin_str) if is_negative else bin_str)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
458
1
"""Render a benchmark-results JSON file as a collapsible Markdown table."""

import json
import sys


def format_json_to_md(input_json_file, output_md_file):
    """Read `input_json_file` (benchmark -> metric -> {new, old, diff}) and
    write a Markdown <details> block with one table per benchmark."""
    with open(input_json_file, encoding="utf-8") as f:
        results = json.load(f)

    output_md = ["<details>", "<summary>Show updated benchmarks!</summary>", " "]

    for benchmark_name in sorted(results):
        benchmark_res = results[benchmark_name]

        benchmark_file_name = benchmark_name.split("/")[-1]
        output_md.append(f"### Benchmark: {benchmark_file_name}")

        title = "| metric |"
        lines = "|--------|"
        value = "| new / old (diff) |"
        for metric_name in sorted(benchmark_res):
            metric_vals = benchmark_res[metric_name]
            new_val = metric_vals["new"]
            old_val = metric_vals.get("old", None)
            dif_val = metric_vals.get("diff", None)

            # Non-numeric entries render as "None" rather than crashing the :f format.
            val_str = f" {new_val:f}" if isinstance(new_val, (int, float)) else "None"
            if old_val is not None:
                val_str += f" / {old_val:f}" if isinstance(old_val, (int, float)) else "None"
            if dif_val is not None:
                val_str += f" ({dif_val:f})" if isinstance(dif_val, (int, float)) else "None"

            title += " " + metric_name + " |"
            lines += "---|"
            value += val_str + " |"
        output_md += [title, lines, value, " "]

    output_md.append("</details>")

    with open(output_md_file, "w", encoding="utf-8") as f:
        f.write("\n".join(output_md))


if __name__ == "__main__":
    input_json_file = sys.argv[1]
    output_md_file = sys.argv[2]

    format_json_to_md(input_json_file, output_md_file)
21
"""Materialize a fine-pruned (movement pruning) checkpoint into a standalone
"bertarized" model by applying each layer's learned mask to its weights."""

import argparse
import os
import shutil

import torch

from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer


def main(args):
    pruning_method = args.pruning_method
    threshold = args.threshold

    model_name_or_path = args.model_name_or_path.rstrip("/")
    target_model_path = args.target_model_path

    print(f"Load fine-pruned model from {model_name_or_path}")
    model = torch.load(os.path.join(model_name_or_path, "pytorch_model.bin"))
    pruned_model = {}

    for name, tensor in model.items():
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            # These layers are never pruned; copy through unchanged.
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "classifier" in name or "qa_output" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "bias" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        else:
            if pruning_method == "magnitude":
                mask = MagnitudeBinarizer.apply(inputs=tensor, threshold=threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    # Scores are consumed below via their weight's name; skip directly.
                    continue
                prefix_ = name[:-6]  # strip the trailing "weight"
                scores = model[f"{prefix_}mask_scores"]
                mask = TopKBinarizer.apply(scores, threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = ThresholdBinarizer.apply(scores, threshold, True)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                # Hard-concrete stretch interval used during L0 training.
                l, r = -0.1, 1.1
                s = torch.sigmoid(scores)
                s_bar = s * (r - l) + l
                mask = s_bar.clamp(min=0.0, max=1.0)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            else:
                raise ValueError("Unknown pruning method")

    if target_model_path is None:
        target_model_path = os.path.join(
            os.path.dirname(model_name_or_path), f"bertarized_{os.path.basename(model_name_or_path)}"
        )

    if not os.path.isdir(target_model_path):
        shutil.copytree(model_name_or_path, target_model_path)
        print(f"\nCreated folder {target_model_path}")

    torch.save(pruned_model, os.path.join(target_model_path, "pytorch_model.bin"))
    print("\nPruned model saved! See you later!")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--pruning_method",
        choices=["l0", "magnitude", "topK", "sigmoied_threshold"],
        type=str,
        required=True,
        help=(
            "Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,"
            " sigmoied_threshold = Soft movement pruning)"
        ),
    )
    parser.add_argument(
        "--threshold",
        type=float,
        required=False,
        help=(
            "For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model."
            "For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared."
            "Not needed for `l0`"
        ),
    )
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        required=True,
        help="Folder containing the model that was previously fine-pruned",
    )
    parser.add_argument(
        "--target_model_path",
        default=None,
        type=str,
        required=False,
        help="Folder containing the model that was previously fine-pruned",
    )
    args = parser.parse_args()

    main(args)
120
0
"""Fast tests for the IF inpainting super-resolution diffusers pipeline."""

import random
import unittest

import torch

from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device

from ..pipeline_params import (
    TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
    TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin


@skip_mps
class IFInpaintingSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    # Attributes consumed by PipelineTesterMixin.
    pipeline_class = IFInpaintingSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        # mps does not support device-specific generators.
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_floataa(self):
        # Requires higher tolerance due to float16 numerics.
        super().test_save_load_floataa(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2,
        )
700
def solution(length: int = 50) -> int:
    """Project Euler 117: count the ways a row of `length` units can be tiled
    with unit squares and tiles of length 2, 3 and 4 (mixing allowed).

    Dynamic programming: ways[n] starts at 1 (all unit squares) and each
    placement of a longer tile at a given start adds the ways of the prefix
    left of it.
    """
    ways_number = [1] * (length + 1)

    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                ways_number[row_length] += ways_number[row_length - tile_start - tile_length]

    return ways_number[length]


if __name__ == "__main__":
    print(f"{solution() = }")
221
0
'''simple docstring''' import logging import os import sys from dataclasses import dataclass, field from typing import Optional import numpy as np import torch from datasets import load_dataset from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor import transformers from transformers import ( CONFIG_MAPPING, IMAGE_PROCESSOR_MAPPING, MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING, AutoConfig, AutoImageProcessor, AutoModelForMaskedImageModeling, HfArgumentParser, Trainer, TrainingArguments, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version lowerCamelCase : List[Any] = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("4.31.0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt") lowerCamelCase : Optional[int] = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys()) lowerCamelCase : int = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class A__ : A__ = field( default='cifar10' , metadata={'help': 'Name of a dataset from the datasets package'} ) A__ = field( default=A__ , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} ) A__ = field( default=A__ , metadata={'help': 'The column name of the images in the files. 
If not set, will try to use \'image\' or \'img\'.'} , ) A__ = field(default=A__ , metadata={'help': 'A folder containing the training data.'} ) A__ = field(default=A__ , metadata={'help': 'A folder containing the validation data.'} ) A__ = field( default=0.15 , metadata={'help': 'Percent to split off of train for validation.'} ) A__ = field(default=32 , metadata={'help': 'The size of the square patches to use for masking.'} ) A__ = field( default=0.6 , metadata={'help': 'Percentage of patches to mask.'} , ) A__ = field( default=A__ , metadata={ 'help': ( 'For debugging purposes or quicker training, truncate the number of training examples to this ' 'value if set.' ) } , ) A__ = field( default=A__ , metadata={ 'help': ( 'For debugging purposes or quicker training, truncate the number of evaluation examples to this ' 'value if set.' ) } , ) def A ( self : Optional[int] ) -> List[Any]: '''simple docstring''' _SCREAMING_SNAKE_CASE ={} if self.train_dir is not None: _SCREAMING_SNAKE_CASE =self.train_dir if self.validation_dir is not None: _SCREAMING_SNAKE_CASE =self.validation_dir _SCREAMING_SNAKE_CASE =data_files if data_files else None @dataclass class A__ : A__ = field( default=A__ , metadata={ 'help': ( 'The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a ' 'checkpoint identifier on the hub. ' 'Don\'t set if you want to train a model from scratch.' ) } , ) A__ = field( default=A__ , metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(A__ )} , ) A__ = field( default=A__ , metadata={'help': 'Pretrained config name or path if not the same as model_name'} ) A__ = field( default=A__ , metadata={ 'help': ( 'Override some existing default config settings when a model is trained from scratch. 
Example: ' 'n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index' ) } , ) A__ = field( default=A__ , metadata={'help': 'Where do you want to store (cache) the pretrained models/datasets downloaded from the hub'} , ) A__ = field( default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , ) A__ = field(default=A__ , metadata={'help': 'Name or path of preprocessor config.'} ) A__ = field( default=A__ , metadata={ 'help': ( 'Will use the token generated when running `huggingface-cli login` (necessary to use this script ' 'with private models).' ) } , ) A__ = field( default=A__ , metadata={ 'help': ( 'The size (resolution) of each image. If not specified, will use `image_size` of the configuration.' ) } , ) A__ = field( default=A__ , metadata={ 'help': ( 'The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration.' ) } , ) A__ = field( default=A__ , metadata={'help': 'Stride to use for the encoder.'} , ) class A__ : def __init__( self : Dict , _a : str=192 , _a : Dict=32 , _a : Dict=4 , _a : Tuple=0.6 ) -> Optional[Any]: '''simple docstring''' _SCREAMING_SNAKE_CASE =input_size _SCREAMING_SNAKE_CASE =mask_patch_size _SCREAMING_SNAKE_CASE =model_patch_size _SCREAMING_SNAKE_CASE =mask_ratio if self.input_size % self.mask_patch_size != 0: raise ValueError('Input size must be divisible by mask patch size' ) if self.mask_patch_size % self.model_patch_size != 0: raise ValueError('Mask patch size must be divisible by model patch size' ) _SCREAMING_SNAKE_CASE =self.input_size // self.mask_patch_size _SCREAMING_SNAKE_CASE =self.mask_patch_size // self.model_patch_size _SCREAMING_SNAKE_CASE =self.rand_size**2 _SCREAMING_SNAKE_CASE =int(np.ceil(self.token_count * self.mask_ratio ) ) def __call__( self : List[str] ) -> List[Any]: '''simple docstring''' _SCREAMING_SNAKE_CASE =np.random.permutation(self.token_count )[: self.mask_count] _SCREAMING_SNAKE_CASE 
=np.zeros(self.token_count , dtype=lowerCAmelCase__ ) _SCREAMING_SNAKE_CASE =1 _SCREAMING_SNAKE_CASE =mask.reshape((self.rand_size, self.rand_size) ) _SCREAMING_SNAKE_CASE =mask.repeat(self.scale , axis=0 ).repeat(self.scale , axis=1 ) return torch.tensor(mask.flatten() ) def _lowerCAmelCase ( _UpperCamelCase : str ) -> str: """simple docstring""" _SCREAMING_SNAKE_CASE =torch.stack([example['pixel_values'] for example in examples] ) _SCREAMING_SNAKE_CASE =torch.stack([example['mask'] for example in examples] ) return {"pixel_values": pixel_values, "bool_masked_pos": mask} def _lowerCAmelCase ( ) -> Optional[int]: """simple docstring""" _SCREAMING_SNAKE_CASE =HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. _SCREAMING_SNAKE_CASE =parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: _SCREAMING_SNAKE_CASE =parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry('run_mim' , lowercase__ , lowercase__ ) # Setup logging logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. 
transformers.utils.logging.set_verbosity_info() _SCREAMING_SNAKE_CASE =training_args.get_process_log_level() logger.setLevel(lowercase__ ) transformers.utils.logging.set_verbosity(lowercase__ ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" + f"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" ) logger.info(f"Training/evaluation parameters {training_args}" ) # Detecting last checkpoint. _SCREAMING_SNAKE_CASE =None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: _SCREAMING_SNAKE_CASE =get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty. " 'Use --overwrite_output_dir to overcome.' ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " 'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' ) # Initialize our dataset. _SCREAMING_SNAKE_CASE =load_dataset( data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) # If we don't have a validation split, split off a percentage of train as validation. 
_SCREAMING_SNAKE_CASE =None if '''validation''' in ds.keys() else data_args.train_val_split if isinstance(data_args.train_val_split , lowercase__ ) and data_args.train_val_split > 0.0: _SCREAMING_SNAKE_CASE =ds['''train'''].train_test_split(data_args.train_val_split ) _SCREAMING_SNAKE_CASE =split['''train'''] _SCREAMING_SNAKE_CASE =split['''test'''] # Create config # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. _SCREAMING_SNAKE_CASE ={ '''cache_dir''': model_args.cache_dir, '''revision''': model_args.model_revision, '''use_auth_token''': True if model_args.use_auth_token else None, } if model_args.config_name_or_path: _SCREAMING_SNAKE_CASE =AutoConfig.from_pretrained(model_args.config_name_or_path , **lowercase__ ) elif model_args.model_name_or_path: _SCREAMING_SNAKE_CASE =AutoConfig.from_pretrained(model_args.model_name_or_path , **lowercase__ ) else: _SCREAMING_SNAKE_CASE =CONFIG_MAPPING[model_args.model_type]() logger.warning('You are instantiating a new config instance from scratch.' 
) if model_args.config_overrides is not None: logger.info(f"Overriding config: {model_args.config_overrides}" ) config.update_from_string(model_args.config_overrides ) logger.info(f"New config: {config}" ) # make sure the decoder_type is "simmim" (only relevant for BEiT) if hasattr(lowercase__ , 'decoder_type' ): _SCREAMING_SNAKE_CASE ='''simmim''' # adapt config _SCREAMING_SNAKE_CASE =model_args.image_size if model_args.image_size is not None else config.image_size _SCREAMING_SNAKE_CASE =model_args.patch_size if model_args.patch_size is not None else config.patch_size _SCREAMING_SNAKE_CASE =( model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride ) config.update( { 'image_size': model_args.image_size, 'patch_size': model_args.patch_size, 'encoder_stride': model_args.encoder_stride, } ) # create image processor if model_args.image_processor_name: _SCREAMING_SNAKE_CASE =AutoImageProcessor.from_pretrained(model_args.image_processor_name , **lowercase__ ) elif model_args.model_name_or_path: _SCREAMING_SNAKE_CASE =AutoImageProcessor.from_pretrained(model_args.model_name_or_path , **lowercase__ ) else: _SCREAMING_SNAKE_CASE ={ conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items() } _SCREAMING_SNAKE_CASE =IMAGE_PROCESSOR_TYPES[model_args.model_type]() # create model if model_args.model_name_or_path: _SCREAMING_SNAKE_CASE =AutoModelForMaskedImageModeling.from_pretrained( model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=lowercase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) else: logger.info('Training new model from scratch' ) _SCREAMING_SNAKE_CASE =AutoModelForMaskedImageModeling.from_config(lowercase__ ) if training_args.do_train: _SCREAMING_SNAKE_CASE =ds['''train'''].column_names else: _SCREAMING_SNAKE_CASE =ds['''validation'''].column_names 
if data_args.image_column_name is not None: _SCREAMING_SNAKE_CASE =data_args.image_column_name elif "image" in column_names: _SCREAMING_SNAKE_CASE ='''image''' elif "img" in column_names: _SCREAMING_SNAKE_CASE ='''img''' else: _SCREAMING_SNAKE_CASE =column_names[0] # transformations as done in original SimMIM paper # source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py _SCREAMING_SNAKE_CASE =Compose( [ Lambda(lambda _UpperCamelCase : img.convert('RGB' ) if img.mode != "RGB" else img ), RandomResizedCrop(model_args.image_size , scale=(0.67, 1.0) , ratio=(3.0 / 4.0, 4.0 / 3.0) ), RandomHorizontalFlip(), ToTensor(), Normalize(mean=image_processor.image_mean , std=image_processor.image_std ), ] ) # create mask generator _SCREAMING_SNAKE_CASE =MaskGenerator( input_size=model_args.image_size , mask_patch_size=data_args.mask_patch_size , model_patch_size=model_args.patch_size , mask_ratio=data_args.mask_ratio , ) def preprocess_images(_UpperCamelCase : int ): _SCREAMING_SNAKE_CASE =[transforms(lowercase__ ) for image in examples[image_column_name]] _SCREAMING_SNAKE_CASE =[mask_generator() for i in range(len(examples[image_column_name] ) )] return examples if training_args.do_train: if "train" not in ds: raise ValueError('--do_train requires a train dataset' ) if data_args.max_train_samples is not None: _SCREAMING_SNAKE_CASE =ds['''train'''].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) # Set the training transforms ds["train"].set_transform(lowercase__ ) if training_args.do_eval: if "validation" not in ds: raise ValueError('--do_eval requires a validation dataset' ) if data_args.max_eval_samples is not None: _SCREAMING_SNAKE_CASE =( ds['''validation'''].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms ds["validation"].set_transform(lowercase__ ) # Initialize our trainer _SCREAMING_SNAKE_CASE =Trainer( model=lowercase__ , args=lowercase__ , 
train_dataset=ds['train'] if training_args.do_train else None , eval_dataset=ds['validation'] if training_args.do_eval else None , tokenizer=lowercase__ , data_collator=lowercase__ , ) # Training if training_args.do_train: _SCREAMING_SNAKE_CASE =None if training_args.resume_from_checkpoint is not None: _SCREAMING_SNAKE_CASE =training_args.resume_from_checkpoint elif last_checkpoint is not None: _SCREAMING_SNAKE_CASE =last_checkpoint _SCREAMING_SNAKE_CASE =trainer.train(resume_from_checkpoint=lowercase__ ) trainer.save_model() trainer.log_metrics('train' , train_result.metrics ) trainer.save_metrics('train' , train_result.metrics ) trainer.save_state() # Evaluation if training_args.do_eval: _SCREAMING_SNAKE_CASE =trainer.evaluate() trainer.log_metrics('eval' , lowercase__ ) trainer.save_metrics('eval' , lowercase__ ) # Write model card and (optionally) push to hub _SCREAMING_SNAKE_CASE ={ '''finetuned_from''': model_args.model_name_or_path, '''tasks''': '''masked-image-modeling''', '''dataset''': data_args.dataset_name, '''tags''': ['''masked-image-modeling'''], } if training_args.push_to_hub: trainer.push_to_hub(**lowercase__ ) else: trainer.create_model_card(**lowercase__ ) if __name__ == "__main__": main()
405
from datetime import datetime

import requests


def download_video(url: str) -> bytes:
    """Return the raw bytes of an Instagram Video/IGTV post.

    Resolves the direct video URL through the downloadgram.net API, then
    downloads and returns the video content.

    :param url: public URL of the Instagram Video/IGTV post
    :return: the video file content as bytes
    """
    base_url = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
    # The API answers with a JSON list; the first entry carries the direct
    # video source URL under ["urls"][0]["src"].
    video_url = requests.get(base_url + url).json()[0]["urls"][0]["src"]
    # Fetch the resolved video URL (not the original post URL).
    return requests.get(video_url).content


if __name__ == "__main__":
    url = input("Enter Video/IGTV url: ").strip()
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"
    with open(file_name, "wb") as fp:
        fp.write(download_video(url))
    print(f"Done. Video saved to disk as {file_name}.")
696
0
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Base import structure: the configuration is importable without any optional backend.
_import_structure = {
    "configuration_time_series_transformer": [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TimeSeriesTransformerConfig",
    ],
}

# The modeling objects require torch; register them only when it is available.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_time_series_transformer"] = [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimeSeriesTransformerForPrediction",
        "TimeSeriesTransformerModel",
        "TimeSeriesTransformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_time_series_transformer import (
        TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TimeSeriesTransformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_time_series_transformer import (
            TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimeSeriesTransformerForPrediction,
            TimeSeriesTransformerModel,
            TimeSeriesTransformerPreTrainedModel,
        )
else:
    import sys

    # At runtime, replace this module with a lazy proxy so the heavy imports
    # only happen on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
152
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from tokenizers import processors

from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_mbart import MBartTokenizer
else:
    MBartTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/mbart-large-en-ro": (
            "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
        ),
        "facebook/mbart-large-cc25": (
            "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
        ),
    },
    "tokenizer_file": {
        "facebook/mbart-large-en-ro": "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json",
        "facebook/mbart-large-cc25": "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/mbart-large-en-ro": 1024,
    "facebook/mbart-large-cc25": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
# fmt: on


class MBartTokenizerFast(PreTrainedTokenizerFast):
    """Fast (tokenizers-backed) mBART tokenizer.

    Sequences are wrapped with language-specific special tokens: the source
    text becomes ``X [eos] [src_lang_code]`` and the target text becomes
    ``X [eos] [tgt_lang_code]`` (no BOS is used).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = MBartTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        src_lang=None,
        tgt_lang=None,
        additional_special_tokens=None,
        **kwargs,
    ):
        # The mask token behaves like a normal word: it absorbs the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self.vocab_file = vocab_file
        # Without the sentencepiece model we cannot rebuild a slow tokenizer.
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )
        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})

        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }
        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    @property
    def src_lang(self) -> str:
        """Current source language code (e.g. ``"en_XX"``)."""
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Add the mBART prefix/suffix special tokens around the token ids."""
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """mBART does not use token type ids; return a zero mask of the right length."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs):
        """Used by translation pipelines to prepare inputs for the generate function."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        # generate() reads this to force the first decoded token to the target language code.
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "en_XX",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "ro_RO",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset special tokens to source-language mode: no prefix, suffix=[eos, src_lang_code]."""
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset special tokens to target-language mode: no prefix, suffix=[eos, tgt_lang_code]."""
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy the sentencepiece model into `save_directory` so a slow tokenizer can be rebuilt."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
152
1
"""Tests for the text generation streamers (TextStreamer / TextIteratorStreamer)."""
import unittest
from queue import Empty
from threading import Thread

from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device

from ..test_modeling_common import ids_tensor


if is_torch_available():
    import torch

    from transformers import AutoModelForCausalLM


@require_torch
class StreamerTester(unittest.TestCase):
    def test_text_streamer_matches_non_streaming(self):
        """TextStreamer must print to stdout exactly what non-streamed greedy decoding returns."""
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1  # disable EOS so both runs generate the full max_new_tokens

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, greedy_text)

    def test_iterator_streamer_matches_non_streaming(self):
        """Iterating a TextIteratorStreamer must yield the same text as non-streamed decoding."""
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        # Generation runs in a background thread while the main thread consumes the stream.
        streamer = TextIteratorStreamer(tokenizer)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()
        streamer_text = ""
        for new_text in streamer:
            streamer_text += new_text

        self.assertEqual(streamer_text, greedy_text)

    def test_text_streamer_skip_prompt(self):
        """With skip_prompt=True, only the newly generated text is streamed."""
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        new_greedy_ids = greedy_ids[:, input_ids.shape[1] :]
        new_greedy_text = tokenizer.decode(new_greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_prompt=True)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, new_greedy_text)

    def test_text_streamer_decode_kwargs(self):
        # Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
        # with actual models -- the dummy models' tokenizers are not aligned with their models, and
        # `skip_special_tokens=True` has no effect on them
        tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
        model = AutoModelForCausalLM.from_pretrained("distilgpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = torch.ones((1, 5), device=torch_device).long() * model.config.bos_token_id
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_special_tokens=True)
            model.generate(input_ids, max_new_tokens=1, do_sample=False, streamer=streamer)

        # The prompt contains a special token, so the streamer should not print it. As such, the output text, when
        # re-tokenized, must only contain one token
        streamer_text = cs.out[:-1]  # Remove the final "\n"
        streamer_text_tokenized = tokenizer(streamer_text, return_tensors="pt")
        self.assertEqual(streamer_text_tokenized.input_ids.shape, (1, 1))

    def test_iterator_streamer_timeout(self):
        """A consumer that outlives the generation thread must hit the queue timeout."""
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        streamer = TextIteratorStreamer(tokenizer, timeout=0.001)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()

        # The streamer will timeout after 0.001 seconds, so an exception will be raised
        with self.assertRaises(Empty):
            streamer_text = ""
            for new_text in streamer:
                streamer_text += new_text
29
"""Convert a fairseq mBART checkpoint to a Hugging Face MBartForConditionalGeneration."""
import argparse

import torch
from torch import nn

from transformers import MBartConfig, MBartForConditionalGeneration


def remove_ignore_keys_(state_dict):
    """Drop fairseq bookkeeping keys that have no HF equivalent (modifies state_dict in place)."""
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
        "decoder.output_projection.weight",
    ]
    for k in ignore_keys:
        # pop with a default so missing keys are silently ignored
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    """Build a bias-free Linear layer whose weight is tied to the embedding's weight."""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_mbart_checkpoint_from_disk(
    checkpoint_path, hf_config_path="facebook/mbart-large-en-ro", finetuned=False, mbart_50=False
):
    """Load a fairseq mBART checkpoint and return an equivalent HF model.

    :param checkpoint_path: path to the fairseq ``model.pt`` file
    :param hf_config_path: HF config to base the architecture on
    :param finetuned: whether the checkpoint is a fine-tuned model (ties the LM head)
    :param mbart_50: whether the checkpoint is an mBART-50 model (uses relu activation when fine-tuned)
    """
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]

    mbart_config = MBartConfig.from_pretrained(hf_config_path, vocab_size=vocab_size)
    if mbart_50 and finetuned:
        mbart_config.activation_function = "relu"

    # fairseq shares encoder/decoder embeddings; HF expects an explicit "shared" weight.
    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = MBartForConditionalGeneration(mbart_config)
    model.model.load_state_dict(state_dict)

    if finetuned:
        model.lm_head = make_linear_from_emb(model.model.shared)

    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem."
    )
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--hf_config",
        default="facebook/mbart-large-cc25",
        type=str,
        help="Which huggingface architecture to use: mbart-large",
    )
    parser.add_argument("--mbart_50", action="store_true", help="whether the model is mBART-50 checkpoint")
    parser.add_argument("--finetuned", action="store_true", help="whether the model is a fine-tuned checkpoint")
    args = parser.parse_args()
    model = convert_fairseq_mbart_checkpoint_from_disk(
        args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_50=args.mbart_50
    )
    model.save_pretrained(args.pytorch_dump_folder_path)
29
1
"""Fast tokenizer for CamemBERT, backed by HuggingFace tokenizers."""
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_camembert import CamembertTokenizer
else:
    CamembertTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
    },
    "tokenizer_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "camembert-base": 512,
}

SPIECE_UNDERLINE = "▁"


class CamembertTokenizerFast(PreTrainedTokenizerFast):
    """Fast CamemBERT tokenizer (RoBERTa-style special tokens: <s> ... </s>)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = CamembertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"],
        **kwargs,
    ):
        # The mask token behaves like a normal word: it absorbs the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self.vocab_file = vocab_file
        # Without the sentencepiece model we cannot rebuild a slow tokenizer.
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Wrap sequences with CamemBERT special tokens: <s> A </s> (</s> B </s> for pairs)."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """CamemBERT does not use token type ids; return a zero mask of the right length."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy the sentencepiece model into `save_directory` so a slow tokenizer can be rebuilt."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
712
"""Gabor filter kernel generation and an OpenCV edge-detection demo."""
import numpy as np


def gabor_filter_kernel(ksize: int, sigma: int, theta: int, lambd: int, gamma: int, psi: int) -> np.ndarray:
    """
    Build a ``ksize x ksize`` Gabor filter kernel.

    :param ksize: kernel size; an even value is bumped to the next odd number
        so the kernel has a well-defined center pixel
    :param sigma: standard deviation of the Gaussian envelope
    :param theta: orientation of the filter, in degrees
    :param lambd: wavelength of the sinusoidal carrier
    :param gamma: spatial aspect ratio of the Gaussian envelope
    :param psi: phase offset of the carrier

    >>> gabor_filter_kernel(3, 8, 0, 10, 0, 0).shape
    (3, 3)
    """
    # the kernel size has to be odd
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize), dtype=np.float64)

    # degree to radiant; theta is loop-invariant, so compute the rotation once
    _theta = theta / 180 * np.pi
    cos_theta = np.cos(_theta)
    sin_theta = np.sin(_theta)

    # fill each cell of the kernel
    for y in range(ksize):
        for x in range(ksize):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2

            # rotate coordinates by theta
            _x = cos_theta * px + sin_theta * py
            _y = -sin_theta * px + cos_theta * py

            # Gaussian envelope modulated by a cosine carrier
            gabor[y, x] = np.exp(-(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)) * np.cos(2 * np.pi * _x / lambd + psi)

    return gabor


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # OpenCV is only needed for the demo below; import it lazily so the kernel
    # function stays usable with numpy alone.
    from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey

    # read original image
    img = imread("../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # Apply multiple Kernel to detect edges
    out = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 120, 150]:
        kernel_10 = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filter2D(gray, CV_8UC3, kernel_10)
    out = out / out.max() * 255
    out = out.astype(np.uint8)

    imshow("Original", gray)
    imshow("Gabor filter with 20x20 mask and 6 directions", out)
    waitKey(0)
517
0
from typing import List, Optional, Tuple, Union

import torch

from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class DanceDiffusionPipeline(DiffusionPipeline):
    """Pipeline for unconditional audio generation with a 1-D UNet and a scheduler.

    Parameters:
        unet: the 1-D UNet that denoises the audio sample.
        scheduler: scheduler used with `unet` in the denoising loop.
    """

    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 100,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        audio_length_in_s: Optional[float] = None,
        return_dict: bool = True,
    ) -> Union[AudioPipelineOutput, Tuple]:
        """Generate audio by iterative denoising.

        Args:
            batch_size: number of audio samples to generate.
            num_inference_steps: number of denoising steps.
            generator: torch generator(s) for deterministic sampling.
            audio_length_in_s: requested clip length in seconds; defaults to
                the length implied by the UNet config.
            return_dict: return an `AudioPipelineOutput` instead of a tuple.

        Returns:
            `AudioPipelineOutput` (or a plain tuple when `return_dict` is False).
        """
        if audio_length_in_s is None:
            audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate

        sample_size = audio_length_in_s * self.unet.config.sample_rate

        # the UNet halves the sample length once per up-block, so the input
        # length must be a multiple of 2 ** len(up_blocks)
        down_scale_factor = 2 ** len(self.unet.up_blocks)
        if sample_size < 3 * down_scale_factor:
            raise ValueError(
                f"""{audio_length_in_s} is too small. Make sure it's bigger or equal to"""
                f""" {3 * down_scale_factor / self.unet.config.sample_rate}."""
            )

        # remember the requested length so the padded result can be trimmed back
        original_sample_size = int(sample_size)
        if sample_size % down_scale_factor != 0:
            sample_size = (
                (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
            ) * down_scale_factor
            logger.info(
                f"""{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"""
                f""" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"""
                """ process."""
            )
        sample_size = int(sample_size)

        dtype = next(iter(self.unet.parameters())).dtype
        shape = (batch_size, self.unet.config.in_channels, sample_size)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"""You have passed a list of generators of length {len(generator)}, but requested an effective batch"""
                f""" size of {batch_size}. Make sure the batch size matches the length of the generators."""
            )

        audio = randn_tensor(shape, generator=generator, device=self.device, dtype=dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, device=audio.device)
        self.scheduler.timesteps = self.scheduler.timesteps.to(dtype)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(audio, t).sample

            # 2. compute previous sample: x_t -> x_t-1
            audio = self.scheduler.step(model_output, t, audio).prev_sample

        audio = audio.clamp(-1, 1).float().cpu().numpy()
        # trim the padding introduced to satisfy the down-scale constraint
        audio = audio[:, :, :original_sample_size]

        if not return_dict:
            return (audio,)

        return AudioPipelineOutput(audios=audio)
217
"""Lazy import structure for the FNet model family.

Optional submodules (tokenizers, PyTorch modeling code) are only registered
when their backend dependency is importable.
"""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)

# Base import structure; optional entries are appended below.
_import_structure = {"configuration_fnet": ["FNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FNetConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_fnet"] = ["FNetTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_fnet_fast"] = ["FNetTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_fnet"] = [
        "FNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FNetForMaskedLM",
        "FNetForMultipleChoice",
        "FNetForNextSentencePrediction",
        "FNetForPreTraining",
        "FNetForQuestionAnswering",
        "FNetForSequenceClassification",
        "FNetForTokenClassification",
        "FNetLayer",
        "FNetModel",
        "FNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Mirror the lazy structure for static type checkers.
    from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_fnet import FNetTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_fnet_fast import FNetTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_fnet import (
            FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            FNetForMaskedLM,
            FNetForMultipleChoice,
            FNetForNextSentencePrediction,
            FNetForPreTraining,
            FNetForQuestionAnswering,
            FNetForSequenceClassification,
            FNetForTokenClassification,
            FNetLayer,
            FNetModel,
            FNetPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
539
0
"""simple docstring""" import logging import math import os from dataclasses import dataclass, field from glob import glob from typing import Optional from torch.utils.data import ConcatDataset import transformers from transformers import ( CONFIG_MAPPING, MODEL_WITH_LM_HEAD_MAPPING, AutoConfig, AutoModelWithLMHead, AutoTokenizer, DataCollatorForLanguageModeling, DataCollatorForPermutationLanguageModeling, DataCollatorForWholeWordMask, HfArgumentParser, LineByLineTextDataset, LineByLineWithRefDataset, PreTrainedTokenizer, TextDataset, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import is_main_process __lowerCamelCase = logging.getLogger(__name__) __lowerCamelCase = list(MODEL_WITH_LM_HEAD_MAPPING.keys()) __lowerCamelCase = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class UpperCamelCase__: lowerCAmelCase__ : Optional[str] = field( default=__A , metadata={ 'help': ( 'The model checkpoint for weights initialization. Leave None if you want to train a model from' ' scratch.' ) } , ) lowerCAmelCase__ : Optional[str] = field( default=__A , metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(__A )} , ) lowerCAmelCase__ : Optional[str] = field( default=__A , metadata={'help': 'Pretrained config name or path if not the same as model_name'} ) lowerCAmelCase__ : Optional[str] = field( default=__A , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} ) lowerCAmelCase__ : Optional[str] = field( default=__A , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , ) @dataclass class UpperCamelCase__: lowerCAmelCase__ : Optional[str] = field( default=__A , metadata={'help': 'The input training data file (a text file).'} ) lowerCAmelCase__ : Optional[str] = field( default=__A , metadata={ 'help': ( 'The input training data files (multiple files in glob format). 
' 'Very often splitting large files to smaller files can prevent tokenizer going out of memory' ) } , ) lowerCAmelCase__ : Optional[str] = field( default=__A , metadata={'help': 'An optional input evaluation data file to evaluate the perplexity on (a text file).'} , ) lowerCAmelCase__ : Optional[str] = field( default=__A , metadata={'help': 'An optional input train ref data file for whole word mask in Chinese.'} , ) lowerCAmelCase__ : Optional[str] = field( default=__A , metadata={'help': 'An optional input eval ref data file for whole word mask in Chinese.'} , ) lowerCAmelCase__ : bool = field( default=__A , metadata={'help': 'Whether distinct lines of text in the dataset are to be handled as distinct sequences.'} , ) lowerCAmelCase__ : bool = field( default=__A , metadata={'help': 'Train with masked-language modeling loss instead of language modeling.'} ) lowerCAmelCase__ : bool = field(default=__A , metadata={'help': 'Whether ot not to use whole word mask.'} ) lowerCAmelCase__ : float = field( default=0.15 , metadata={'help': 'Ratio of tokens to mask for masked language modeling loss'} ) lowerCAmelCase__ : float = field( default=1 / 6 , metadata={ 'help': ( 'Ratio of length of a span of masked tokens to surrounding context length for permutation language' ' modeling.' ) } , ) lowerCAmelCase__ : int = field( default=5 , metadata={'help': 'Maximum length of a span of masked tokens for permutation language modeling.'} ) lowerCAmelCase__ : int = field( default=-1 , metadata={ 'help': ( 'Optional input sequence length after tokenization.' 'The training dataset will be truncated in block of this size for training.' 'Default to the model max input length for single sentence inputs (take into account special tokens).' 
) } , ) lowerCAmelCase__ : bool = field( default=__A , metadata={'help': 'Overwrite the cached training and evaluation sets'} ) def UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = False , UpperCamelCase__ = None , ): """simple docstring""" def _dataset(UpperCamelCase__ , UpperCamelCase__=None ): if args.line_by_line: if ref_path is not None: if not args.whole_word_mask or not args.mlm: raise ValueError('You need to set world whole masking and mlm to True for Chinese Whole Word Mask' ) return LineByLineWithRefDataset( tokenizer=UpperCamelCase__ , file_path=UpperCamelCase__ , block_size=args.block_size , ref_path=UpperCamelCase__ , ) return LineByLineTextDataset(tokenizer=UpperCamelCase__ , file_path=UpperCamelCase__ , block_size=args.block_size ) else: return TextDataset( tokenizer=UpperCamelCase__ , file_path=UpperCamelCase__ , block_size=args.block_size , overwrite_cache=args.overwrite_cache , cache_dir=UpperCamelCase__ , ) if evaluate: return _dataset(args.eval_data_file , args.eval_ref_file ) elif args.train_data_files: return ConcatDataset([_dataset(UpperCamelCase__ ) for f in glob(args.train_data_files )] ) else: return _dataset(args.train_data_file , args.train_ref_file ) def UpperCAmelCase ( ): """simple docstring""" A__ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) A__ , A__ , A__ = parser.parse_args_into_dataclasses() if data_args.eval_data_file is None and training_args.do_eval: raise ValueError( 'Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file ' 'or remove the --do_eval argument.' ) if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( F'''Output directory ({training_args.output_dir}) already exists and is not empty. Use''' ' --overwrite_output_dir to overcome.' 
) # Setup logging logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( 'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info('Training/evaluation parameters %s' , UpperCamelCase__ ) # Set seed set_seed(training_args.seed ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. if model_args.config_name: A__ = AutoConfig.from_pretrained(model_args.config_name , cache_dir=model_args.cache_dir ) elif model_args.model_name_or_path: A__ = AutoConfig.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir ) else: A__ = CONFIG_MAPPING[model_args.model_type]() logger.warning('You are instantiating a new config instance from scratch.' ) if model_args.tokenizer_name: A__ = AutoTokenizer.from_pretrained(model_args.tokenizer_name , cache_dir=model_args.cache_dir ) elif model_args.model_name_or_path: A__ = AutoTokenizer.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir ) else: raise ValueError( 'You are instantiating a new tokenizer from scratch. 
This is not supported, but you can do it from another' ' script, save it,and load it from here, using --tokenizer_name' ) if model_args.model_name_or_path: A__ = AutoModelWithLMHead.from_pretrained( model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=UpperCamelCase__ , cache_dir=model_args.cache_dir , ) else: logger.info('Training new model from scratch' ) A__ = AutoModelWithLMHead.from_config(UpperCamelCase__ ) model.resize_token_embeddings(len(UpperCamelCase__ ) ) if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm: raise ValueError( 'BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the' '--mlm flag (masked language modeling).' ) if data_args.block_size <= 0: A__ = tokenizer.max_len # Our input block size will be the max possible for the model else: A__ = min(data_args.block_size , tokenizer.max_len ) # Get datasets A__ = ( get_dataset(UpperCamelCase__ , tokenizer=UpperCamelCase__ , cache_dir=model_args.cache_dir ) if training_args.do_train else None ) A__ = ( get_dataset(UpperCamelCase__ , tokenizer=UpperCamelCase__ , evaluate=UpperCamelCase__ , cache_dir=model_args.cache_dir ) if training_args.do_eval else None ) if config.model_type == "xlnet": A__ = DataCollatorForPermutationLanguageModeling( tokenizer=UpperCamelCase__ , plm_probability=data_args.plm_probability , max_span_length=data_args.max_span_length , ) else: if data_args.mlm and data_args.whole_word_mask: A__ = DataCollatorForWholeWordMask( tokenizer=UpperCamelCase__ , mlm_probability=data_args.mlm_probability ) else: A__ = DataCollatorForLanguageModeling( tokenizer=UpperCamelCase__ , mlm=data_args.mlm , mlm_probability=data_args.mlm_probability ) # Initialize our Trainer A__ = Trainer( model=UpperCamelCase__ , args=UpperCamelCase__ , data_collator=UpperCamelCase__ , train_dataset=UpperCamelCase__ , eval_dataset=UpperCamelCase__ , prediction_loss_only=UpperCamelCase__ , ) # 
Training if training_args.do_train: A__ = ( model_args.model_name_or_path if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path ) else None ) trainer.train(model_path=UpperCamelCase__ ) trainer.save_model() # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) if trainer.is_world_master(): tokenizer.save_pretrained(training_args.output_dir ) # Evaluation A__ = {} if training_args.do_eval: logger.info('*** Evaluate ***' ) A__ = trainer.evaluate() A__ = math.exp(eval_output['eval_loss'] ) A__ = {'perplexity': perplexity} A__ = os.path.join(training_args.output_dir , 'eval_results_lm.txt' ) if trainer.is_world_master(): with open(UpperCamelCase__ , 'w' ) as writer: logger.info('***** Eval results *****' ) for key in sorted(result.keys() ): logger.info(' %s = %s' , UpperCamelCase__ , str(result[key] ) ) writer.write('%s = %s\n' % (key, str(result[key] )) ) results.update(UpperCamelCase__ ) return results def UpperCAmelCase ( UpperCamelCase__ ): """simple docstring""" main() if __name__ == "__main__": main()
536
"""simple docstring""" def UpperCAmelCase ( UpperCamelCase__ ): """simple docstring""" A__ , A__ = [], [] while len(UpperCamelCase__ ) > 1: A__ , A__ = min(UpperCamelCase__ ), max(UpperCamelCase__ ) start.append(UpperCamelCase__ ) end.append(UpperCamelCase__ ) collection.remove(UpperCamelCase__ ) collection.remove(UpperCamelCase__ ) end.reverse() return start + collection + end if __name__ == "__main__": __lowerCamelCase = input("Enter numbers separated by a comma:\n").strip() __lowerCamelCase = [int(item) for item in user_input.split(",")] print(*merge_sort(unsorted), sep=",")
536
1
"""simple docstring""" from ...utils import ( OptionalDependencyNotAvailable, is_flax_available, is_torch_available, is_transformers_available, ) try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .multicontrolnet import MultiControlNetModel from .pipeline_controlnet import StableDiffusionControlNetPipeline from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline if is_transformers_available() and is_flax_available(): from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
633
"""simple docstring""" from __future__ import annotations import matplotlib.pyplot as plt # type: ignore import numpy # initial triangle of Koch snowflake a : int = numpy.array([0, 0]) a : Optional[Any] = numpy.array([0.5, 0.866_0254]) a : Tuple = numpy.array([1, 0]) a : List[str] = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1] def _SCREAMING_SNAKE_CASE ( _lowercase : list[numpy.ndarray] , _lowercase : int ) ->list[numpy.ndarray]: '''simple docstring''' a : List[str] = initial_vectors for _ in range(_lowercase ): a : Optional[Any] = iteration_step(_lowercase ) return vectors def _SCREAMING_SNAKE_CASE ( _lowercase : list[numpy.ndarray] ) ->list[numpy.ndarray]: '''simple docstring''' a : Union[str, Any] = [] for i, start_vector in enumerate(vectors[:-1] ): a : str = vectors[i + 1] new_vectors.append(_lowercase ) a : Optional[int] = end_vector - start_vector new_vectors.append(start_vector + difference_vector / 3 ) new_vectors.append( start_vector + difference_vector / 3 + rotate(difference_vector / 3 , 60 ) ) new_vectors.append(start_vector + difference_vector * 2 / 3 ) new_vectors.append(vectors[-1] ) return new_vectors def _SCREAMING_SNAKE_CASE ( _lowercase : numpy.ndarray , _lowercase : float ) ->numpy.ndarray: '''simple docstring''' a : int = numpy.radians(_lowercase ) a, a : Optional[int] = numpy.cos(_lowercase ), numpy.sin(_lowercase ) a : Dict = numpy.array(((c, -s), (s, c)) ) return numpy.dot(_lowercase , _lowercase ) def _SCREAMING_SNAKE_CASE ( _lowercase : list[numpy.ndarray] ) ->None: '''simple docstring''' a : Dict = plt.gca() axes.set_aspect("equal" ) # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all # y-coordinates as inputs, which are constructed from the vector-list using # zip() a, a : Any = zip(*_lowercase ) plt.plot(_lowercase , _lowercase ) plt.show() if __name__ == "__main__": import doctest doctest.testmod() a : Optional[int] = iterate(INITIAL_VECTORS, 5) plot(processed_vectors)
633
1
import itertools
import json
import os
import unittest

from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class LongformerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Tokenizer tests for Longformer (byte-level BPE, RoBERTa-style).

    NOTE(review): method/attribute names and the boolean arguments below are
    reconstructed from the canonical upstream test file; the original text had
    them obfuscated -- confirm against transformers'
    tests/models/longformer/test_tokenization_longformer.py.
    """

    tokenizer_class = LongformerTokenizer
    test_slow_tokenizer = True
    rust_tokenizer_class = LongformerTokenizerFast
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)  # , add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def longformer_dict_integration_testing(self):
        tokenizer = self.get_tokenizer()
        # ids include <s>=0 and </s>=2, i.e. encoding with special tokens
        self.assertListEqual(tokenizer.encode("Hello world!", add_special_tokens=True), [0, 31_414, 232, 328, 2])
        self.assertListEqual(
            tokenizer.encode("Hello world! cécé herlolip 418", add_special_tokens=True),
            [0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2],
        )

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("allenai/longformer-base-4096")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
        )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode

    def test_space_encoding(self):
        tokenizer = self.get_tokenizer()

        sequence = "Encode this sequence."
        space_encoding = tokenizer.byte_encoder[" ".encode("utf-8")[0]]

        # Testing encoder arguments
        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=False)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertNotEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertEqual(first_char, space_encoding)

        tokenizer.add_special_tokens({"bos_token": "<s>"})
        encoded = tokenizer.encode(sequence, add_special_tokens=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[1])[0]
        self.assertNotEqual(first_char, space_encoding)

        # Testing spaces after special tokens
        mask = "<mask>"
        tokenizer.add_special_tokens(
            {"mask_token": AddedToken(mask, lstrip=True, rstrip=False)}
        )  # mask token has a left space
        mask_ind = tokenizer.convert_tokens_to_ids(mask)

        sequence = "Encode <mask> sequence"
        sequence_nospace = "Encode <mask>sequence"

        encoded = tokenizer.encode(sequence)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence_nospace)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertNotEqual(first_char, space_encoding)

    def test_pretokenized_inputs(self):
        pass

    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})"""):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))

                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])

                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )

    def test_change_add_prefix_space_and_trim_offsets_args(self):
        for trim_offsets, add_prefix_space in itertools.product([True, False], repeat=2):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname, use_fast=True, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets
            )

            pre_tokenizer_state = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__())
            post_processor_state = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__())

            self.assertEqual(pre_tokenizer_state["add_prefix_space"], add_prefix_space)
            self.assertEqual(post_processor_state["add_prefix_space"], add_prefix_space)
            self.assertEqual(post_processor_state["trim_offsets"], trim_offsets)

    def test_offsets_mapping_with_different_add_prefix_space_and_trim_space_arguments(self):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
        # `trim_offsets`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})"""):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`
                text = f"""{text_of_1_token} {text_of_1_token}"""

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                text = f""" {text}"""

                # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                #     pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                # )
                # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                # self.assertEqual(
                #     encoding.offset_mapping[1],
                #     (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                # )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
153
import os
import textwrap

import pyarrow as pa
import pytest

from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv

from ..utils import require_pil


@pytest.fixture
def csv_file(tmp_path):
    """A well-formed two-column CSV file; returns its path as a string."""
    filename = tmp_path / "file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def malformed_csv_file(tmp_path):
    """A CSV whose last data row has a trailing extra field, which trips the parser."""
    filename = tmp_path / "malformed_file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20,
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_image(tmp_path, image_file):
    """A one-column CSV whose single row is the path of ``image_file``.

    NOTE(review): ``image_file`` is expected to come from the shared conftest — confirm.
    """
    filename = tmp_path / "csv_with_image.csv"
    data = textwrap.dedent(
        f"""\
        image
        {image_file}
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_label(tmp_path):
    """A one-column CSV of string class labels ("good"/"bad")."""
    filename = tmp_path / "csv_with_label.csv"
    data = textwrap.dedent(
        """\
        label
        good
        bad
        good
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_int_list(tmp_path):
    """A one-column CSV whose values are space-separated integer lists."""
    filename = tmp_path / "csv_with_int_list.csv"
    data = textwrap.dedent(
        """\
        int_list
        1 2 3
        4 5 6
        7 8 9
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


def test_csv_generate_tables_raises_error_with_malformed_csv(csv_file, malformed_csv_file, caplog):
    """A malformed file must raise and log an ERROR naming the offending file."""
    csv = Csv()
    generator = csv._generate_tables([[csv_file, malformed_csv_file]])
    with pytest.raises(ValueError, match="Error tokenizing data"):
        for _ in generator:
            pass
    assert any(
        record.levelname == "ERROR"
        and "Failed to read file" in record.message
        and os.path.basename(malformed_csv_file) in record.message
        for record in caplog.records
    )


@require_pil
def test_csv_cast_image(csv_file_with_image):
    """A string column declared as Image() is cast to the Image storage type."""
    with open(csv_file_with_image, encoding="utf-8") as f:
        image_file = f.read().splitlines()[1]
    csv = Csv(encoding="utf-8", features=Features({"image": Image()}))
    generator = csv._generate_tables([[csv_file_with_image]])
    pa_table = pa.concat_tables([table for _, table in generator])
    # Image()() returns the Arrow storage type of the Image feature
    assert pa_table.schema.field("image").type == Image()()
    generated_content = pa_table.to_pydict()["image"]
    assert generated_content == [{"path": image_file, "bytes": None}]


def test_csv_cast_label(csv_file_with_label):
    """A string column declared as ClassLabel is cast to its integer ids."""
    with open(csv_file_with_label, encoding="utf-8") as f:
        labels = f.read().splitlines()[1:]
    csv = Csv(encoding="utf-8", features=Features({"label": ClassLabel(names=["good", "bad"])}))
    generator = csv._generate_tables([[csv_file_with_label]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field("label").type == ClassLabel(names=["good", "bad"])()
    generated_content = pa_table.to_pydict()["label"]
    # was ".straint(" in the mangled source; str2int maps label names to ids
    assert generated_content == [ClassLabel(names=["good", "bad"]).str2int(label) for label in labels]


def test_csv_convert_int_list(csv_file_with_int_list):
    """A converters= callable turns space-separated strings into int lists."""
    # the original lambda bound one name but read another; bind and use `x` consistently
    csv = Csv(encoding="utf-8", sep=",", converters={"int_list": lambda x: [int(i) for i in x.split()]})
    generator = csv._generate_tables([[csv_file_with_int_list]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa.types.is_list(pa_table.schema.field("int_list").type)
    generated_content = pa_table.to_pydict()["int_list"]
    assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
153
1
"""Tests for reading and writing Hugging Face datasets through SQL (SQLite)."""
import contextlib
import os
import sqlite3  # was the garbled "sqlitea" in the mangled source

import pytest

from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter

from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy


def _check_sql_dataset(dataset, expected_features):
    """Shared assertions for a dataset read back from the fixture SQLite table."""
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


# NOTE(review): `sqlite_path` and `set_sqlalchemy_silence_uber_warning` are assumed to be
# conftest fixtures — confirm their names against the shared test fixtures.
@require_sqlalchemy
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_sql_keep_in_memory(keep_in_memory, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = SqlDatasetReader(
            "dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_sql_dataset(dataset, expected_features)


@require_sqlalchemy
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_sql_features(features, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, features=features, cache_dir=cache_dir).read()
    _check_sql_dataset(dataset, expected_features)


def iter_sql_file(sqlite_path):
    """Yield every row of the ``dataset`` table in the SQLite file at ``sqlite_path``."""
    with contextlib.closing(sqlite3.connect(sqlite_path)) as con:
        cur = con.cursor()
        cur.execute("SELECT * FROM dataset")
        yield from cur


@require_sqlalchemy
def test_dataset_to_sql(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=1).write()

    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)
    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2


@require_sqlalchemy
def test_dataset_to_sql_multiproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=2).write()

    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)
    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2


@require_sqlalchemy
def test_dataset_to_sql_invalidproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    """num_proc=0 is invalid and must raise."""
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    with pytest.raises(ValueError):
        SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=0).write()
195
"""Convert original fairseq TrOCR checkpoints to HuggingFace VisionEncoderDecoder format."""
import argparse
from pathlib import Path

import requests
import torch
from PIL import Image

from transformers import (
    RobertaTokenizer,
    TrOCRConfig,
    TrOCRForCausalLM,
    TrOCRProcessor,
    VisionEncoderDecoderModel,
    ViTConfig,
    ViTImageProcessor,
    ViTModel,
)
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def create_rename_keys(encoder_config, decoder_config):
    """Return (old_name, new_name) pairs mapping DeiT encoder weights to the HF ViT layout."""
    rename_keys = []
    for i in range(encoder_config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append(
            (f"encoder.deit.blocks.{i}.norm1.weight", f"encoder.encoder.layer.{i}.layernorm_before.weight")
        )
        rename_keys.append((f"encoder.deit.blocks.{i}.norm1.bias", f"encoder.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append(
            (f"encoder.deit.blocks.{i}.attn.proj.weight", f"encoder.encoder.layer.{i}.attention.output.dense.weight")
        )
        rename_keys.append(
            (f"encoder.deit.blocks.{i}.attn.proj.bias", f"encoder.encoder.layer.{i}.attention.output.dense.bias")
        )
        rename_keys.append(
            (f"encoder.deit.blocks.{i}.norm2.weight", f"encoder.encoder.layer.{i}.layernorm_after.weight")
        )
        rename_keys.append((f"encoder.deit.blocks.{i}.norm2.bias", f"encoder.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append(
            (f"encoder.deit.blocks.{i}.mlp.fc1.weight", f"encoder.encoder.layer.{i}.intermediate.dense.weight")
        )
        rename_keys.append(
            (f"encoder.deit.blocks.{i}.mlp.fc1.bias", f"encoder.encoder.layer.{i}.intermediate.dense.bias")
        )
        rename_keys.append(
            (f"encoder.deit.blocks.{i}.mlp.fc2.weight", f"encoder.encoder.layer.{i}.output.dense.weight")
        )
        rename_keys.append((f"encoder.deit.blocks.{i}.mlp.fc2.bias", f"encoder.encoder.layer.{i}.output.dense.bias"))

    # cls token, position embeddings and patch embeddings of encoder
    rename_keys.extend(
        [
            ("encoder.deit.cls_token", "encoder.embeddings.cls_token"),
            ("encoder.deit.pos_embed", "encoder.embeddings.position_embeddings"),
            ("encoder.deit.patch_embed.proj.weight", "encoder.embeddings.patch_embeddings.projection.weight"),
            ("encoder.deit.patch_embed.proj.bias", "encoder.embeddings.patch_embeddings.projection.bias"),
            ("encoder.deit.norm.weight", "encoder.layernorm.weight"),
            ("encoder.deit.norm.bias", "encoder.layernorm.bias"),
        ]
    )
    return rename_keys


def read_in_q_k_v(state_dict, encoder_config):
    """Split each fused qkv projection into separate query/key/value weights (in place)."""
    for i in range(encoder_config.num_hidden_layers):
        # queries, keys and values (only weights, no biases)
        in_proj_weight = state_dict.pop(f"encoder.deit.blocks.{i}.attn.qkv.weight")
        # NOTE(review): destination key names follow the HF ViT naming scheme — the
        # mangled source lost the assignment targets; confirm against modeling_vit.
        state_dict[f"encoder.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : encoder_config.hidden_size, :
        ]
        state_dict[f"encoder.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            encoder_config.hidden_size : encoder_config.hidden_size * 2, :
        ]
        state_dict[f"encoder.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -encoder_config.hidden_size :, :
        ]


def rename_key(dct, old, new):
    """Move ``dct[old]`` to ``dct[new]``."""
    val = dct.pop(old)
    dct[new] = val


def prepare_img(checkpoint_url):
    """Download a sample image matching the checkpoint flavour (handwritten vs printed)."""
    if "handwritten" in checkpoint_url:
        url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg"  # industry
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg"
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
    elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
        url = "https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg"
    im = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return im


@torch.no_grad()
def convert_tr_ocr_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    """Copy/paste/tweak the original TrOCR weights into an HF VisionEncoderDecoderModel,
    verify the logits on a sample image, and save model + processor."""
    encoder_config = ViTConfig(image_size=384, qkv_bias=False)
    decoder_config = TrOCRConfig()

    # size of the architecture
    # NOTE(review): the mangled source lost these attribute targets; values restored
    # from the checkpoint-size table — confirm against the upstream conversion script.
    if "base" in checkpoint_url:
        decoder_config.encoder_hidden_size = 768
    elif "large" in checkpoint_url:
        # use ViT-large encoder
        encoder_config.hidden_size = 1024
        encoder_config.intermediate_size = 4096
        encoder_config.num_hidden_layers = 24
        encoder_config.num_attention_heads = 16
        decoder_config.encoder_hidden_size = 1024
    else:
        raise ValueError("Should either find 'base' or 'large' in checkpoint URL")

    # the large-printed + stage1 checkpoints use sinusoidal position embeddings, no layernorm afterwards
    if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
        decoder_config.tie_word_embeddings = False
        decoder_config.activation_function = "relu"
        decoder_config.max_position_embeddings = 1024
        decoder_config.scale_embedding = True
        decoder_config.use_learned_position_embeddings = False
        decoder_config.layernorm_embedding = False

    # load HuggingFace model
    encoder = ViTModel(encoder_config, add_pooling_layer=False)
    decoder = TrOCRForCausalLM(decoder_config)
    model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
    model.eval()

    # load state_dict of original model, rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", check_hash=True)["model"]

    rename_keys = create_rename_keys(encoder_config, decoder_config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, encoder_config)

    # remove parameters we don't need
    del state_dict["encoder.deit.head.weight"]
    del state_dict["encoder.deit.head.bias"]
    del state_dict["decoder.version"]

    # add prefix to decoder keys (except the output projection, which is not wrapped)
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("decoder") and "output_projection" not in key:
            state_dict["decoder.model." + key] = val
        else:
            state_dict[key] = val

    # load state dict
    model.load_state_dict(state_dict)

    # Check outputs on an image
    image_processor = ViTImageProcessor(size=encoder_config.image_size)
    tokenizer = RobertaTokenizer.from_pretrained("roberta-large")
    processor = TrOCRProcessor(image_processor, tokenizer)

    pixel_values = processor(images=prepare_img(checkpoint_url), return_tensors="pt").pixel_values

    # verify logits
    decoder_input_ids = torch.tensor([[model.config.decoder.decoder_start_token_id]])
    outputs = model(pixel_values=pixel_values, decoder_input_ids=decoder_input_ids)
    logits = outputs.logits

    expected_shape = torch.Size([1, 1, 50265])
    if "trocr-base-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-1.4502, -4.6683, -0.5347, -2.9291, 9.1435, -3.0571, 8.9764, 1.7560, 8.7358, -1.5311]
        )
    elif "trocr-large-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-2.6437, -1.3129, -2.2596, -5.3455, 6.3539, 1.7604, 5.4991, 1.4702, 5.6113, 2.0170]
        )
    elif "trocr-base-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-5.6816, -5.8388, 1.1398, -6.9034, 6.8505, -2.4393, 1.2284, -1.0232, -1.9661, -3.9210]
        )
    elif "trocr-large-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-6.0162, -7.0959, 4.4155, -5.1063, 7.0468, -3.1631, 2.6466, -0.3081, -0.8106, -1.7535]
        )

    if "stage1" not in checkpoint_url:
        assert logits.shape == expected_shape, "Shape of logits not as expected"
        assert torch.allclose(logits[0, 0, :10], expected_slice, atol=1e-3), "First elements of logits not as expected"

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving processor to {pytorch_dump_folder_path}")
    processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--checkpoint_url",
        default="https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt",
        type=str,
        help="URL to the original PyTorch checkpoint (.pth file).",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    args = parser.parse_args()
    convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
311
0
"""simple docstring""" import random from .binary_exp_mod import bin_exp_mod def UpperCamelCase_ ( SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=1_0_0_0 ): if n < 2: return False if n % 2 == 0: return n == 2 # this means n is odd SCREAMING_SNAKE_CASE = n - 1 SCREAMING_SNAKE_CASE = 0 while d % 2 == 0: d /= 2 exp += 1 # n - 1=d*(2**exp) SCREAMING_SNAKE_CASE = 0 while count < prec: SCREAMING_SNAKE_CASE = random.randint(2, n - 1 ) SCREAMING_SNAKE_CASE = bin_exp_mod(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) if b != 1: SCREAMING_SNAKE_CASE = True for _ in range(SCREAMING_SNAKE_CASE_ ): if b == n - 1: SCREAMING_SNAKE_CASE = False break SCREAMING_SNAKE_CASE = b * b b %= n if flag: return False count += 1 return True if __name__ == "__main__": snake_case = abs(int(input('Enter bound : ').strip())) print('Here\'s the list of primes:') print(', '.join(str(i) for i in range(n + 1) if is_prime_big(i)))
406
"""simple docstring""" from functools import reduce snake_case = ( '73167176531330624919225119674426574742355349194934' '96983520312774506326239578318016984801869478851843' '85861560789112949495459501737958331952853208805511' '12540698747158523863050715693290963295227443043557' '66896648950445244523161731856403098711121722383113' '62229893423380308135336276614282806444486645238749' '30358907296290491560440772390713810515859307960866' '70172427121883998797908792274921901699720888093776' '65727333001053367881220235421809751254540594752243' '52584907711670556013604839586446706324415722155397' '53697817977846174064955149290862569321978468622482' '83972241375657056057490261407972968652414535100474' '82166370484403199890008895243450658541227588666881' '16427171479924442928230863465674813919123162824586' '17866458359124566529476545682848912883142607690042' '24219022671055626321111109370544217506941658960408' '07198403850962455444362981230987879927244284909188' '84580156166097919133875499200524063689912560717606' '05886116467109405077541002256983155200055935729725' '71636269561882670428252483600823257530420752963450' ) def UpperCamelCase_ ( SCREAMING_SNAKE_CASE_ = N ): return max( # mypy cannot properly interpret reduce int(reduce(lambda SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ : str(int(SCREAMING_SNAKE_CASE_ ) * int(SCREAMING_SNAKE_CASE_ ) ), n[i : i + 1_3] ) ) for i in range(len(SCREAMING_SNAKE_CASE_ ) - 1_2 ) ) if __name__ == "__main__": print(f'{solution() = }')
406
1
"""Tests for the framework-generic tensor helpers in ``transformers.utils``."""
import unittest

import numpy as np

from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
    expand_dims,
    flatten_dict,
    is_flax_available,
    is_tf_available,
    is_torch_available,
    reshape,
    squeeze,
    transpose,
)


if is_flax_available():
    import jax.numpy as jnp

if is_tf_available():
    import tensorflow as tf

if is_torch_available():
    import torch


class GenericTester(unittest.TestCase):
    """Each helper is checked against its numpy reference, per backend."""

    def test_flatten_dict(self):
        input_dict = {
            "task_specific_params": {
                "summarization": {"length_penalty": 1.0, "max_length": 128, "min_length": 12, "num_beams": 4},
                "summarization_cnn": {"length_penalty": 2.0, "max_length": 142, "min_length": 56, "num_beams": 4},
                "summarization_xsum": {"length_penalty": 1.0, "max_length": 62, "min_length": 11, "num_beams": 6},
            }
        }
        expected_dict = {
            "task_specific_params.summarization.length_penalty": 1.0,
            "task_specific_params.summarization.max_length": 128,
            "task_specific_params.summarization.min_length": 12,
            "task_specific_params.summarization.num_beams": 4,
            "task_specific_params.summarization_cnn.length_penalty": 2.0,
            "task_specific_params.summarization_cnn.max_length": 142,
            "task_specific_params.summarization_cnn.min_length": 56,
            "task_specific_params.summarization_cnn.num_beams": 4,
            "task_specific_params.summarization_xsum.length_penalty": 1.0,
            "task_specific_params.summarization_xsum.max_length": 62,
            "task_specific_params.summarization_xsum.min_length": 11,
            "task_specific_params.summarization_xsum.num_beams": 6,
        }
        self.assertEqual(flatten_dict(input_dict), expected_dict)

    def test_transpose_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(transpose(x), x.transpose()))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), x.transpose((1, 2, 0))))

    @require_torch
    def test_transpose_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_tf
    def test_transpose_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_flax
    def test_transpose_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x), np.asarray(transpose(t))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), np.asarray(transpose(t, axes=(1, 2, 0)))))

    def test_reshape_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.reshape(x, (4, 3))))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.reshape(x, (12, 5))))

    @require_torch
    def test_reshape_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_tf
    def test_reshape_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_flax
    def test_reshape_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.asarray(reshape(t, (4, 3)))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.asarray(reshape(t, (12, 5)))))

    def test_squeeze_numpy(self):
        x = np.random.randn(1, 3, 4)
        self.assertTrue(np.allclose(squeeze(x), np.squeeze(x)))

        x = np.random.randn(1, 4, 1, 5)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.squeeze(x, axis=2)))

    @require_torch
    def test_squeeze_torch(self):
        x = np.random.randn(1, 3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_tf
    def test_squeeze_tf(self):
        x = np.random.randn(1, 3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_flax
    def test_squeeze_flax(self):
        x = np.random.randn(1, 3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x), np.asarray(squeeze(t))))

        x = np.random.randn(1, 4, 1, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.asarray(squeeze(t, axis=2))))

    def test_expand_dims_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.expand_dims(x, axis=1)))

    @require_torch
    def test_expand_dims_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_tf
    def test_expand_dims_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_flax
    def test_expand_dims_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.asarray(expand_dims(t, axis=1))))
584
"""simple docstring""" import string from math import logaa def A ( _A, _A ): """simple docstring""" snake_case_ :Union[str, Any] = document.translate( str.maketrans("", "", string.punctuation ) ).replace("\n", "" ) snake_case_ :Tuple = document_without_punctuation.split(" " ) # word tokenization return len([word for word in tokenize_document if word.lower() == term.lower()] ) def A ( _A, _A ): """simple docstring""" snake_case_ :Dict = corpus.lower().translate( str.maketrans("", "", string.punctuation ) ) # strip all punctuation and replace it with '' snake_case_ :Any = corpus_without_punctuation.split("\n" ) snake_case_ :Dict = term.lower() return (len([doc for doc in docs if term in doc] ), len(_A )) def A ( _A, _A, _A=False ): """simple docstring""" if smoothing: if n == 0: raise ValueError("log10(0) is undefined." ) return round(1 + logaa(n / (1 + df) ), 3 ) if df == 0: raise ZeroDivisionError("df must be > 0" ) elif n == 0: raise ValueError("log10(0) is undefined." ) return round(logaa(n / df ), 3 ) def A ( _A, _A ): """simple docstring""" return round(tf * idf, 3 )
584
1
'''simple docstring''' import unittest from transformers import DebertaVaConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( DebertaVaForMaskedLM, DebertaVaForMultipleChoice, DebertaVaForQuestionAnswering, DebertaVaForSequenceClassification, DebertaVaForTokenClassification, DebertaVaModel, ) from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST class snake_case_ ( A__ ): """simple docstring""" def __init__( self , UpperCamelCase , UpperCamelCase=13 , UpperCamelCase=7 , UpperCamelCase=True , UpperCamelCase=True , UpperCamelCase=True , UpperCamelCase=True , UpperCamelCase=99 , UpperCamelCase=32 , UpperCamelCase=5 , UpperCamelCase=4 , UpperCamelCase=37 , UpperCamelCase="gelu" , UpperCamelCase=0.1 , UpperCamelCase=0.1 , UpperCamelCase=5_12 , UpperCamelCase=16 , UpperCamelCase=2 , UpperCamelCase=0.0_2 , UpperCamelCase=False , UpperCamelCase=True , UpperCamelCase="None" , UpperCamelCase=3 , UpperCamelCase=4 , UpperCamelCase=None , ): lowerCamelCase__ = parent lowerCamelCase__ = batch_size lowerCamelCase__ = seq_length lowerCamelCase__ = is_training lowerCamelCase__ = use_input_mask lowerCamelCase__ = use_token_type_ids lowerCamelCase__ = use_labels lowerCamelCase__ = vocab_size lowerCamelCase__ = hidden_size lowerCamelCase__ = num_hidden_layers lowerCamelCase__ = num_attention_heads lowerCamelCase__ = intermediate_size lowerCamelCase__ = hidden_act lowerCamelCase__ = hidden_dropout_prob lowerCamelCase__ = attention_probs_dropout_prob lowerCamelCase__ = max_position_embeddings lowerCamelCase__ = type_vocab_size lowerCamelCase__ = type_sequence_label_size lowerCamelCase__ = initializer_range 
lowerCamelCase__ = num_labels lowerCamelCase__ = num_choices lowerCamelCase__ = relative_attention lowerCamelCase__ = position_biased_input lowerCamelCase__ = pos_att_type lowerCamelCase__ = scope def __UpperCAmelCase ( self): lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) lowerCamelCase__ = None if self.use_input_mask: lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2) lowerCamelCase__ = None if self.use_token_type_ids: lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size) lowerCamelCase__ = None lowerCamelCase__ = None lowerCamelCase__ = None if self.use_labels: lowerCamelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size) lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels) lowerCamelCase__ = ids_tensor([self.batch_size] , self.num_choices) lowerCamelCase__ = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __UpperCAmelCase ( self): return DebertaVaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , ) def __UpperCAmelCase ( self , UpperCamelCase): self.parent.assertListEqual(list(result.loss.size()) , []) def __UpperCAmelCase ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase): lowerCamelCase__ = 
DebertaVaModel(config=UpperCamelCase) model.to(UpperCamelCase) model.eval() lowerCamelCase__ = model(UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase)[0] lowerCamelCase__ = model(UpperCamelCase , token_type_ids=UpperCamelCase)[0] lowerCamelCase__ = model(UpperCamelCase)[0] self.parent.assertListEqual(list(sequence_output.size()) , [self.batch_size, self.seq_length, self.hidden_size]) def __UpperCAmelCase ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase): lowerCamelCase__ = DebertaVaForMaskedLM(config=UpperCamelCase) model.to(UpperCamelCase) model.eval() lowerCamelCase__ = model(UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , labels=UpperCamelCase) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size)) def __UpperCAmelCase ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase): lowerCamelCase__ = self.num_labels lowerCamelCase__ = DebertaVaForSequenceClassification(UpperCamelCase) model.to(UpperCamelCase) model.eval() lowerCamelCase__ = model(UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , labels=UpperCamelCase) self.parent.assertListEqual(list(result.logits.size()) , [self.batch_size, self.num_labels]) self.check_loss_output(UpperCamelCase) def __UpperCAmelCase ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase): lowerCamelCase__ = self.num_labels lowerCamelCase__ = DebertaVaForTokenClassification(config=UpperCamelCase) model.to(UpperCamelCase) model.eval() lowerCamelCase__ = model(UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , labels=UpperCamelCase) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels)) def __UpperCAmelCase ( self 
, UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase): lowerCamelCase__ = DebertaVaForQuestionAnswering(config=UpperCamelCase) model.to(UpperCamelCase) model.eval() lowerCamelCase__ = model( UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , start_positions=UpperCamelCase , end_positions=UpperCamelCase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length)) def __UpperCAmelCase ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase): lowerCamelCase__ = DebertaVaForMultipleChoice(config=UpperCamelCase) model.to(UpperCamelCase) model.eval() lowerCamelCase__ = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous() lowerCamelCase__ = token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous() lowerCamelCase__ = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous() lowerCamelCase__ = model( UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , labels=UpperCamelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices)) def __UpperCAmelCase ( self): lowerCamelCase__ = self.prepare_config_and_inputs() ( ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ) = config_and_inputs lowerCamelCase__ = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class snake_case_ ( A__ , A__ , unittest.TestCase ): """simple docstring""" __lowerCAmelCase : str =( ( DebertaVaModel, DebertaVaForMaskedLM, DebertaVaForSequenceClassification, DebertaVaForTokenClassification, DebertaVaForQuestionAnswering, 
DebertaVaForMultipleChoice, ) if is_torch_available() else () ) __lowerCAmelCase : int =( { '''feature-extraction''': DebertaVaModel, '''fill-mask''': DebertaVaForMaskedLM, '''question-answering''': DebertaVaForQuestionAnswering, '''text-classification''': DebertaVaForSequenceClassification, '''token-classification''': DebertaVaForTokenClassification, '''zero-shot''': DebertaVaForSequenceClassification, } if is_torch_available() else {} ) __lowerCAmelCase : Optional[Any] =True __lowerCAmelCase : List[str] =False __lowerCAmelCase : Dict =False __lowerCAmelCase : int =False __lowerCAmelCase : List[str] =False def __UpperCAmelCase ( self): lowerCamelCase__ = DebertaVaModelTester(self) lowerCamelCase__ = ConfigTester(self , config_class=UpperCamelCase , hidden_size=37) def __UpperCAmelCase ( self): self.config_tester.run_common_tests() def __UpperCAmelCase ( self): lowerCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_model(*UpperCamelCase) def __UpperCAmelCase ( self): lowerCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_sequence_classification(*UpperCamelCase) def __UpperCAmelCase ( self): lowerCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_masked_lm(*UpperCamelCase) def __UpperCAmelCase ( self): lowerCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_question_answering(*UpperCamelCase) def __UpperCAmelCase ( self): lowerCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_token_classification(*UpperCamelCase) def __UpperCAmelCase ( self): lowerCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_multiple_choice(*UpperCamelCase) @slow def __UpperCAmelCase ( self): for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: 
lowerCamelCase__ = DebertaVaModel.from_pretrained(UpperCamelCase) self.assertIsNotNone(UpperCamelCase) @require_torch @require_sentencepiece @require_tokenizers class snake_case_ ( unittest.TestCase ): """simple docstring""" @unittest.skip(reason="Model not available yet") def __UpperCAmelCase ( self): pass @slow def __UpperCAmelCase ( self): lowerCamelCase__ = DebertaVaModel.from_pretrained("microsoft/deberta-v2-xlarge") lowerCamelCase__ = torch.tensor([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]]) lowerCamelCase__ = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]) with torch.no_grad(): lowerCamelCase__ = model(UpperCamelCase , attention_mask=UpperCamelCase)[0] # compare the actual values for a slice. lowerCamelCase__ = torch.tensor( [[[0.2_3_5_6, 0.1_9_4_8, 0.0_3_6_9], [-0.1_0_6_3, 0.3_5_8_6, -0.5_1_5_2], [-0.6_3_9_9, -0.0_2_5_9, -0.2_5_2_5]]]) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , UpperCamelCase , atol=1E-4) , f"""{output[:, 1:4, 1:4]}""")
711
"""Generate and print all subsequences of a sequence via backtracking."""
from typing import Any


def generate_all_subsequences(sequence: list[Any]) -> None:
    """Print every subsequence of *sequence*, one per line.

    The 2**n subsequences are produced by exploring a binary
    include/exclude state-space tree.
    """
    create_state_space_tree(sequence, [], 0)


def create_state_space_tree(
    sequence: list[Any], current_subsequence: list[Any], index: int
) -> None:
    """Recursively explore the include/exclude choice for each element.

    Args:
        sequence: the full input sequence (never mutated).
        current_subsequence: accumulator for the elements chosen so far;
            mutated in place and restored on backtrack.
        index: position of the next element to decide on.
    """
    if index == len(sequence):
        # Leaf of the state-space tree: one complete subsequence.
        print(current_subsequence)
        return

    # Branch 1: exclude sequence[index].
    create_state_space_tree(sequence, current_subsequence, index + 1)
    # Branch 2: include sequence[index], then undo the choice (backtrack).
    current_subsequence.append(sequence[index])
    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.pop()


if __name__ == "__main__":
    seq: list[Any] = [3, 1, 2, 4]
    generate_all_subsequences(seq)

    seq.clear()
    seq.extend(["A", "B", "C"])
    generate_all_subsequences(seq)
426
0
import json
import os

from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_botoa_available
from .config_args import SageMakerConfig
from .config_utils import (
    DYNAMO_BACKENDS,
    _ask_field,
    _ask_options,
    _convert_dynamo_backend,
    _convert_mixed_precision,
    _convert_sagemaker_distributed_mode,
    _convert_yes_no_to_bool,
)

if is_botoa_available():
    import botoa  # noqa: F401


def _create_iam_role_for_sagemaker(role_name):
    """Create an IAM role (with a SageMaker trust policy and a broad
    SageMaker/ECR/CloudWatch/S3 permission policy) for training jobs.

    If a role with *role_name* already exists, it is reused and a notice
    is printed instead of raising.
    """
    iam_client = botoa.client("iam")
    sagemaker_trust_policy = {
        "Version": "2012-10-17",
        "Statement": [
            {"Effect": "Allow", "Principal": {"Service": "sagemaker.amazonaws.com"}, "Action": "sts:AssumeRole"}
        ],
    }
    try:
        # Create the role, associated with the chosen trust policy.
        iam_client.create_role(
            RoleName=role_name, AssumeRolePolicyDocument=json.dumps(sagemaker_trust_policy, indent=2)
        )
        policy_document = {
            "Version": "2012-10-17",
            "Statement": [
                {
                    "Effect": "Allow",
                    "Action": [
                        "sagemaker:*",
                        "ecr:GetDownloadUrlForLayer",
                        "ecr:BatchGetImage",
                        "ecr:BatchCheckLayerAvailability",
                        "ecr:GetAuthorizationToken",
                        "cloudwatch:PutMetricData",
                        "cloudwatch:GetMetricData",
                        "cloudwatch:GetMetricStatistics",
                        "cloudwatch:ListMetrics",
                        "logs:CreateLogGroup",
                        "logs:CreateLogStream",
                        "logs:DescribeLogStreams",
                        "logs:PutLogEvents",
                        "logs:GetLogEvents",
                        "s3:CreateBucket",
                        "s3:ListBucket",
                        "s3:GetBucketLocation",
                        "s3:GetObject",
                        "s3:PutObject",
                    ],
                    "Resource": "*",
                }
            ],
        }
        # Attach the permission policy to the freshly created role.
        iam_client.put_role_policy(
            RoleName=role_name,
            PolicyName=f"{role_name}_policy_permission",
            PolicyDocument=json.dumps(policy_document, indent=2),
        )
    except iam_client.exceptions.EntityAlreadyExistsException:
        print(f"role {role_name} already exists. Using existing one")


def _get_iam_role_arn(role_name):
    """Return the ARN for an existing IAM role named *role_name*."""
    iam_client = botoa.client("iam")
    return iam_client.get_role(RoleName=role_name)["Role"]["Arn"]


def get_sagemaker_input():
    """Interactively collect a SageMaker launch configuration.

    Walks the user through authentication, IAM role selection/creation,
    Docker image, SageMaker inputs/metrics files, distributed mode,
    torch dynamo options, EC2 instance type, machine count and mixed
    precision, then returns the resulting ``SageMakerConfig``.

    Side effects: may set AWS_* environment variables and create an IAM
    role via ``_create_iam_role_for_sagemaker``.
    """
    credentials_configuration = _ask_options(
        "How do you want to authorize?",
        ["AWS Profile", "Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) "],
        int,
    )
    aws_profile = None
    if credentials_configuration == 0:
        aws_profile = _ask_field("Enter your AWS Profile name: [default] ", default="default")
        os.environ["AWS_PROFILE"] = aws_profile
    else:
        print(
            "Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,"
            "`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`"
        )
        aws_access_key_id = _ask_field("AWS Access Key ID: ")
        os.environ["AWS_ACCESS_KEY_ID"] = aws_access_key_id
        aws_secret_access_key = _ask_field("AWS Secret Access Key: ")
        os.environ["AWS_SECRET_ACCESS_KEY"] = aws_secret_access_key

    aws_region = _ask_field("Enter your AWS Region: [us-east-1]", default="us-east-1")
    os.environ["AWS_DEFAULT_REGION"] = aws_region

    role_management = _ask_options(
        "Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?",
        ["Provide IAM Role name", "Create new IAM role using credentials"],
        int,
    )
    if role_management == 0:
        iam_role_name = _ask_field("Enter your IAM role name: ")
    else:
        iam_role_name = "accelerate_sagemaker_execution_role"
        print(f'Accelerate will create an iam role "{iam_role_name}" using the provided credentials')
        _create_iam_role_for_sagemaker(iam_role_name)

    is_custom_docker_image = _ask_field(
        "Do you want to use custom Docker image? [yes/NO]: ",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    docker_image = None
    if is_custom_docker_image:
        docker_image = _ask_field("Enter your Docker image: ", lambda x: str(x).lower())

    is_sagemaker_inputs_enabled = _ask_field(
        "Do you want to provide SageMaker input channels with data locations? [yes/NO]: ",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    sagemaker_inputs_file = None
    if is_sagemaker_inputs_enabled:
        sagemaker_inputs_file = _ask_field(
            "Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): ",
            lambda x: str(x).lower(),
        )

    is_sagemaker_metrics_enabled = _ask_field(
        "Do you want to enable SageMaker metrics? [yes/NO]: ",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    sagemaker_metrics_file = None
    if is_sagemaker_metrics_enabled:
        sagemaker_metrics_file = _ask_field(
            "Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): ",
            lambda x: str(x).lower(),
        )

    distributed_type = _ask_options(
        "What is the distributed mode?",
        ["No distributed training", "Data parallelism"],
        _convert_sagemaker_distributed_mode,
    )
    dynamo_config = {}
    use_dynamo = _ask_field(
        "Do you wish to optimize your script with torch dynamo?[yes/NO]:",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    if use_dynamo:
        prefix = "dynamo_"
        dynamo_config[prefix + "backend"] = _ask_options(
            "Which dynamo backend would you like to use?",
            [x.lower() for x in DYNAMO_BACKENDS],
            _convert_dynamo_backend,
            default=2,
        )
        use_custom_options = _ask_field(
            "Do you want to customize the defaults sent to torch.compile? [yes/NO]: ",
            _convert_yes_no_to_bool,
            default=False,
            error_message="Please enter yes or no.",
        )
        if use_custom_options:
            dynamo_config[prefix + "mode"] = _ask_options(
                "Which mode do you want to use?",
                TORCH_DYNAMO_MODES,
                lambda x: TORCH_DYNAMO_MODES[int(x)],
                default="default",
            )
            dynamo_config[prefix + "use_fullgraph"] = _ask_field(
                "Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: ",
                _convert_yes_no_to_bool,
                default=False,
                error_message="Please enter yes or no.",
            )
            dynamo_config[prefix + "use_dynamic"] = _ask_field(
                "Do you want to enable dynamic shape tracing? [yes/NO]: ",
                _convert_yes_no_to_bool,
                default=False,
                error_message="Please enter yes or no.",
            )

    eca_instance_query = "Which EC2 instance type you want to use for your training?"
    if distributed_type != SageMakerDistributedType.NO:
        eca_instance_type = _ask_options(
            eca_instance_query, SAGEMAKER_PARALLEL_EC2_INSTANCES, lambda x: SAGEMAKER_PARALLEL_EC2_INSTANCES[int(x)]
        )
    else:
        eca_instance_query += "? [ml.p3.2xlarge]:"
        eca_instance_type = _ask_field(eca_instance_query, lambda x: str(x).lower(), default="ml.p3.2xlarge")

    num_machines = 1
    if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
        num_machines = _ask_field(
            "How many machines do you want use? [1]: ",
            int,
            default=1,
        )

    mixed_precision = _ask_options(
        "Do you wish to use FP16 or BF16 (mixed precision)?",
        ["no", "fp16", "bf16", "fp8"],
        _convert_mixed_precision,
    )

    if use_dynamo and mixed_precision == "no":
        print(
            "Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts."
        )

    return SageMakerConfig(
        image_uri=docker_image,
        compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER,
        distributed_type=distributed_type,
        use_cpu=False,
        dynamo_config=dynamo_config,
        eca_instance_type=eca_instance_type,
        profile=aws_profile,
        region=aws_region,
        iam_role_name=iam_role_name,
        mixed_precision=mixed_precision,
        num_machines=num_machines,
        sagemaker_inputs_file=sagemaker_inputs_file,
        sagemaker_metrics_file=sagemaker_metrics_file,
    )
598
"""A segment tree supporting point updates and range queries for any
associative binary function (e.g. ``operator.add``, ``min``, ``max``)."""
from collections.abc import Sequence
from queue import Queue


class SegmentTreeNode:
    """One node covering the inclusive index range [start, end]."""

    def __init__(self, start, end, val, left=None, right=None):
        self.start = start
        self.end = end
        self.val = val
        # Midpoint used to route updates/queries to the children.
        self.mid = (start + end) // 2
        self.left = left
        self.right = right

    def __repr__(self):
        return f"SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})"


class SegmentTree:
    """Segment tree over *collection*, combining values with *function*.

    ``function`` must be an associative binary callable; queries and
    updates run in O(log n).
    """

    def __init__(self, collection: Sequence, function):
        self.collection = collection
        self.fn = function
        # For an empty collection there is no tree; traverse() yields nothing.
        self.root = None
        if self.collection:
            self.root = self._build_tree(0, len(collection) - 1)

    def update(self, i, val):
        """Set ``collection[i]`` to *val* and repair aggregates on the path."""
        self._update_tree(self.root, i, val)

    def query_range(self, i, j):
        """Return fn-aggregate over the inclusive index range [i, j]."""
        return self._query_range(self.root, i, j)

    def _build_tree(self, start, end):
        # Leaf: a single element of the collection.
        if start == end:
            return SegmentTreeNode(start, end, self.collection[start])
        mid = (start + end) // 2
        left = self._build_tree(start, mid)
        right = self._build_tree(mid + 1, end)
        return SegmentTreeNode(start, end, self.fn(left.val, right.val), left, right)

    def _update_tree(self, node, i, val):
        if node.start == i and node.end == i:
            node.val = val
            return
        if i <= node.mid:
            self._update_tree(node.left, i, val)
        else:
            self._update_tree(node.right, i, val)
        # Recompute this node's aggregate from the updated children.
        node.val = self.fn(node.left.val, node.right.val)

    def _query_range(self, node, i, j):
        if node.start == i and node.end == j:
            return node.val
        if i <= node.mid:
            if j <= node.mid:
                # Range lies entirely in the left child.
                return self._query_range(node.left, i, j)
            # Range straddles both children: combine the two halves.
            return self.fn(
                self._query_range(node.left, i, node.mid),
                self._query_range(node.right, node.mid + 1, j),
            )
        # Range lies entirely in the right child.
        return self._query_range(node.right, i, j)

    def traverse(self):
        """Yield all nodes in breadth-first (level) order."""
        if self.root is not None:
            queue = Queue()
            queue.put(self.root)
            while not queue.empty():
                node = queue.get()
                yield node
                if node.left is not None:
                    queue.put(node.left)
                if node.right is not None:
                    queue.put(node.right)


if __name__ == "__main__":
    import operator

    for fn in [operator.add, max, min]:
        print("*" * 50)
        arr = SegmentTree([2, 1, 5, 3, 4], fn)
        for node in arr.traverse():
            print(node)
        print()

        arr.update(1, 5)
        for node in arr.traverse():
            print(node)
        print()

        print(arr.query_range(3, 4))  # 7
        print(arr.query_range(2, 2))  # 5
        print(arr.query_range(1, 3))  # 13
        print()
420
0
'''simple docstring''' import unittest import numpy as np from transformers.testing_utils import require_flax, require_tf, require_torch from transformers.utils import ( expand_dims, flatten_dict, is_flax_available, is_tf_available, is_torch_available, reshape, squeeze, transpose, ) if is_flax_available(): import jax.numpy as jnp if is_tf_available(): import tensorflow as tf if is_torch_available(): import torch class __a ( unittest.TestCase ): def _UpperCAmelCase ( self : Dict) ->int: """simple docstring""" _lowercase = { """task_specific_params""": { """summarization""": {"""length_penalty""": 1.0, """max_length""": 1_28, """min_length""": 12, """num_beams""": 4}, """summarization_cnn""": {"""length_penalty""": 2.0, """max_length""": 1_42, """min_length""": 56, """num_beams""": 4}, """summarization_xsum""": {"""length_penalty""": 1.0, """max_length""": 62, """min_length""": 11, """num_beams""": 6}, } } _lowercase = { """task_specific_params.summarization.length_penalty""": 1.0, """task_specific_params.summarization.max_length""": 1_28, """task_specific_params.summarization.min_length""": 12, """task_specific_params.summarization.num_beams""": 4, """task_specific_params.summarization_cnn.length_penalty""": 2.0, """task_specific_params.summarization_cnn.max_length""": 1_42, """task_specific_params.summarization_cnn.min_length""": 56, """task_specific_params.summarization_cnn.num_beams""": 4, """task_specific_params.summarization_xsum.length_penalty""": 1.0, """task_specific_params.summarization_xsum.max_length""": 62, """task_specific_params.summarization_xsum.min_length""": 11, """task_specific_params.summarization_xsum.num_beams""": 6, } self.assertEqual(flatten_dict(lowercase__) , lowercase__) def _UpperCAmelCase ( self : List[str]) ->int: """simple docstring""" _lowercase = np.random.randn(3 , 4) self.assertTrue(np.allclose(transpose(lowercase__) , x.transpose())) _lowercase = np.random.randn(3 , 4 , 5) self.assertTrue(np.allclose(transpose(lowercase__ , 
axes=(1, 2, 0)) , x.transpose((1, 2, 0)))) @require_torch def _UpperCAmelCase ( self : str) ->Optional[Any]: """simple docstring""" _lowercase = np.random.randn(3 , 4) _lowercase = torch.tensor(lowercase__) self.assertTrue(np.allclose(transpose(lowercase__) , transpose(lowercase__).numpy())) _lowercase = np.random.randn(3 , 4 , 5) _lowercase = torch.tensor(lowercase__) self.assertTrue(np.allclose(transpose(lowercase__ , axes=(1, 2, 0)) , transpose(lowercase__ , axes=(1, 2, 0)).numpy())) @require_tf def _UpperCAmelCase ( self : Union[str, Any]) ->str: """simple docstring""" _lowercase = np.random.randn(3 , 4) _lowercase = tf.constant(lowercase__) self.assertTrue(np.allclose(transpose(lowercase__) , transpose(lowercase__).numpy())) _lowercase = np.random.randn(3 , 4 , 5) _lowercase = tf.constant(lowercase__) self.assertTrue(np.allclose(transpose(lowercase__ , axes=(1, 2, 0)) , transpose(lowercase__ , axes=(1, 2, 0)).numpy())) @require_flax def _UpperCAmelCase ( self : int) ->List[str]: """simple docstring""" _lowercase = np.random.randn(3 , 4) _lowercase = jnp.array(lowercase__) self.assertTrue(np.allclose(transpose(lowercase__) , np.asarray(transpose(lowercase__)))) _lowercase = np.random.randn(3 , 4 , 5) _lowercase = jnp.array(lowercase__) self.assertTrue(np.allclose(transpose(lowercase__ , axes=(1, 2, 0)) , np.asarray(transpose(lowercase__ , axes=(1, 2, 0))))) def _UpperCAmelCase ( self : Tuple) ->Tuple: """simple docstring""" _lowercase = np.random.randn(3 , 4) self.assertTrue(np.allclose(reshape(lowercase__ , (4, 3)) , np.reshape(lowercase__ , (4, 3)))) _lowercase = np.random.randn(3 , 4 , 5) self.assertTrue(np.allclose(reshape(lowercase__ , (12, 5)) , np.reshape(lowercase__ , (12, 5)))) @require_torch def _UpperCAmelCase ( self : Any) ->str: """simple docstring""" _lowercase = np.random.randn(3 , 4) _lowercase = torch.tensor(lowercase__) self.assertTrue(np.allclose(reshape(lowercase__ , (4, 3)) , reshape(lowercase__ , (4, 3)).numpy())) _lowercase = 
np.random.randn(3 , 4 , 5) _lowercase = torch.tensor(lowercase__) self.assertTrue(np.allclose(reshape(lowercase__ , (12, 5)) , reshape(lowercase__ , (12, 5)).numpy())) @require_tf def _UpperCAmelCase ( self : Dict) ->str: """simple docstring""" _lowercase = np.random.randn(3 , 4) _lowercase = tf.constant(lowercase__) self.assertTrue(np.allclose(reshape(lowercase__ , (4, 3)) , reshape(lowercase__ , (4, 3)).numpy())) _lowercase = np.random.randn(3 , 4 , 5) _lowercase = tf.constant(lowercase__) self.assertTrue(np.allclose(reshape(lowercase__ , (12, 5)) , reshape(lowercase__ , (12, 5)).numpy())) @require_flax def _UpperCAmelCase ( self : str) ->Optional[int]: """simple docstring""" _lowercase = np.random.randn(3 , 4) _lowercase = jnp.array(lowercase__) self.assertTrue(np.allclose(reshape(lowercase__ , (4, 3)) , np.asarray(reshape(lowercase__ , (4, 3))))) _lowercase = np.random.randn(3 , 4 , 5) _lowercase = jnp.array(lowercase__) self.assertTrue(np.allclose(reshape(lowercase__ , (12, 5)) , np.asarray(reshape(lowercase__ , (12, 5))))) def _UpperCAmelCase ( self : Any) ->Optional[Any]: """simple docstring""" _lowercase = np.random.randn(1 , 3 , 4) self.assertTrue(np.allclose(squeeze(lowercase__) , np.squeeze(lowercase__))) _lowercase = np.random.randn(1 , 4 , 1 , 5) self.assertTrue(np.allclose(squeeze(lowercase__ , axis=2) , np.squeeze(lowercase__ , axis=2))) @require_torch def _UpperCAmelCase ( self : List[str]) ->Union[str, Any]: """simple docstring""" _lowercase = np.random.randn(1 , 3 , 4) _lowercase = torch.tensor(lowercase__) self.assertTrue(np.allclose(squeeze(lowercase__) , squeeze(lowercase__).numpy())) _lowercase = np.random.randn(1 , 4 , 1 , 5) _lowercase = torch.tensor(lowercase__) self.assertTrue(np.allclose(squeeze(lowercase__ , axis=2) , squeeze(lowercase__ , axis=2).numpy())) @require_tf def _UpperCAmelCase ( self : Optional[int]) ->str: """simple docstring""" _lowercase = np.random.randn(1 , 3 , 4) _lowercase = tf.constant(lowercase__) 
self.assertTrue(np.allclose(squeeze(lowercase__) , squeeze(lowercase__).numpy())) _lowercase = np.random.randn(1 , 4 , 1 , 5) _lowercase = tf.constant(lowercase__) self.assertTrue(np.allclose(squeeze(lowercase__ , axis=2) , squeeze(lowercase__ , axis=2).numpy())) @require_flax def _UpperCAmelCase ( self : int) ->Dict: """simple docstring""" _lowercase = np.random.randn(1 , 3 , 4) _lowercase = jnp.array(lowercase__) self.assertTrue(np.allclose(squeeze(lowercase__) , np.asarray(squeeze(lowercase__)))) _lowercase = np.random.randn(1 , 4 , 1 , 5) _lowercase = jnp.array(lowercase__) self.assertTrue(np.allclose(squeeze(lowercase__ , axis=2) , np.asarray(squeeze(lowercase__ , axis=2)))) def _UpperCAmelCase ( self : Optional[int]) ->Tuple: """simple docstring""" _lowercase = np.random.randn(3 , 4) self.assertTrue(np.allclose(expand_dims(lowercase__ , axis=1) , np.expand_dims(lowercase__ , axis=1))) @require_torch def _UpperCAmelCase ( self : Any) ->Optional[Any]: """simple docstring""" _lowercase = np.random.randn(3 , 4) _lowercase = torch.tensor(lowercase__) self.assertTrue(np.allclose(expand_dims(lowercase__ , axis=1) , expand_dims(lowercase__ , axis=1).numpy())) @require_tf def _UpperCAmelCase ( self : str) ->Optional[Any]: """simple docstring""" _lowercase = np.random.randn(3 , 4) _lowercase = tf.constant(lowercase__) self.assertTrue(np.allclose(expand_dims(lowercase__ , axis=1) , expand_dims(lowercase__ , axis=1).numpy())) @require_flax def _UpperCAmelCase ( self : str) ->Any: """simple docstring""" _lowercase = np.random.randn(3 , 4) _lowercase = jnp.array(lowercase__) self.assertTrue(np.allclose(expand_dims(lowercase__ , axis=1) , np.asarray(expand_dims(lowercase__ , axis=1))))
572
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_bart import BartTokenizer _lowerCamelCase = logging.get_logger(__name__) _lowerCamelCase = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'} # See all BART models at https://huggingface.co/models?filter=bart _lowerCamelCase = { 'vocab_file': { 'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/vocab.json', 'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/vocab.json', 'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json', 'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json', 'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json', 'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json', }, 'merges_file': { 'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/merges.txt', 'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/merges.txt', 'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt', 'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt', 'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt', 'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt', }, 'tokenizer_file': { 'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json', 'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json', 
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json', 'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json', 'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json', 'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json', }, } _lowerCamelCase = { 'facebook/bart-base': 1_0_2_4, 'facebook/bart-large': 1_0_2_4, 'facebook/bart-large-mnli': 1_0_2_4, 'facebook/bart-large-cnn': 1_0_2_4, 'facebook/bart-large-xsum': 1_0_2_4, 'yjernite/bart_eli5': 1_0_2_4, } class __a ( _snake_case ): __SCREAMING_SNAKE_CASE : Any = VOCAB_FILES_NAMES __SCREAMING_SNAKE_CASE : int = PRETRAINED_VOCAB_FILES_MAP __SCREAMING_SNAKE_CASE : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __SCREAMING_SNAKE_CASE : Optional[Any] = ['input_ids', 'attention_mask'] __SCREAMING_SNAKE_CASE : Union[str, Any] = BartTokenizer def __init__( self : Tuple , lowercase__ : Optional[int]=None , lowercase__ : Union[str, Any]=None , lowercase__ : Optional[int]=None , lowercase__ : Dict="replace" , lowercase__ : Tuple="<s>" , lowercase__ : Union[str, Any]="</s>" , lowercase__ : Optional[int]="</s>" , lowercase__ : Tuple="<s>" , lowercase__ : str="<unk>" , lowercase__ : Optional[Any]="<pad>" , lowercase__ : Optional[Any]="<mask>" , lowercase__ : Union[str, Any]=False , lowercase__ : str=True , **lowercase__ : Dict , ) ->List[str]: """simple docstring""" super().__init__( lowercase__ , lowercase__ , tokenizer_file=lowercase__ , errors=lowercase__ , bos_token=lowercase__ , eos_token=lowercase__ , sep_token=lowercase__ , cls_token=lowercase__ , unk_token=lowercase__ , pad_token=lowercase__ , mask_token=lowercase__ , add_prefix_space=lowercase__ , trim_offsets=lowercase__ , **lowercase__ , ) _lowercase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__()) if pre_tok_state.get("""add_prefix_space""" , lowercase__) != 
add_prefix_space: _lowercase = getattr(lowercase__ , pre_tok_state.pop("""type""")) _lowercase = add_prefix_space _lowercase = pre_tok_class(**lowercase__) _lowercase = add_prefix_space # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__` _lowercase = """post_processor""" _lowercase = getattr(self.backend_tokenizer , lowercase__ , lowercase__) if tokenizer_component_instance: _lowercase = json.loads(tokenizer_component_instance.__getstate__()) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: _lowercase = tuple(state["""sep"""]) if "cls" in state: _lowercase = tuple(state["""cls"""]) _lowercase = False if state.get("""add_prefix_space""" , lowercase__) != add_prefix_space: _lowercase = add_prefix_space _lowercase = True if state.get("""trim_offsets""" , lowercase__) != trim_offsets: _lowercase = trim_offsets _lowercase = True if changes_to_apply: _lowercase = getattr(lowercase__ , state.pop("""type""")) _lowercase = component_class(**lowercase__) setattr(self.backend_tokenizer , lowercase__ , lowercase__) @property def _UpperCAmelCase ( self : List[Any]) ->str: """simple docstring""" if self._mask_token is None: if self.verbose: logger.error("""Using mask_token, but it is not set yet.""") return None return str(self._mask_token) @mask_token.setter def _UpperCAmelCase ( self : Union[str, Any] , lowercase__ : str) ->int: """simple docstring""" _lowercase = AddedToken(lowercase__ , lstrip=lowercase__ , rstrip=lowercase__) if isinstance(lowercase__ , lowercase__) else value _lowercase = value def _UpperCAmelCase ( self : Tuple , *lowercase__ : Any , **lowercase__ : Dict) ->BatchEncoding: """simple docstring""" _lowercase = kwargs.get("""is_split_into_words""" , lowercase__) if is_split_into_words and not self.add_prefix_space: raise ValueError( f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ """to use it with pretokenized inputs.""") return 
super()._batch_encode_plus(*lowercase__ , **lowercase__) def _UpperCAmelCase ( self : List[str] , *lowercase__ : Optional[int] , **lowercase__ : List[Any]) ->BatchEncoding: """simple docstring""" _lowercase = kwargs.get("""is_split_into_words""" , lowercase__) if is_split_into_words and not self.add_prefix_space: raise ValueError( f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ """to use it with pretokenized inputs.""") return super()._encode_plus(*lowercase__ , **lowercase__) def _UpperCAmelCase ( self : Any , lowercase__ : str , lowercase__ : Optional[str] = None) ->Tuple[str]: """simple docstring""" _lowercase = self._tokenizer.model.save(lowercase__ , name=lowercase__) return tuple(lowercase__) def _UpperCAmelCase ( self : Optional[Any] , lowercase__ : Optional[Any] , lowercase__ : int=None) ->Tuple: """simple docstring""" _lowercase = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def _UpperCAmelCase ( self : Optional[int] , lowercase__ : List[int] , lowercase__ : Optional[List[int]] = None) ->List[int]: """simple docstring""" _lowercase = [self.sep_token_id] _lowercase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
572
1
import argparse

import torch
from huggingface_hub import hf_hub_download

from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def convert_roberta_prelayernorm_checkpoint_to_pytorch(checkpoint_repo: str, pytorch_dump_folder_path: str) -> None:
    """Convert an official roberta-prelayernorm dump into a Transformers checkpoint.

    Args:
        checkpoint_repo: Hub repo id of the official PyTorch dump
            (e.g. ``andreasmadsen/efficient_mlm_m0.40``).
        pytorch_dump_folder_path: Directory the converted model and tokenizer
            are written to.
    """
    config = RobertaPreLayerNormConfig.from_pretrained(
        checkpoint_repo, architectures=["RobertaPreLayerNormForMaskedLM"]
    )

    # convert state_dict
    original_state_dict = torch.load(hf_hub_download(repo_id=checkpoint_repo, filename="pytorch_model.bin"))
    state_dict = {}
    for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwiriting 'roberta'
        if tensor_key.startswith("roberta."):
            tensor_key = "roberta_prelayernorm." + tensor_key[len("roberta.") :]

        # The original implementation contains weights which are not used, remove them from the state_dict
        if tensor_key.endswith(".self.LayerNorm.weight") or tensor_key.endswith(".self.LayerNorm.bias"):
            continue

        state_dict[tensor_key] = tensor_value

    # pass None as the name/path so the weights come exclusively from `state_dict`
    model = RobertaPreLayerNormForMaskedLM.from_pretrained(
        pretrained_model_name_or_path=None, config=config, state_dict=state_dict
    )
    model.save_pretrained(pytorch_dump_folder_path)

    # convert tokenizer
    tokenizer = AutoTokenizer.from_pretrained(checkpoint_repo)
    tokenizer.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint-repo",
        default=None,
        type=str,
        required=True,
        help="Path the official PyTorch dump, e.g. 'andreasmadsen/efficient_mlm_m0.40'.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
53
'''simple docstring''' import numpy as np import torch from torch.nn import CrossEntropyLoss from transformers import AutoModelForCausalLM, AutoTokenizer import datasets from datasets import logging _a : Tuple = """\ """ _a : Tuple = """ Perplexity (PPL) is one of the most common metrics for evaluating language models. It is defined as the exponentiated average negative log-likelihood of a sequence. For more information, see https://huggingface.co/docs/transformers/perplexity """ _a : Optional[Any] = """ Args: model_id (str): model used for calculating Perplexity NOTE: Perplexity can only be calculated for causal language models. This includes models such as gpt2, causal variations of bert, causal versions of t5, and more (the full list can be found in the AutoModelForCausalLM documentation here: https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM ) input_texts (list of str): input text, each separate text snippet is one list entry. batch_size (int): the batch size to run texts through the model. Defaults to 16. add_start_token (bool): whether to add the start token to the texts, so the perplexity can include the probability of the first word. Defaults to True. device (str): device to run on, defaults to 'cuda' when available Returns: perplexity: dictionary containing the perplexity scores for the texts in the input list, as well as the mean perplexity. If one of the input texts is longer than the max input length of the model, then it is truncated to the max length for the perplexity computation. Examples: Example 1: >>> perplexity = datasets.load_metric(\"perplexity\") >>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"] >>> results = perplexity.compute(model_id='gpt2', ... add_start_token=False, ... 
input_texts=input_texts) # doctest:+ELLIPSIS >>> print(list(results.keys())) ['perplexities', 'mean_perplexity'] >>> print(round(results[\"mean_perplexity\"], 2)) 78.22 >>> print(round(results[\"perplexities\"][0], 2)) 11.11 Example 2: >>> perplexity = datasets.load_metric(\"perplexity\") >>> input_texts = datasets.load_dataset(\"wikitext\", ... \"wikitext-2-raw-v1\", ... split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS [...] >>> input_texts = [s for s in input_texts if s!=''] >>> results = perplexity.compute(model_id='gpt2', ... input_texts=input_texts) # doctest:+ELLIPSIS >>> print(list(results.keys())) ['perplexities', 'mean_perplexity'] >>> print(round(results[\"mean_perplexity\"], 2)) 60.35 >>> print(round(results[\"perplexities\"][0], 2)) 81.12 """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class _UpperCAmelCase ( datasets.Metric ): def lowerCamelCase__ ( self ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION,citation=_CITATION,inputs_description=_KWARGS_DESCRIPTION,features=datasets.Features( { """input_texts""": datasets.Value("""string""" ), } ),reference_urls=["""https://huggingface.co/docs/transformers/perplexity"""],) def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE = 16,__SCREAMING_SNAKE_CASE = True,__SCREAMING_SNAKE_CASE=None ): '''simple docstring''' if device is not None: assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu." 
if device == "gpu": __lowerCAmelCase = """cuda""" else: __lowerCAmelCase = """cuda""" if torch.cuda.is_available() else """cpu""" __lowerCAmelCase = AutoModelForCausalLM.from_pretrained(__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = model.to(__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = AutoTokenizer.from_pretrained(__SCREAMING_SNAKE_CASE ) # if batch_size > 1 (which generally leads to padding being required), and # if there is not an already assigned pad_token, assign an existing # special token to also be the padding token if tokenizer.pad_token is None and batch_size > 1: __lowerCAmelCase = list(tokenizer.special_tokens_map_extended.values() ) # check that the model already has at least one special token defined assert ( len(__SCREAMING_SNAKE_CASE ) > 0 ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1." # assign one of the special tokens to also be the pad token tokenizer.add_special_tokens({"""pad_token""": existing_special_tokens[0]} ) if add_start_token: # leave room for <BOS> token to be added: assert ( tokenizer.bos_token is not None ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False" __lowerCAmelCase = model.config.max_length - 1 else: __lowerCAmelCase = model.config.max_length __lowerCAmelCase = tokenizer( __SCREAMING_SNAKE_CASE,add_special_tokens=__SCREAMING_SNAKE_CASE,padding=__SCREAMING_SNAKE_CASE,truncation=__SCREAMING_SNAKE_CASE,max_length=__SCREAMING_SNAKE_CASE,return_tensors="""pt""",return_attention_mask=__SCREAMING_SNAKE_CASE,).to(__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = encodings["""input_ids"""] __lowerCAmelCase = encodings["""attention_mask"""] # check that each input is long enough: if add_start_token: assert torch.all(torch.ge(attn_masks.sum(1 ),1 ) ), "Each input text must be at least one token long." 
else: assert torch.all( torch.ge(attn_masks.sum(1 ),2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings." __lowerCAmelCase = [] __lowerCAmelCase = CrossEntropyLoss(reduction="""none""" ) for start_index in logging.tqdm(range(0,len(__SCREAMING_SNAKE_CASE ),__SCREAMING_SNAKE_CASE ) ): __lowerCAmelCase = min(start_index + batch_size,len(__SCREAMING_SNAKE_CASE ) ) __lowerCAmelCase = encoded_texts[start_index:end_index] __lowerCAmelCase = attn_masks[start_index:end_index] if add_start_token: __lowerCAmelCase = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = torch.cat([bos_tokens_tensor, encoded_batch],dim=1 ) __lowerCAmelCase = torch.cat( [torch.ones(bos_tokens_tensor.size(),dtype=torch.intaa ).to(__SCREAMING_SNAKE_CASE ), attn_mask],dim=1 ) __lowerCAmelCase = encoded_batch with torch.no_grad(): __lowerCAmelCase = model(__SCREAMING_SNAKE_CASE,attention_mask=__SCREAMING_SNAKE_CASE ).logits __lowerCAmelCase = out_logits[..., :-1, :].contiguous() __lowerCAmelCase = labels[..., 1:].contiguous() __lowerCAmelCase = attn_mask[..., 1:].contiguous() __lowerCAmelCase = torch.expa( (loss_fct(shift_logits.transpose(1,2 ),__SCREAMING_SNAKE_CASE ) * shift_attention_mask_batch).sum(1 ) / shift_attention_mask_batch.sum(1 ) ) ppls += perplexity_batch.tolist() return {"perplexities": ppls, "mean_perplexity": np.mean(__SCREAMING_SNAKE_CASE )}
689
0
"""simple docstring""" from typing import Optional import pyspark from .. import Features, NamedSplit from ..download import DownloadMode from ..packaged_modules.spark.spark import Spark from .abc import AbstractDatasetReader class _snake_case ( a__ ): def __init__( self : List[Any] , UpperCAmelCase : pyspark.sql.DataFrame , UpperCAmelCase : Optional[NamedSplit] = None , UpperCAmelCase : Optional[Features] = None , UpperCAmelCase : bool = True , UpperCAmelCase : str = None , UpperCAmelCase : bool = False , UpperCAmelCase : str = None , UpperCAmelCase : bool = True , UpperCAmelCase : str = "arrow" , **UpperCAmelCase : List[Any] , ): super().__init__( split=UpperCAmelCase , features=UpperCAmelCase , cache_dir=UpperCAmelCase , keep_in_memory=UpperCAmelCase , streaming=UpperCAmelCase , **UpperCAmelCase , ) __lowerCamelCase : List[Any] = load_from_cache_file __lowerCamelCase : Union[str, Any] = file_format __lowerCamelCase : List[Any] = Spark( df=UpperCAmelCase , features=UpperCAmelCase , cache_dir=UpperCAmelCase , working_dir=UpperCAmelCase , **UpperCAmelCase , ) def lowerCamelCase__ ( self : str ): if self.streaming: return self.builder.as_streaming_dataset(split=self.split ) __lowerCamelCase : str = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD self.builder.download_and_prepare( download_mode=UpperCAmelCase , file_format=self._file_format , ) return self.builder.as_dataset(split=self.split )
366
"""simple docstring""" import glob import os import random from string import ascii_lowercase, digits import cva import numpy as np # Parrameters __A = (720, 1280) # Height, Width __A = (0.4, 0.6) # if height or width lower than this scale, drop it. __A = 1 / 100 __A = '''''' __A = '''''' __A = '''''' __A = 250 def lowercase_ ( ) -> None: '''simple docstring''' __lowerCamelCase , __lowerCamelCase : List[Any] = get_dataset(_lowerCamelCase , _lowerCamelCase ) for index in range(_lowerCamelCase ): __lowerCamelCase : Optional[Any] = random.sample(range(len(_lowerCamelCase ) ) , 4 ) __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : Optional[Any] = update_image_and_anno( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , filter_scale=_lowerCamelCase , ) # Get random string code: '7b7ad245cdff75241935e4dd860f3bad' __lowerCamelCase : Tuple = random_chars(32 ) __lowerCamelCase : Dict = path.split(os.sep )[-1].rsplit("." , 1 )[0] __lowerCamelCase : List[str] = F"""{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}""" cva.imwrite(F"""{file_root}.jpg""" , _lowerCamelCase , [cva.IMWRITE_JPEG_QUALITY, 85] ) print(F"""Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}""" ) __lowerCamelCase : List[Any] = [] for anno in new_annos: __lowerCamelCase : Any = anno[3] - anno[1] __lowerCamelCase : Optional[int] = anno[4] - anno[2] __lowerCamelCase : Optional[int] = anno[1] + width / 2 __lowerCamelCase : Union[str, Any] = anno[2] + height / 2 __lowerCamelCase : int = F"""{anno[0]} {x_center} {y_center} {width} {height}""" annos_list.append(_lowerCamelCase ) with open(F"""{file_root}.txt""" , "w" ) as outfile: outfile.write("\n".join(line for line in annos_list ) ) def lowercase_ ( _lowerCamelCase: str , _lowerCamelCase: str ) -> tuple[list, list]: '''simple docstring''' __lowerCamelCase : Optional[int] = [] __lowerCamelCase : Any = [] for label_file in glob.glob(os.path.join(_lowerCamelCase , "*.txt" ) ): __lowerCamelCase : List[Any] = 
label_file.split(os.sep )[-1].rsplit("." , 1 )[0] with open(_lowerCamelCase ) as in_file: __lowerCamelCase : Tuple = in_file.readlines() __lowerCamelCase : List[str] = os.path.join(_lowerCamelCase , F"""{label_name}.jpg""" ) __lowerCamelCase : Union[str, Any] = [] for obj_list in obj_lists: __lowerCamelCase : str = obj_list.rstrip("\n" ).split(" " ) __lowerCamelCase : Union[str, Any] = float(obj[1] ) - float(obj[3] ) / 2 __lowerCamelCase : Tuple = float(obj[2] ) - float(obj[4] ) / 2 __lowerCamelCase : Union[str, Any] = float(obj[1] ) + float(obj[3] ) / 2 __lowerCamelCase : Any = float(obj[2] ) + float(obj[4] ) / 2 boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] ) if not boxes: continue img_paths.append(_lowerCamelCase ) labels.append(_lowerCamelCase ) return img_paths, labels def lowercase_ ( _lowerCamelCase: list , _lowerCamelCase: list , _lowerCamelCase: list[int] , _lowerCamelCase: tuple[int, int] , _lowerCamelCase: tuple[float, float] , _lowerCamelCase: float = 0.0 , ) -> tuple[list, list, str]: '''simple docstring''' __lowerCamelCase : Union[str, Any] = np.zeros([output_size[0], output_size[1], 3] , dtype=np.uinta ) __lowerCamelCase : Optional[Any] = scale_range[0] + random.random() * (scale_range[1] - scale_range[0]) __lowerCamelCase : List[str] = scale_range[0] + random.random() * (scale_range[1] - scale_range[0]) __lowerCamelCase : int = int(scale_x * output_size[1] ) __lowerCamelCase : Optional[Any] = int(scale_y * output_size[0] ) __lowerCamelCase : List[Any] = [] __lowerCamelCase : Optional[Any] = [] for i, index in enumerate(_lowerCamelCase ): __lowerCamelCase : List[str] = all_img_list[index] path_list.append(_lowerCamelCase ) __lowerCamelCase : Optional[Any] = all_annos[index] __lowerCamelCase : List[str] = cva.imread(_lowerCamelCase ) if i == 0: # top-left __lowerCamelCase : List[str] = cva.resize(_lowerCamelCase , (divid_point_x, divid_point_y) ) __lowerCamelCase : Any = img for bbox in img_annos: __lowerCamelCase : str = bbox[1] * scale_x 
__lowerCamelCase : Union[str, Any] = bbox[2] * scale_y __lowerCamelCase : Optional[int] = bbox[3] * scale_x __lowerCamelCase : Union[str, Any] = bbox[4] * scale_y new_anno.append([bbox[0], xmin, ymin, xmax, ymax] ) elif i == 1: # top-right __lowerCamelCase : str = cva.resize(_lowerCamelCase , (output_size[1] - divid_point_x, divid_point_y) ) __lowerCamelCase : Any = img for bbox in img_annos: __lowerCamelCase : List[Any] = scale_x + bbox[1] * (1 - scale_x) __lowerCamelCase : List[Any] = bbox[2] * scale_y __lowerCamelCase : Tuple = scale_x + bbox[3] * (1 - scale_x) __lowerCamelCase : Union[str, Any] = bbox[4] * scale_y new_anno.append([bbox[0], xmin, ymin, xmax, ymax] ) elif i == 2: # bottom-left __lowerCamelCase : Any = cva.resize(_lowerCamelCase , (divid_point_x, output_size[0] - divid_point_y) ) __lowerCamelCase : List[str] = img for bbox in img_annos: __lowerCamelCase : Any = bbox[1] * scale_x __lowerCamelCase : Optional[int] = scale_y + bbox[2] * (1 - scale_y) __lowerCamelCase : Dict = bbox[3] * scale_x __lowerCamelCase : Tuple = scale_y + bbox[4] * (1 - scale_y) new_anno.append([bbox[0], xmin, ymin, xmax, ymax] ) else: # bottom-right __lowerCamelCase : int = cva.resize( _lowerCamelCase , (output_size[1] - divid_point_x, output_size[0] - divid_point_y) ) __lowerCamelCase : Optional[Any] = img for bbox in img_annos: __lowerCamelCase : Union[str, Any] = scale_x + bbox[1] * (1 - scale_x) __lowerCamelCase : Union[str, Any] = scale_y + bbox[2] * (1 - scale_y) __lowerCamelCase : int = scale_x + bbox[3] * (1 - scale_x) __lowerCamelCase : int = scale_y + bbox[4] * (1 - scale_y) new_anno.append([bbox[0], xmin, ymin, xmax, ymax] ) # Remove bounding box small than scale of filter if filter_scale > 0: __lowerCamelCase : str = [ anno for anno in new_anno if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2]) ] return output_img, new_anno, path_list[0] def lowercase_ ( _lowerCamelCase: int ) -> str: '''simple docstring''' assert number_char > 1, "The 
number of character should greater than 1" __lowerCamelCase : Tuple = ascii_lowercase + digits return "".join(random.choice(_lowerCamelCase ) for _ in range(_lowerCamelCase ) ) if __name__ == "__main__": main() print('''DONE ✅''')
366
1
import sys
from typing import Tuple

import numpy as np
import torch
from PIL import Image
from torch import nn

from transformers.image_utils import PILImageResampling
from utils import img_tensorize


class ResizeShortestEdge:
    """Resize images so the shortest edge matches a randomly sampled target length."""

    def __init__(self, short_edge_length, max_size=sys.maxsize):
        """
        Args:
            short_edge_length: (min, max) inclusive range to sample the target
                short-edge length from.
            max_size: cap applied to the longest edge after resizing.
        """
        self.interp_method = "bilinear"
        self.max_size = max_size
        self.short_edge_length = short_edge_length

    def __call__(self, imgs):
        img_augs = []
        for img in imgs:
            h, w = img.shape[:2]
            # later: provide list and randomly choose index for resize
            size = np.random.randint(self.short_edge_length[0], self.short_edge_length[1] + 1)
            if size == 0:
                return img
            scale = size * 1.0 / min(h, w)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size
            # keep the longest edge under max_size, rescaling both dims
            if max(newh, neww) > self.max_size:
                scale = self.max_size * 1.0 / max(newh, neww)
                newh = newh * scale
                neww = neww * scale
            neww = int(neww + 0.5)
            newh = int(neww + 0.5) if False else int(newh + 0.5)
            if img.dtype == np.uint8:
                # uint8 arrays are resized through PIL
                pil_image = Image.fromarray(img)
                pil_image = pil_image.resize((neww, newh), PILImageResampling.BILINEAR)
                img = np.asarray(pil_image)
            else:
                img = img.permute(2, 0, 1).unsqueeze(0)  # 3, 0, 1)  # hw(c) -> nchw
                img = nn.functional.interpolate(
                    img, (newh, neww), mode=self.interp_method, align_corners=False
                ).squeeze(0)
            img_augs.append(img)
        return img_augs


class lowercase_:
    """Preprocessing pipeline: resize, normalize, pad and batch input images."""

    def __init__(self, cfg):
        self.aug = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST)
        self.input_format = cfg.INPUT.FORMAT
        self.size_divisibility = cfg.SIZE_DIVISIBILITY
        self.pad_value = cfg.PAD_VALUE
        self.max_image_size = cfg.INPUT.MAX_SIZE_TEST
        self.device = cfg.MODEL.DEVICE
        self.pixel_std = torch.tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.pixel_mean = torch.tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.normalizer = lambda x: (x - self.pixel_mean) / self.pixel_std

    def pad(self, images):
        """Right/bottom-pad each image to the batch max size; return batch + sizes."""
        max_size = tuple(max(s) for s in zip(*[img.shape for img in images]))
        image_sizes = [im.shape[-2:] for im in images]
        images = [
            nn.functional.pad(
                im,
                [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]],
                value=self.pad_value,
            )
            for size, im in zip(image_sizes, images)
        ]
        return torch.stack(images), torch.tensor(image_sizes)

    def __call__(self, images, single_image=False):
        with torch.no_grad():
            if not isinstance(images, list):
                images = [images]
            if single_image:
                assert len(images) == 1
            for i in range(len(images)):
                if isinstance(images[i], torch.Tensor):
                    images.insert(i, images.pop(i).to(self.device).float())
                elif not isinstance(images[i], torch.Tensor):
                    images.insert(
                        i,
                        torch.as_tensor(img_tensorize(images.pop(i), input_format=self.input_format))
                        .to(self.device)
                        .float(),
                    )
            # resize smallest edge
            raw_sizes = torch.tensor([im.shape[:2] for im in images])
            images = self.aug(images)
            # transpose images and convert to torch tensors
            # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
            # now normalize before pad to avoid useless arithmetic
            images = [self.normalizer(x) for x in images]
            # now pad them to do the following operations
            images, sizes = self.pad(images)
            # Normalize
            if self.size_divisibility > 0:
                raise NotImplementedError()
            # pad
            scales_yx = torch.true_divide(raw_sizes, sizes)
            if single_image:
                return images[0], sizes[0], scales_yx[0]
            else:
                return images, sizes, scales_yx


def _scale_box(boxes, scale_yx):
    """Scale boxes in-place: x columns by scale_yx[:, 1], y columns by scale_yx[:, 0]."""
    boxes[:, 0::2] *= scale_yx[:, 1]
    boxes[:, 1::2] *= scale_yx[:, 0]
    return boxes


def _clip_box(tensor, box_size: Tuple[int, int]):
    """Clamp box coordinates in-place to lie within `box_size` (height, width)."""
    assert torch.isfinite(tensor).all(), "Box tensor contains infinite or NaN!"
    h, w = box_size
    tensor[:, 0].clamp_(min=0, max=w)
    tensor[:, 1].clamp_(min=0, max=h)
    tensor[:, 2].clamp_(min=0, max=w)
    tensor[:, 3].clamp_(min=0, max=h)
20
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DPR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/dpr-ctx_encoder-single-nq-base": (
        "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"
    ),
    "facebook/dpr-question_encoder-single-nq-base": (
        "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"
    ),
    "facebook/dpr-reader-single-nq-base": (
        "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"
    ),
    "facebook/dpr-ctx_encoder-multiset-base": (
        "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"
    ),
    "facebook/dpr-question_encoder-multiset-base": (
        "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"
    ),
    "facebook/dpr-reader-multiset-base": (
        "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"
    ),
}


class _UpperCamelCase(PretrainedConfig):
    """Configuration for DPR (Dense Passage Retrieval) encoder/reader models.

    Stores the BERT-style backbone hyper-parameters plus `projection_dim`, the
    optional size of an extra projection on top of the pooled output.
    """

    # required by PretrainedConfig so (de)serialization can identify the model family
    model_type = "dpr"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        projection_dim: int = 0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.projection_dim = projection_dim
        self.position_embedding_type = position_embedding_type
548
0
import unittest

from transformers.utils.backbone_utils import (
    BackboneMixin,
    get_aligned_output_features_output_indices,
    verify_out_features_out_indices,
)


class snake_case_(unittest.TestCase):
    """Unit tests for the backbone out_features/out_indices helpers."""

    def test_get_aligned_output_features_output_indices(self):
        stage_names = ["a", "b", "c"]

        # Defaults to last layer if both are None
        out_features, out_indices = get_aligned_output_features_output_indices(None, None, stage_names)
        self.assertEqual(out_features, ["c"])
        self.assertEqual(out_indices, [2])

        # Out indices set to match out features
        out_features, out_indices = get_aligned_output_features_output_indices(["a", "c"], None, stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features set to match out indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [0, 2], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features selected from negative indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [-3, -1], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [-3, -1])

    def test_verify_out_features_out_indices(self):
        # Stage names must be set
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), None)

        # Out features must be a list
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(("a", "b"), (0, 1), ["a", "b"])

        # Out features must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), ["a"])

        # Out indices must be a list or tuple
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, 0, ["a", "b"])

        # Out indices must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, (0, 1), ["a"])

        # Out features and out indices must be the same length
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0,), ["a", "b", "c"])

        # Out features should match out indices
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 2), ["a", "b", "c"])

        # Out features and out indices should be in order
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["b", "a"], (0, 1), ["a", "b"])

        # Check passes with valid inputs
        verify_out_features_out_indices(["a", "b", "d"], (0, 1, -1), ["a", "b", "c", "d"])

    def test_backbone_mixin(self):
        backbone = BackboneMixin()

        # NOTE(review): attribute names reconstructed from the upstream
        # transformers test — confirm against BackboneMixin's properties.
        backbone.stage_names = ["a", "b", "c"]
        backbone._out_features = ["a", "c"]
        backbone._out_indices = [0, 2]

        # Check that the output features and indices are set correctly
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [0, 2])

        # Check out features and indices are updated correctly
        backbone.out_features = ["a", "b"]
        self.assertEqual(backbone.out_features, ["a", "b"])
        self.assertEqual(backbone.out_indices, [0, 1])

        backbone.out_indices = [-3, -1]
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [-3, -1])
709
from __future__ import annotations def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> dict[str, float]: if (voltage, current, resistance).count(0 ) != 1: raise ValueError('One and only one argument must be 0' ) if resistance < 0: raise ValueError('Resistance cannot be negative' ) if voltage == 0: return {"voltage": float(current * resistance )} elif current == 0: return {"current": voltage / resistance} elif resistance == 0: return {"resistance": voltage / current} else: raise ValueError('Exactly one argument must be 0' ) if __name__ == "__main__": import doctest doctest.testmod()
311
0
"""simple docstring""" def _lowerCAmelCase ( lowerCAmelCase ): '''simple docstring''' if num < 0: return False UpperCAmelCase = num UpperCAmelCase = 0 while num > 0: UpperCAmelCase = rev_num * 10 + (num % 10) num //= 10 return num_copy == rev_num if __name__ == "__main__": import doctest doctest.testmod()
673
"""simple docstring""" import sys from typing import Tuple import numpy as np import torch from PIL import Image from torch import nn from transformers.image_utils import PILImageResampling from utils import img_tensorize class UpperCamelCase_ : def __init__( self , snake_case__ , snake_case__=sys.maxsize ) -> Union[str, Any]: """simple docstring""" UpperCAmelCase = """bilinear""" UpperCAmelCase = max_size UpperCAmelCase = short_edge_length def __call__( self , snake_case__ ) -> List[Any]: """simple docstring""" UpperCAmelCase = [] for img in imgs: UpperCAmelCase , UpperCAmelCase = img.shape[:2] # later: provide list and randomly choose index for resize UpperCAmelCase = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1 ) if size == 0: return img UpperCAmelCase = size * 1.0 / min(snake_case__ , snake_case__ ) if h < w: UpperCAmelCase , UpperCAmelCase = size, scale * w else: UpperCAmelCase , UpperCAmelCase = scale * h, size if max(snake_case__ , snake_case__ ) > self.max_size: UpperCAmelCase = self.max_size * 1.0 / max(snake_case__ , snake_case__ ) UpperCAmelCase = newh * scale UpperCAmelCase = neww * scale UpperCAmelCase = int(neww + 0.5 ) UpperCAmelCase = int(newh + 0.5 ) if img.dtype == np.uinta: UpperCAmelCase = Image.fromarray(snake_case__ ) UpperCAmelCase = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR ) UpperCAmelCase = np.asarray(snake_case__ ) else: UpperCAmelCase = img.permute(2 , 0 , 1 ).unsqueeze(0 ) # 3, 0, 1) # hw(c) -> nchw UpperCAmelCase = nn.functional.interpolate( snake_case__ , (newh, neww) , mode=self.interp_method , align_corners=snake_case__ ).squeeze(0 ) img_augs.append(snake_case__ ) return img_augs class UpperCamelCase_ : def __init__( self , snake_case__ ) -> Optional[Any]: """simple docstring""" UpperCAmelCase = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST ) UpperCAmelCase = cfg.INPUT.FORMAT UpperCAmelCase = cfg.SIZE_DIVISIBILITY UpperCAmelCase = 
cfg.PAD_VALUE UpperCAmelCase = cfg.INPUT.MAX_SIZE_TEST UpperCAmelCase = cfg.MODEL.DEVICE UpperCAmelCase = torch.tensor(cfg.MODEL.PIXEL_STD ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 ) UpperCAmelCase = torch.tensor(cfg.MODEL.PIXEL_MEAN ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 ) UpperCAmelCase = lambda snake_case__ : (x - self.pixel_mean) / self.pixel_std def UpperCamelCase_ ( self , snake_case__ ) -> Union[str, Any]: """simple docstring""" UpperCAmelCase = tuple(max(snake_case__ ) for s in zip(*[img.shape for img in images] ) ) UpperCAmelCase = [im.shape[-2:] for im in images] UpperCAmelCase = [ nn.functional.pad( snake_case__ , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , ) for size, im in zip(snake_case__ , snake_case__ ) ] return torch.stack(snake_case__ ), torch.tensor(snake_case__ ) def __call__( self , snake_case__ , snake_case__=False ) -> Optional[Any]: """simple docstring""" with torch.no_grad(): if not isinstance(snake_case__ , snake_case__ ): UpperCAmelCase = [images] if single_image: assert len(snake_case__ ) == 1 for i in range(len(snake_case__ ) ): if isinstance(images[i] , torch.Tensor ): images.insert(snake_case__ , images.pop(snake_case__ ).to(self.device ).float() ) elif not isinstance(images[i] , torch.Tensor ): images.insert( snake_case__ , torch.as_tensor(img_tensorize(images.pop(snake_case__ ) , input_format=self.input_format ) ) .to(self.device ) .float() , ) # resize smallest edge UpperCAmelCase = torch.tensor([im.shape[:2] for im in images] ) UpperCAmelCase = self.aug(snake_case__ ) # transpose images and convert to torch tensors # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images] # now normalize before pad to avoid useless arithmetic UpperCAmelCase = [self.normalizer(snake_case__ ) for x in images] # now pad them to do the following operations UpperCAmelCase , UpperCAmelCase = self.pad(snake_case__ ) # Normalize if 
self.size_divisibility > 0: raise NotImplementedError() # pad UpperCAmelCase = torch.true_divide(snake_case__ , snake_case__ ) if single_image: return images[0], sizes[0], scales_yx[0] else: return images, sizes, scales_yx def _lowerCAmelCase ( lowerCAmelCase , lowerCAmelCase ): '''simple docstring''' boxes[:, 0::2] *= scale_yx[:, 1] boxes[:, 1::2] *= scale_yx[:, 0] return boxes def _lowerCAmelCase ( lowerCAmelCase , lowerCAmelCase ): '''simple docstring''' assert torch.isfinite(lowerCAmelCase ).all(), "Box tensor contains infinite or NaN!" UpperCAmelCase , UpperCAmelCase = box_size tensor[:, 0].clamp_(min=0 , max=lowerCAmelCase ) tensor[:, 1].clamp_(min=0 , max=lowerCAmelCase ) tensor[:, 2].clamp_(min=0 , max=lowerCAmelCase ) tensor[:, 3].clamp_(min=0 , max=lowerCAmelCase )
673
1
"""simple docstring""" from collections import deque def A__ ( _UpperCAmelCase : int ) -> List[str]: '''simple docstring''' snake_case__ : List[Any] = len(_UpperCAmelCase ) snake_case__ : Optional[Any] = deque() snake_case__ : Tuple = [False for _ in range(_UpperCAmelCase )] snake_case__ : List[str] = [-1 for _ in range(_UpperCAmelCase )] snake_case__ : List[str] = index_of[:] def strong_connect(_UpperCAmelCase : int , _UpperCAmelCase : str , _UpperCAmelCase : Dict ): snake_case__ : int = index # the number when this node is seen snake_case__ : Optional[int] = index # lowest rank node reachable from here index += 1 stack.append(_UpperCAmelCase ) snake_case__ : Dict = True for w in g[v]: if index_of[w] == -1: snake_case__ : Union[str, Any] = strong_connect(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) snake_case__ : Any = ( lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v] ) elif on_stack[w]: snake_case__ : Union[str, Any] = ( lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v] ) if lowlink_of[v] == index_of[v]: snake_case__ : Any = [] snake_case__ : List[Any] = stack.pop() snake_case__ : List[str] = False component.append(_UpperCAmelCase ) while w != v: snake_case__ : Optional[int] = stack.pop() snake_case__ : Dict = False component.append(_UpperCAmelCase ) components.append(_UpperCAmelCase ) return index snake_case__ : str = [] for v in range(_UpperCAmelCase ): if index_of[v] == -1: strong_connect(_UpperCAmelCase , 0 , _UpperCAmelCase ) return components def A__ ( _UpperCAmelCase : Any , _UpperCAmelCase : Union[str, Any] ) -> str: '''simple docstring''' snake_case__ : int = [[] for _ in range(_UpperCAmelCase )] for u, v in edges: g[u].append(_UpperCAmelCase ) return g if __name__ == "__main__": # Test lowercase = 7 lowercase = [0, 0, 1, 2, 3, 3, 4, 4, 6] lowercase = [1, 3, 2, 0, 1, 4, 5, 6, 5] lowercase = [(u, v) for u, v in zip(source, target)] lowercase = create_graph(n_vertices, edges) assert [[5], [6], [4], [3, 2, 1, 0]] 
== tarjan(g)
150
"""simple docstring""" def A__ ( _UpperCAmelCase : int = 1_00_00_00 ) -> int: '''simple docstring''' snake_case__ : List[Any] = limit + 1 snake_case__ : Union[str, Any] = [0] * limit for first_term in range(1 , _UpperCAmelCase ): for n in range(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): snake_case__ : List[Any] = first_term + n / first_term if common_difference % 4: # d must be divisble by 4 continue else: common_difference /= 4 if ( first_term > common_difference and first_term < 4 * common_difference ): # since x,y,z are positive integers frequency[n] += 1 # so z>0 and a>d ,also 4d<a snake_case__ : List[Any] = sum(1 for x in frequency[1:limit] if x == 10 ) return count if __name__ == "__main__": print(f"{solution() = }")
150
1
"""Fast (Rust-backed) tokenization class for the MVP model."""
import json
from typing import List, Optional, Tuple

from tokenizers import pre_tokenizers, processors

from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

# See all MVP models at https://huggingface.co/models?filter=mvp
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json",
    },
    "added_tokens.json": {
        "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json",
    },
    "merges_file": {
        "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "RUCAIBox/mvp": 1024,
}


class MvpTokenizerFast(PreTrainedTokenizerFast):
    """
    A "fast" MVP tokenizer backed by the HuggingFace `tokenizers` library
    (byte-level BPE), mirroring the slow ``MvpTokenizer`` interface.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = MvpTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        # Rebuild the pre-tokenizer if the serialized add_prefix_space flag
        # disagrees with the requested one.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    def mask_token(self) -> str:
        """
        Mask token. Logs an error instead of raising when unset, so that
        pipelines that never mask keep working.
        """
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Make the mask token behave like a normal word (strip left space).
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
                "to use it with pretokenized inputs."
            )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
                "to use it with pretokenized inputs."
            )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        # Delegate to the backend BPE model; returns the written file paths.
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        # Single sequence: <s> A </s>; pair: <s> A </s> </s> B </s>
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        # MVP does not use token type ids: return all zeros of the right length.
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
78
import unittest from transformers import XLMConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( XLMForMultipleChoice, XLMForQuestionAnswering, XLMForQuestionAnsweringSimple, XLMForSequenceClassification, XLMForTokenClassification, XLMModel, XLMWithLMHeadModel, ) from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST class _lowerCamelCase: def __init__( self, lowerCamelCase, lowerCamelCase=13, lowerCamelCase=7, lowerCamelCase=True, lowerCamelCase=True, lowerCamelCase=True, lowerCamelCase=True, lowerCamelCase=True, lowerCamelCase=False, lowerCamelCase=False, lowerCamelCase=False, lowerCamelCase=2, lowerCamelCase=99, lowerCamelCase=0, lowerCamelCase=32, lowerCamelCase=5, lowerCamelCase=4, lowerCamelCase=0.1, lowerCamelCase=0.1, lowerCamelCase=5_12, lowerCamelCase=2, lowerCamelCase=0.0_2, lowerCamelCase=2, lowerCamelCase=4, lowerCamelCase="last", lowerCamelCase=True, lowerCamelCase=None, lowerCamelCase=0, ) -> str: """simple docstring""" _lowercase : Union[str, Any] = parent _lowercase : Optional[Any] = batch_size _lowercase : List[str] = seq_length _lowercase : int = is_training _lowercase : List[str] = use_input_lengths _lowercase : int = use_token_type_ids _lowercase : Any = use_labels _lowercase : Union[str, Any] = gelu_activation _lowercase : List[str] = sinusoidal_embeddings _lowercase : str = causal _lowercase : Optional[int] = asm _lowercase : Union[str, Any] = n_langs _lowercase : List[Any] = vocab_size _lowercase : Any = n_special _lowercase : Any = hidden_size _lowercase : str = num_hidden_layers _lowercase : Union[str, Any] = num_attention_heads _lowercase : Tuple = 
hidden_dropout_prob _lowercase : Optional[int] = attention_probs_dropout_prob _lowercase : Union[str, Any] = max_position_embeddings _lowercase : List[str] = type_sequence_label_size _lowercase : Any = initializer_range _lowercase : int = num_labels _lowercase : Optional[int] = num_choices _lowercase : Optional[Any] = summary_type _lowercase : Optional[Any] = use_proj _lowercase : int = scope _lowercase : List[Any] = bos_token_id def UpperCamelCase ( self) -> Optional[int]: """simple docstring""" _lowercase : Tuple = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) _lowercase : List[str] = random_attention_mask([self.batch_size, self.seq_length]) _lowercase : int = None if self.use_input_lengths: _lowercase : Dict = ( ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2 ) # small variation of seq_length _lowercase : Tuple = None if self.use_token_type_ids: _lowercase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length], self.n_langs) _lowercase : Tuple = None _lowercase : int = None _lowercase : int = None if self.use_labels: _lowercase : str = ids_tensor([self.batch_size], self.type_sequence_label_size) _lowercase : str = ids_tensor([self.batch_size, self.seq_length], self.num_labels) _lowercase : Dict = ids_tensor([self.batch_size], 2).float() _lowercase : Tuple = ids_tensor([self.batch_size], self.num_choices) _lowercase : Dict = self.get_config() return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def UpperCamelCase ( self) -> Union[str, Any]: """simple docstring""" return XLMConfig( vocab_size=self.vocab_size, n_special=self.n_special, emb_dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, gelu_activation=self.gelu_activation, sinusoidal_embeddings=self.sinusoidal_embeddings, asm=self.asm, causal=self.causal, 
n_langs=self.n_langs, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, summary_type=self.summary_type, use_proj=self.use_proj, num_labels=self.num_labels, bos_token_id=self.bos_token_id, ) def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, ) -> Tuple: """simple docstring""" _lowercase : List[Any] = XLMModel(config=lowerCamelCase) model.to(lowerCamelCase) model.eval() _lowercase : str = model(lowerCamelCase, lengths=lowerCamelCase, langs=lowerCamelCase) _lowercase : int = model(lowerCamelCase, langs=lowerCamelCase) _lowercase : Any = model(lowerCamelCase) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, ) -> List[Any]: """simple docstring""" _lowercase : Dict = XLMWithLMHeadModel(lowerCamelCase) model.to(lowerCamelCase) model.eval() _lowercase : int = model(lowerCamelCase, token_type_ids=lowerCamelCase, labels=lowerCamelCase) self.parent.assertEqual(result.loss.shape, ()) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, ) -> str: """simple docstring""" _lowercase : Tuple = XLMForQuestionAnsweringSimple(lowerCamelCase) model.to(lowerCamelCase) model.eval() _lowercase : Dict = model(lowerCamelCase) _lowercase : List[str] = model(lowerCamelCase, start_positions=lowerCamelCase, end_positions=lowerCamelCase) _lowercase : Any = outputs self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) 
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, ) -> Union[str, Any]: """simple docstring""" _lowercase : Tuple = XLMForQuestionAnswering(lowerCamelCase) model.to(lowerCamelCase) model.eval() _lowercase : Optional[Any] = model(lowerCamelCase) _lowercase : List[Any] = model( lowerCamelCase, start_positions=lowerCamelCase, end_positions=lowerCamelCase, cls_index=lowerCamelCase, is_impossible=lowerCamelCase, p_mask=lowerCamelCase, ) _lowercase : List[str] = model( lowerCamelCase, start_positions=lowerCamelCase, end_positions=lowerCamelCase, cls_index=lowerCamelCase, is_impossible=lowerCamelCase, ) ((_lowercase) , ) : Optional[Any] = result_with_labels.to_tuple() _lowercase : List[str] = model(lowerCamelCase, start_positions=lowerCamelCase, end_positions=lowerCamelCase) ((_lowercase) , ) : Any = result_with_labels.to_tuple() self.parent.assertEqual(result_with_labels.loss.shape, ()) self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top)) self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top)) self.parent.assertEqual( result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)) self.parent.assertEqual( result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)) self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,)) def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, ) -> int: """simple docstring""" _lowercase : Optional[Any] = XLMForSequenceClassification(lowerCamelCase) model.to(lowerCamelCase) model.eval() _lowercase : Optional[int] = model(lowerCamelCase) _lowercase 
: Optional[int] = model(lowerCamelCase, labels=lowerCamelCase) self.parent.assertEqual(result.loss.shape, ()) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size)) def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, ) -> List[str]: """simple docstring""" _lowercase : Any = self.num_labels _lowercase : str = XLMForTokenClassification(lowerCamelCase) model.to(lowerCamelCase) model.eval() _lowercase : int = model(lowerCamelCase, attention_mask=lowerCamelCase, labels=lowerCamelCase) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, ) -> Dict: """simple docstring""" _lowercase : Optional[Any] = self.num_choices _lowercase : Optional[int] = XLMForMultipleChoice(config=lowerCamelCase) model.to(lowerCamelCase) model.eval() _lowercase : Optional[Any] = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() _lowercase : int = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() _lowercase : Optional[Any] = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() _lowercase : List[str] = model( lowerCamelCase, attention_mask=lowerCamelCase, token_type_ids=lowerCamelCase, labels=lowerCamelCase, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices)) def UpperCamelCase ( self) -> Tuple: """simple docstring""" _lowercase : Dict = self.prepare_config_and_inputs() ( ( _lowercase ) , ( _lowercase ) , ( _lowercase ) , ( _lowercase ) , ( _lowercase ) , ( _lowercase ) , ( _lowercase ) , ( _lowercase ) , ( _lowercase ) , ) : Optional[Any] = config_and_inputs _lowercase : List[str] = {'input_ids': input_ids, 'token_type_ids': 
token_type_ids, 'lengths': input_lengths} return config, inputs_dict @require_torch class _lowerCamelCase( _a, _a, _a, unittest.TestCase ): lowercase_ : Any = ( ( XLMModel, XLMWithLMHeadModel, XLMForQuestionAnswering, XLMForSequenceClassification, XLMForQuestionAnsweringSimple, XLMForTokenClassification, XLMForMultipleChoice, ) if is_torch_available() else () ) lowercase_ : Optional[int] = ( (XLMWithLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Check other models whether language generation is also applicable lowercase_ : Union[str, Any] = ( { """feature-extraction""": XLMModel, """fill-mask""": XLMWithLMHeadModel, """question-answering""": XLMForQuestionAnsweringSimple, """text-classification""": XLMForSequenceClassification, """text-generation""": XLMWithLMHeadModel, """token-classification""": XLMForTokenClassification, """zero-shot""": XLMForSequenceClassification, } if is_torch_available() else {} ) def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase) -> Optional[int]: """simple docstring""" if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith('Fast') ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. 
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase=False) -> Optional[int]: """simple docstring""" _lowercase : Any = super()._prepare_for_class(lowerCamelCase, lowerCamelCase, return_labels=lowerCamelCase) if return_labels: if model_class.__name__ == "XLMForQuestionAnswering": _lowercase : Any = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=lowerCamelCase) _lowercase : Dict = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=lowerCamelCase) return inputs_dict def UpperCamelCase ( self) -> int: """simple docstring""" _lowercase : Union[str, Any] = XLMModelTester(self) _lowercase : List[str] = ConfigTester(self, config_class=lowerCamelCase, emb_dim=37) def UpperCamelCase ( self) -> Tuple: """simple docstring""" self.config_tester.run_common_tests() def UpperCamelCase ( self) -> List[Any]: """simple docstring""" _lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_model(*lowerCamelCase) def UpperCamelCase ( self) -> List[str]: """simple docstring""" _lowercase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_lm_head(*lowerCamelCase) def UpperCamelCase ( self) -> Optional[Any]: """simple docstring""" _lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_simple_qa(*lowerCamelCase) def UpperCamelCase ( self) -> int: """simple docstring""" _lowercase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_qa(*lowerCamelCase) def UpperCamelCase ( self) -> List[str]: """simple docstring""" _lowercase : Dict = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_xlm_sequence_classif(*lowerCamelCase) def UpperCamelCase ( self) -> Dict: """simple docstring""" _lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_token_classif(*lowerCamelCase) def UpperCamelCase ( self) -> Any: """simple docstring""" _lowercase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_for_multiple_choice(*lowerCamelCase) def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase=False, lowerCamelCase=1) -> int: """simple docstring""" self.assertIsInstance(lowerCamelCase, lowerCamelCase) self.assertListEqual( [isinstance(lowerCamelCase, lowerCamelCase) for iter_attentions in attentions], [True] * len(lowerCamelCase)) self.assertEqual(len(lowerCamelCase), (max_length - min_length) * num_beam_groups) for idx, iter_attentions in enumerate(lowerCamelCase): # adds PAD dummy token _lowercase : Dict = min_length + idx + 1 _lowercase : int = min_length + idx + 1 _lowercase : Dict = ( batch_size * num_beam_groups, config.num_attention_heads, tgt_len, src_len, ) # check attn size self.assertListEqual( [layer_attention.shape for layer_attention in iter_attentions], [expected_shape] * len(lowerCamelCase)) def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase=False, lowerCamelCase=1) -> List[Any]: """simple docstring""" self.assertIsInstance(lowerCamelCase, lowerCamelCase) self.assertListEqual( [isinstance(lowerCamelCase, lowerCamelCase) for iter_hidden_states in hidden_states], [True] * len(lowerCamelCase), ) self.assertEqual(len(lowerCamelCase), (max_length - min_length) * num_beam_groups) for idx, iter_hidden_states in enumerate(lowerCamelCase): # adds PAD dummy token _lowercase : int = min_length + idx + 1 _lowercase : int = (batch_size * num_beam_groups, seq_len, config.hidden_size) # check 
hidden size self.assertListEqual( [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states], [expected_shape] * len(lowerCamelCase), ) pass @slow def UpperCamelCase ( self) -> int: """simple docstring""" for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowercase : Dict = XLMModel.from_pretrained(lowerCamelCase) self.assertIsNotNone(lowerCamelCase) @require_torch class _lowerCamelCase( unittest.TestCase ): @slow def UpperCamelCase ( self) -> Union[str, Any]: """simple docstring""" _lowercase : Tuple = XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048') model.to(lowerCamelCase) _lowercase : Union[str, Any] = torch.tensor([[14, 4_47]], dtype=torch.long, device=lowerCamelCase) # the president _lowercase : Any = [ 14, 4_47, 14, 4_47, 14, 4_47, 14, 4_47, 14, 4_47, 14, 4_47, 14, 4_47, 14, 4_47, 14, 4_47, 14, 4_47, ] # the president the president the president the president the president the president the president the president the president the president # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference _lowercase : str = model.generate(lowerCamelCase, do_sample=lowerCamelCase) self.assertListEqual(output_ids[0].cpu().numpy().tolist(), lowerCamelCase)
89
0
"""Lazy import structure for the MMBT model (torch-only)."""
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Maps submodule name -> public names it exports; consumed by _LazyModule.
_import_structure = {"configuration_mmbt": ["MMBTConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch missing: silently skip registering the torch-backed classes.
    pass
else:
    _import_structure["modeling_mmbt"] = ["MMBTForClassification", "MMBTModel", "ModalEmbeddings"]

if TYPE_CHECKING:
    # Real imports only for static type checkers.
    from .configuration_mmbt import MMBTConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings

else:
    import sys

    # Replace this module with a lazy proxy so heavy deps load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
709
"""Measure a single qubit on the Aer simulator and report the counts."""
import qiskit


def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    """
    Build a circuit with ``qubits`` quantum and ``classical_bits`` classical
    bits, measure qubit 0 into classical bit 0, run 1000 shots on the Aer
    simulator, and return the counts histogram.
    """
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Map the quantum measurement to the classical bits
    circuit.measure([0], [0])
    # Execute the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)


if __name__ == "__main__":
    print(f"""Total count for various states are: {single_qubit_measure(1, 1)}""")
392
0
"""simple docstring""" from __future__ import annotations import unittest from transformers import DistilBertConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.distilbert.modeling_tf_distilbert import ( TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFDistilBertForMaskedLM, TFDistilBertForMultipleChoice, TFDistilBertForQuestionAnswering, TFDistilBertForSequenceClassification, TFDistilBertForTokenClassification, TFDistilBertModel, ) class lowerCAmelCase__ : '''simple docstring''' def __init__( self , lowercase , ): _lowerCamelCase : Tuple = parent _lowerCamelCase : Optional[Any] = 13 _lowerCamelCase : Optional[int] = 7 _lowerCamelCase : List[str] = True _lowerCamelCase : Tuple = True _lowerCamelCase : Dict = False _lowerCamelCase : List[str] = True _lowerCamelCase : Dict = 99 _lowerCamelCase : Optional[Any] = 32 _lowerCamelCase : Tuple = 2 _lowerCamelCase : Union[str, Any] = 4 _lowerCamelCase : Union[str, Any] = 37 _lowerCamelCase : Dict = 'gelu' _lowerCamelCase : List[str] = 0.1 _lowerCamelCase : Tuple = 0.1 _lowerCamelCase : List[str] = 512 _lowerCamelCase : int = 16 _lowerCamelCase : Tuple = 2 _lowerCamelCase : List[Any] = 0.02 _lowerCamelCase : Tuple = 3 _lowerCamelCase : List[str] = 4 _lowerCamelCase : Dict = None def A_ ( self ): _lowerCamelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _lowerCamelCase : Tuple = None if self.use_input_mask: _lowerCamelCase : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] ) _lowerCamelCase : Optional[Any] = None _lowerCamelCase : Any = None _lowerCamelCase : Union[str, Any] = None if self.use_labels: _lowerCamelCase : int = ids_tensor([self.batch_size] , 
self.type_sequence_label_size ) _lowerCamelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _lowerCamelCase : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices ) _lowerCamelCase : str = DistilBertConfig( vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , ) return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def A_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ): _lowerCamelCase : Optional[int] = TFDistilBertModel(config=lowercase ) _lowerCamelCase : int = {'input_ids': input_ids, 'attention_mask': input_mask} _lowerCamelCase : List[str] = model(lowercase ) _lowerCamelCase : List[Any] = [input_ids, input_mask] _lowerCamelCase : Dict = model(lowercase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def A_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ): _lowerCamelCase : Optional[Any] = TFDistilBertForMaskedLM(config=lowercase ) _lowerCamelCase : List[Any] = {'input_ids': input_ids, 'attention_mask': input_mask} _lowerCamelCase : int = model(lowercase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def A_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ): _lowerCamelCase : str = TFDistilBertForQuestionAnswering(config=lowercase ) _lowerCamelCase : Tuple = { 'input_ids': input_ids, 'attention_mask': input_mask, } _lowerCamelCase : int = model(lowercase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) 
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def A_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ): _lowerCamelCase : Optional[int] = self.num_labels _lowerCamelCase : int = TFDistilBertForSequenceClassification(lowercase ) _lowerCamelCase : Any = {'input_ids': input_ids, 'attention_mask': input_mask} _lowerCamelCase : List[str] = model(lowercase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def A_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ): _lowerCamelCase : int = self.num_choices _lowerCamelCase : List[str] = TFDistilBertForMultipleChoice(lowercase ) _lowerCamelCase : int = tf.tile(tf.expand_dims(lowercase , 1 ) , (1, self.num_choices, 1) ) _lowerCamelCase : Optional[int] = tf.tile(tf.expand_dims(lowercase , 1 ) , (1, self.num_choices, 1) ) _lowerCamelCase : int = { 'input_ids': multiple_choice_inputs_ids, 'attention_mask': multiple_choice_input_mask, } _lowerCamelCase : List[Any] = model(lowercase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def A_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ): _lowerCamelCase : List[str] = self.num_labels _lowerCamelCase : Union[str, Any] = TFDistilBertForTokenClassification(lowercase ) _lowerCamelCase : Tuple = {'input_ids': input_ids, 'attention_mask': input_mask} _lowerCamelCase : str = model(lowercase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def A_ ( self ): _lowerCamelCase : Dict = self.prepare_config_and_inputs() ((_lowerCamelCase), (_lowerCamelCase), (_lowerCamelCase), (_lowerCamelCase), (_lowerCamelCase), (_lowerCamelCase)) : str = config_and_inputs _lowerCamelCase : Optional[Any] = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_tf class lowerCAmelCase__ ( lowercase, lowercase, 
unittest.TestCase ): '''simple docstring''' lowerCamelCase__ = ( ( TFDistilBertModel, TFDistilBertForMaskedLM, TFDistilBertForQuestionAnswering, TFDistilBertForSequenceClassification, TFDistilBertForTokenClassification, TFDistilBertForMultipleChoice, ) if is_tf_available() else None ) lowerCamelCase__ = ( { """feature-extraction""": TFDistilBertModel, """fill-mask""": TFDistilBertForMaskedLM, """question-answering""": TFDistilBertForQuestionAnswering, """text-classification""": TFDistilBertForSequenceClassification, """token-classification""": TFDistilBertForTokenClassification, """zero-shot""": TFDistilBertForSequenceClassification, } if is_tf_available() else {} ) lowerCamelCase__ = False lowerCamelCase__ = False def A_ ( self ): _lowerCamelCase : List[str] = TFDistilBertModelTester(self ) _lowerCamelCase : Any = ConfigTester(self , config_class=lowercase , dim=37 ) def A_ ( self ): self.config_tester.run_common_tests() def A_ ( self ): _lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_model(*lowercase ) def A_ ( self ): _lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_masked_lm(*lowercase ) def A_ ( self ): _lowerCamelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_question_answering(*lowercase ) def A_ ( self ): _lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_sequence_classification(*lowercase ) def A_ ( self ): _lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_multiple_choice(*lowercase ) def A_ ( self ): _lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_token_classification(*lowercase ) @slow def A_ ( self ): for model_name in 
list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1] ): _lowerCamelCase : int = TFDistilBertModel.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) @require_tf class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' @slow def A_ ( self ): _lowerCamelCase : int = TFDistilBertModel.from_pretrained('distilbert-base-uncased' ) _lowerCamelCase : int = tf.constant([[0, 1, 2, 3, 4, 5]] ) _lowerCamelCase : int = model(lowercase )[0] _lowerCamelCase : Union[str, Any] = [1, 6, 768] self.assertEqual(output.shape , lowercase ) _lowerCamelCase : Dict = tf.constant( [ [ [0.19_26_18_85, -0.13_73_29_55, 0.4_11_97_99], [0.22_15_01_56, -0.07_42_26_61, 0.39_03_72_04], [0.22_75_60_18, -0.0_89_64_14, 0.3_70_14_67], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , lowercase , atol=1E-4 )
630
"""simple docstring""" from __future__ import annotations from fractions import Fraction from math import gcd, sqrt def _snake_case ( lowercase__ ): _lowerCamelCase : int = int(number**0.5 ) return number == sq * sq def _snake_case ( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ): _lowerCamelCase : int = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den _lowerCamelCase : int = x_den * y_den * z_den _lowerCamelCase : int = gcd(lowercase__ , lowercase__ ) top //= hcf bottom //= hcf return top, bottom def _snake_case ( lowercase__ = 35 ): _lowerCamelCase : set = set() _lowerCamelCase : int _lowerCamelCase : Fraction = Fraction(0 ) _lowerCamelCase : tuple[int, int] for x_num in range(1 , order + 1 ): for x_den in range(x_num + 1 , order + 1 ): for y_num in range(1 , order + 1 ): for y_den in range(y_num + 1 , order + 1 ): # n=1 _lowerCamelCase : int = x_num * y_den + x_den * y_num _lowerCamelCase : List[Any] = x_den * y_den _lowerCamelCase : int = gcd(lowercase__ , lowercase__ ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: _lowerCamelCase : Optional[Any] = add_three( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) unique_s.add(lowercase__ ) # n=2 _lowerCamelCase : Tuple = ( x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num ) _lowerCamelCase : Dict = x_den * x_den * y_den * y_den if is_sq(lowercase__ ) and is_sq(lowercase__ ): _lowerCamelCase : Dict = int(sqrt(lowercase__ ) ) _lowerCamelCase : Optional[Any] = int(sqrt(lowercase__ ) ) _lowerCamelCase : List[Any] = gcd(lowercase__ , lowercase__ ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: _lowerCamelCase : Union[str, Any] = add_three( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) unique_s.add(lowercase__ ) # n=-1 _lowerCamelCase : List[str] = x_num * y_num _lowerCamelCase : int = x_den * y_num + x_num * y_den _lowerCamelCase : Dict = gcd(lowercase__ , 
lowercase__ ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: _lowerCamelCase : List[str] = add_three( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) unique_s.add(lowercase__ ) # n=2 _lowerCamelCase : Tuple = x_num * x_num * y_num * y_num _lowerCamelCase : List[str] = ( x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den ) if is_sq(lowercase__ ) and is_sq(lowercase__ ): _lowerCamelCase : Optional[int] = int(sqrt(lowercase__ ) ) _lowerCamelCase : Optional[int] = int(sqrt(lowercase__ ) ) _lowerCamelCase : Dict = gcd(lowercase__ , lowercase__ ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: _lowerCamelCase : int = add_three( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) unique_s.add(lowercase__ ) for num, den in unique_s: total += Fraction(lowercase__ , lowercase__ ) return total.denominator + total.numerator if __name__ == "__main__": print(F"{solution() = }")
630
1
from __future__ import annotations import math def UpperCAmelCase__ ( _A ): """simple docstring""" if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(_A ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True UpperCamelCase__ = [num for num in range(3, 100_001, 2) if not is_prime(num)] def UpperCAmelCase__ ( _A ): """simple docstring""" if not isinstance(_A , _A ): raise ValueError('''n must be an integer''' ) if n <= 0: raise ValueError('''n must be >= 0''' ) a_ = [] for num in range(len(_A ) ): a_ = 0 while 2 * i * i <= odd_composites[num]: a_ = odd_composites[num] - 2 * i * i if is_prime(_A ): break i += 1 else: list_nums.append(odd_composites[num] ) if len(_A ) == n: return list_nums return [] def UpperCAmelCase__ ( ): """simple docstring""" return compute_nums(1 )[0] if __name__ == "__main__": print(F"""{solution() = }""")
143
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os from accelerate.test_utils import execute_subprocess_async def UpperCAmelCase__ ( _A=None ): """simple docstring""" if subparsers is not None: a_ = subparsers.add_parser('''test''' ) else: a_ = argparse.ArgumentParser('''Accelerate test command''' ) parser.add_argument( '''--config_file''' , default=_A , help=( '''The path to use to store the config file. Will default to a file named default_config.yaml in the cache ''' '''location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have ''' '''such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed ''' '''with \'huggingface\'.''' ) , ) if subparsers is not None: parser.set_defaults(func=_A ) return parser def UpperCAmelCase__ ( _A ): """simple docstring""" a_ = os.path.sep.join(__file__.split(os.path.sep )[:-2] + ['''test_utils''', '''scripts''', '''test_script.py'''] ) if args.config_file is None: a_ = script_name else: a_ = f"--config_file={args.config_file} {script_name}" a_ = ['''accelerate-launch'''] + test_args.split() a_ = execute_subprocess_async(_A , env=os.environ.copy() ) if result.returncode == 0: print('''Test is a success! 
You are ready for your distributed training!''' ) def UpperCAmelCase__ ( ): """simple docstring""" a_ = test_command_parser() a_ = parser.parse_args() test_command(_A ) if __name__ == "__main__": main()
143
1
"""simple docstring""" import warnings from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class __a ( lowerCAmelCase__ ): SCREAMING_SNAKE_CASE__ : Union[str, Any] = ["image_processor", "tokenizer"] SCREAMING_SNAKE_CASE__ : Optional[Any] = "LayoutLMv3ImageProcessor" SCREAMING_SNAKE_CASE__ : List[str] = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast") def __init__( self , a__=None , a__=None , **a__ ): _lowerCamelCase = None if "feature_extractor" in kwargs: warnings.warn( 'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`' ' instead.' , a__ , ) _lowerCamelCase = kwargs.pop('feature_extractor' ) _lowerCamelCase = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('You need to specify an `image_processor`.' ) if tokenizer is None: raise ValueError('You need to specify a `tokenizer`.' ) super().__init__(a__ , a__ ) def __call__( self , a__ , a__ = None , a__ = None , a__ = None , a__ = None , a__ = True , a__ = False , a__ = None , a__ = None , a__ = 0 , a__ = None , a__ = None , a__ = None , a__ = False , a__ = False , a__ = False , a__ = False , a__ = True , a__ = None , **a__ , ): # verify input if self.image_processor.apply_ocr and (boxes is not None): raise ValueError( 'You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True.' ) if self.image_processor.apply_ocr and (word_labels is not None): raise ValueError( 'You cannot provide word labels if you initialized the image processor with apply_ocr set to True.' 
) # first, apply the image processor _lowerCamelCase = self.image_processor(images=a__ , return_tensors=a__ ) # second, apply the tokenizer if text is not None and self.image_processor.apply_ocr and text_pair is None: if isinstance(a__ , a__ ): _lowerCamelCase = [text] # add batch dimension (as the image processor always adds a batch dimension) _lowerCamelCase = features['words'] _lowerCamelCase = self.tokenizer( text=text if text is not None else features['words'] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['boxes'] , word_labels=a__ , add_special_tokens=a__ , padding=a__ , truncation=a__ , max_length=a__ , stride=a__ , pad_to_multiple_of=a__ , return_token_type_ids=a__ , return_attention_mask=a__ , return_overflowing_tokens=a__ , return_special_tokens_mask=a__ , return_offsets_mapping=a__ , return_length=a__ , verbose=a__ , return_tensors=a__ , **a__ , ) # add pixel values _lowerCamelCase = features.pop('pixel_values' ) if return_overflowing_tokens is True: _lowerCamelCase = self.get_overflowing_images(a__ , encoded_inputs['overflow_to_sample_mapping'] ) _lowerCamelCase = images return encoded_inputs def snake_case_ ( self , a__ , a__ ): # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image _lowerCamelCase = [] for sample_idx in overflow_to_sample_mapping: images_with_overflow.append(images[sample_idx] ) if len(a__ ) != len(a__ ): raise ValueError( 'Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got' F' {len(a__ )} and {len(a__ )}' ) return images_with_overflow def snake_case_ ( self , *a__ , **a__ ): return self.tokenizer.batch_decode(*a__ , **a__ ) def snake_case_ ( self , *a__ , **a__ ): return self.tokenizer.decode(*a__ , **a__ ) @property def snake_case_ ( self ): return ["input_ids", "bbox", "attention_mask", "pixel_values"] @property def snake_case_ ( self ): warnings.warn( '`feature_extractor_class` is 
deprecated and will be removed in v5. Use `image_processor_class` instead.' , a__ , ) return self.image_processor_class @property def snake_case_ ( self ): warnings.warn( '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , a__ , ) return self.image_processor
650
"""simple docstring""" def SCREAMING_SNAKE_CASE_ ( snake_case : int , snake_case : Tuple )-> Dict: _lowerCamelCase = [1] for i in range(2 , snake_case ): factorials.append(factorials[-1] * i ) assert 0 <= k < factorials[-1] * n, "k out of bounds" _lowerCamelCase = [] _lowerCamelCase = list(range(snake_case ) ) # Find permutation while factorials: _lowerCamelCase = factorials.pop() _lowerCamelCase , _lowerCamelCase = divmod(snake_case , snake_case ) permutation.append(elements[number] ) elements.remove(elements[number] ) permutation.append(elements[0] ) return permutation if __name__ == "__main__": import doctest doctest.testmod()
650
1
"""Tests for the PyTorch MobileViTV2 model.

Fixes: every class was emitted as `lowerCAmelCase__` (each shadowing the
previous) with the undefined base `snake_case__`, while `setUp` referenced
`MobileViTVaModelTester`/`MobileViTVaConfigTester` -- the module could not
even be imported.  Class/function names are restored to match the call sites
and the imported symbols; test logic and expected tensor values are unchanged.
NOTE(review): attribute names lost by the mangling (e.g. `ffn_dropout_prob`)
were reconstructed from their only readers in `get_config` -- confirm against
upstream.
"""
import inspect
import unittest

from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
    from transformers.models.mobilevitva.modeling_mobilevitva import (
        MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
        make_divisible,
    )


if is_vision_available():
    from PIL import Image

    from transformers import MobileViTImageProcessor


class MobileViTVaConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        # The config must expose the MobileViTV2-specific `width_multiplier` knob.
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "width_multiplier"))


class MobileViTVaModelTester:
    """Builds tiny configs/inputs and runs shape checks for the three heads."""

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        patch_size=2,
        num_channels=3,
        hidden_act="swish",
        conv_kernel_size=3,
        output_stride=32,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
        width_multiplier=0.25,
        ffn_dropout=0.0,
        attn_dropout=0.0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        # Hidden size scales with the width multiplier, rounded to a multiple of 8.
        self.last_hidden_size = make_divisible(512 * width_multiplier, divisor=8)
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
        self.width_multiplier = width_multiplier
        self.ffn_dropout_prob = ffn_dropout
        self.attn_dropout_prob = attn_dropout

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileViTVaConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_act=self.hidden_act,
            conv_kernel_size=self.conv_kernel_size,
            output_stride=self.output_stride,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
            width_multiplier=self.width_multiplier,
            ffn_dropout=self.ffn_dropout_prob,
            attn_dropout=self.attn_dropout_prob,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileViTVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTVaForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class MobileViTVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common + pipeline tests for MobileViTV2 (no attentions, no embeddings)."""

    all_model_classes = (
        (MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileViTVaModel,
            "image-classification": MobileViTVaForImageClassification,
            "image-segmentation": MobileViTVaForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileViTVaModelTester(self)
        self.config_tester = MobileViTVaConfigTester(self, config_class=MobileViTVaConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileViTV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileViTV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileViTV2 does not output attentions")
    def test_attention_outputs(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="Got `CUDA error: misaligned address` for tests after this one being run.")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)

            # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]),
                    [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor],
                )
                divisor *= 2

            self.assertEqual(self.model_tester.output_stride, divisor // 2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    """Load the standard COCO cats fixture used by the integration tests."""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class MobileViTVaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MobileViTImageProcessor.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = MobileViTVaForImageClassification.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256").to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1_000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileViTVaForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]],
                [[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]],
                [[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]],
            ],
            device=torch_device,
        )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_post_processing_semantic_segmentation(self):
        model = MobileViTVaForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)])
        expected_shape = torch.Size((50, 60))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((32, 32))
        self.assertEqual(segmentation[0].shape, expected_shape)
641
"""Configuration for composite encoder-decoder models.

Fix: the class was emitted as `lowerCAmelCase__` with the undefined base
`snake_case__`, and the attribute names set in `__init__` were lost, making
the module unimportable.  Restored to the conventional transformers layout.
"""
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)


class EncoderDecoderConfig(PretrainedConfig):
    """Wraps an encoder config and a decoder config into one composite config."""

    model_type = "encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        # Imported here to avoid a circular import with the auto config.
        from ..auto.configuration_auto import AutoConfig

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(cls, encoder_config, decoder_config, **kwargs):
        """Build a composite config from two `PretrainedConfig`s, forcing the
        decoder into cross-attention decoder mode."""
        logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True

        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        """Serialize, expanding the nested encoder/decoder configs to dicts."""
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
641
1
# Lazy-module init for MMBT.
#
# Fix: both the import-structure dict and the torch-only model list were
# assigned to the same mangled name `UpperCamelCase` (the second assignment
# clobbering the first), and the `_LazyModule` call referenced the undefined
# `_import_structure`.  Restored to the standard transformers lazy-init shape.
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Submodule -> public names; modeling entries are registered only when torch
# is available.
_import_structure = {"configuration_mmbt": ["MMBTConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mmbt"] = ["MMBTForClassification", "MMBTModel", "ModalEmbeddings"]


if TYPE_CHECKING:
    from .configuration_mmbt import MMBTConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings

else:
    import sys

    # Replace this module with a lazy proxy that imports symbols on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
45
"""SQL input/output streams for `datasets`.

Fixes: both classes were emitted under the same name `SCREAMING_SNAKE_CASE`
(the second shadowing the first) with the undefined base `__a`; methods inside
each class were all named `a` (shadowing each other); the TYPE_CHECKING import
was misspelled `sqlitea`; and `to_sql_kwargs.pop('sql', __lowerCAmelCase)`
referenced an undefined name.  Restored to the conventional reader/writer pair.
"""
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union

from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream


if TYPE_CHECKING:
    import sqlite3

    import sqlalchemy


class SqlDatasetReader(AbstractDatasetInputStream):
    """Reads a dataset from a SQL query or table over a database connection."""

    def __init__(
        self,
        sql,
        con,
        features=None,
        cache_dir=None,
        keep_in_memory=False,
        **kwargs,
    ):
        super().__init__(features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs)
        self.builder = Sql(
            cache_dir=cache_dir,
            features=features,
            sql=sql,
            con=con,
            **kwargs,
        )

    def read(self):
        """Materialize the query result as a `Dataset` (single 'train' split)."""
        download_config = None
        download_mode = None
        verification_mode = None
        base_path = None

        self.builder.download_and_prepare(
            download_config=download_config,
            download_mode=download_mode,
            verification_mode=verification_mode,
            base_path=base_path,
        )

        # Build dataset for splits
        dataset = self.builder.as_dataset(
            split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
        )
        return dataset


class SqlDatasetWriter:
    """Writes a `Dataset` to a SQL table in batches, optionally in parallel."""

    def __init__(
        self,
        dataset,
        name,
        con,
        batch_size=None,
        num_proc=None,
        **to_sql_kwargs,
    ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")

        self.dataset = dataset
        self.name = name
        self.con = con
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.to_sql_kwargs = to_sql_kwargs

    def write(self):
        """Write the whole dataset; returns the number of rows written."""
        # `sql` and `con` are driven by this writer, never forwarded to pandas.
        _ = self.to_sql_kwargs.pop("sql", None)
        _ = self.to_sql_kwargs.pop("con", None)
        index = self.to_sql_kwargs.pop("index", False)

        written = self._write(index=index, **self.to_sql_kwargs)
        return written

    def _batch_sql(self, args):
        """Write one batch slice via pandas `DataFrame.to_sql`; returns row count."""
        offset, index, to_sql_kwargs = args
        # Only the first batch may create/replace the table; later ones append.
        to_sql_kwargs = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs
        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        df = batch.to_pandas()
        num_rows = df.to_sql(self.name, self.con, index=index, **to_sql_kwargs)
        return num_rows or len(df)

    def _write(self, index, **to_sql_kwargs):
        """Dispatch batch writes sequentially or through a process pool."""
        written = 0

        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit="ba",
                disable=not logging.is_progress_bar_enabled(),
                desc="Creating SQL from Arrow format",
            ):
                written += self._batch_sql((offset, index, to_sql_kwargs))
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for num_rows_written in logging.tqdm(
                    pool.imap(
                        self._batch_sql,
                        [(offset, index, to_sql_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit="ba",
                    disable=not logging.is_progress_bar_enabled(),
                    desc="Creating SQL from Arrow format",
                ):
                    written += num_rows_written

        return written
309
0
from typing import Any, Dict, Optional import torch import torch.nn.functional as F from torch import nn from ..utils import maybe_allow_in_graph from .activations import get_activation from .attention_processor import Attention from .embeddings import CombinedTimestepLabelEmbeddings @maybe_allow_in_graph class UpperCamelCase_ ( nn.Module ): def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=0.0 , lowerCAmelCase_ = None , lowerCAmelCase_ = "geglu" , lowerCAmelCase_ = None , lowerCAmelCase_ = False , lowerCAmelCase_ = False , lowerCAmelCase_ = False , lowerCAmelCase_ = False , lowerCAmelCase_ = True , lowerCAmelCase_ = "layer_norm" , lowerCAmelCase_ = False , ) -> List[str]: super().__init__() _snake_case = only_cross_attention _snake_case = (num_embeds_ada_norm is not None) and norm_type == "ada_norm_zero" _snake_case = (num_embeds_ada_norm is not None) and norm_type == "ada_norm" if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None: raise ValueError( F'''`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to''' F''' define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.''' ) # Define 3 blocks. Each block has its own normalization layer. # 1. Self-Attn if self.use_ada_layer_norm: _snake_case = AdaLayerNorm(__a , __a ) elif self.use_ada_layer_norm_zero: _snake_case = AdaLayerNormZero(__a , __a ) else: _snake_case = nn.LayerNorm(__a , elementwise_affine=__a ) _snake_case = Attention( query_dim=__a , heads=__a , dim_head=__a , dropout=__a , bias=__a , cross_attention_dim=cross_attention_dim if only_cross_attention else None , upcast_attention=__a , ) # 2. Cross-Attn if cross_attention_dim is not None or double_self_attention: # We currently only use AdaLayerNormZero for self attention where there will only be one attention block. # I.e. 
the number of returned modulation chunks from AdaLayerZero would not make sense if returned during # the second cross attention block. _snake_case = ( AdaLayerNorm(__a , __a ) if self.use_ada_layer_norm else nn.LayerNorm(__a , elementwise_affine=__a ) ) _snake_case = Attention( query_dim=__a , cross_attention_dim=cross_attention_dim if not double_self_attention else None , heads=__a , dim_head=__a , dropout=__a , bias=__a , upcast_attention=__a , ) # is self-attn if encoder_hidden_states is none else: _snake_case = None _snake_case = None # 3. Feed-forward _snake_case = nn.LayerNorm(__a , elementwise_affine=__a ) _snake_case = FeedForward(__a , dropout=__a , activation_fn=__a , final_dropout=__a ) # let chunk size default to None _snake_case = None _snake_case = 0 def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> Tuple: # Sets chunk feed-forward _snake_case = chunk_size _snake_case = dim def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , ) -> Optional[Any]: # Notice that normalization is always applied before the real computation in the following blocks. # 1. Self-Attention if self.use_ada_layer_norm: _snake_case = self.norma(__a , __a ) elif self.use_ada_layer_norm_zero: _snake_case = self.norma( __a , __a , __a , hidden_dtype=hidden_states.dtype ) else: _snake_case = self.norma(__a ) _snake_case = cross_attention_kwargs if cross_attention_kwargs is not None else {} _snake_case = self.attna( __a , encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None , attention_mask=__a , **__a , ) if self.use_ada_layer_norm_zero: _snake_case = gate_msa.unsqueeze(1 ) * attn_output _snake_case = attn_output + hidden_states # 2. 
Cross-Attention if self.attna is not None: _snake_case = ( self.norma(__a , __a ) if self.use_ada_layer_norm else self.norma(__a ) ) _snake_case = self.attna( __a , encoder_hidden_states=__a , attention_mask=__a , **__a , ) _snake_case = attn_output + hidden_states # 3. Feed-forward _snake_case = self.norma(__a ) if self.use_ada_layer_norm_zero: _snake_case = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None] if self._chunk_size is not None: # "feed_forward_chunk_size" can be used to save memory if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0: raise ValueError( F'''`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.''' ) _snake_case = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size _snake_case = torch.cat( [self.ff(__a ) for hid_slice in norm_hidden_states.chunk(__a , dim=self._chunk_dim )] , dim=self._chunk_dim , ) else: _snake_case = self.ff(__a ) if self.use_ada_layer_norm_zero: _snake_case = gate_mlp.unsqueeze(1 ) * ff_output _snake_case = ff_output + hidden_states return hidden_states class UpperCamelCase_ ( nn.Module ): def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = 4 , lowerCAmelCase_ = 0.0 , lowerCAmelCase_ = "geglu" , lowerCAmelCase_ = False , ) -> str: super().__init__() _snake_case = int(dim * mult ) _snake_case = dim_out if dim_out is not None else dim if activation_fn == "gelu": _snake_case = GELU(__a , __a ) if activation_fn == "gelu-approximate": _snake_case = GELU(__a , __a , approximate='tanh' ) elif activation_fn == "geglu": _snake_case = GEGLU(__a , __a ) elif activation_fn == "geglu-approximate": _snake_case = ApproximateGELU(__a , __a ) _snake_case = nn.ModuleList([] ) # project in self.net.append(__a ) # project dropout self.net.append(nn.Dropout(__a ) ) # project out 
self.net.append(nn.Linear(__a , __a ) ) # FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout if final_dropout: self.net.append(nn.Dropout(__a ) ) def lowerCAmelCase ( self , lowerCAmelCase_ ) -> List[str]: for module in self.net: _snake_case = module(__a ) return hidden_states class UpperCamelCase_ ( nn.Module ): def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = "none" ) -> List[str]: super().__init__() _snake_case = nn.Linear(__a , __a ) _snake_case = approximate def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Dict: if gate.device.type != "mps": return F.gelu(__a , approximate=self.approximate ) # mps: gelu is not implemented for float16 return F.gelu(gate.to(dtype=torch.floataa ) , approximate=self.approximate ).to(dtype=gate.dtype ) def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Tuple: _snake_case = self.proj(__a ) _snake_case = self.gelu(__a ) return hidden_states class UpperCamelCase_ ( nn.Module ): def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> str: super().__init__() _snake_case = nn.Linear(__a , dim_out * 2 ) def lowerCAmelCase ( self , lowerCAmelCase_ ) -> List[str]: if gate.device.type != "mps": return F.gelu(__a ) # mps: gelu is not implemented for float16 return F.gelu(gate.to(dtype=torch.floataa ) ).to(dtype=gate.dtype ) def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Optional[int]: _snake_case = self.proj(__a ).chunk(2 , dim=-1 ) return hidden_states * self.gelu(__a ) class UpperCamelCase_ ( nn.Module ): def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> Any: super().__init__() _snake_case = nn.Linear(__a , __a ) def lowerCAmelCase ( self , lowerCAmelCase_ ) -> str: _snake_case = self.proj(__a ) return x * torch.sigmoid(1.7_02 * x ) class UpperCamelCase_ ( nn.Module ): def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> Union[str, Any]: super().__init__() _snake_case = nn.Embedding(__a , __a ) _snake_case = nn.SiLU() _snake_case = nn.Linear(__a , embedding_dim * 2 ) 
_snake_case = nn.LayerNorm(__a , elementwise_affine=__a ) def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> List[Any]: _snake_case = self.linear(self.silu(self.emb(__a ) ) ) _snake_case = torch.chunk(__a , 2 ) _snake_case = self.norm(__a ) * (1 + scale) + shift return x class UpperCamelCase_ ( nn.Module ): def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> List[Any]: super().__init__() _snake_case = CombinedTimestepLabelEmbeddings(__a , __a ) _snake_case = nn.SiLU() _snake_case = nn.Linear(__a , 6 * embedding_dim , bias=__a ) _snake_case = nn.LayerNorm(__a , elementwise_affine=__a , eps=1E-6 ) def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None ) -> int: _snake_case = self.linear(self.silu(self.emb(__a , __a , hidden_dtype=__a ) ) ) _snake_case = emb.chunk(6 , dim=1 ) _snake_case = self.norm(__a ) * (1 + scale_msa[:, None]) + shift_msa[:, None] return x, gate_msa, shift_mlp, scale_mlp, gate_mlp class UpperCamelCase_ ( nn.Module ): def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = 1E-5 ) -> Dict: super().__init__() _snake_case = num_groups _snake_case = eps if act_fn is None: _snake_case = None else: _snake_case = get_activation(__a ) _snake_case = nn.Linear(__a , out_dim * 2 ) def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> Tuple: if self.act: _snake_case = self.act(__a ) _snake_case = self.linear(__a ) _snake_case = emb[:, :, None, None] _snake_case = emb.chunk(2 , dim=1 ) _snake_case = F.group_norm(__a , self.num_groups , eps=self.eps ) _snake_case = x * (1 + scale) + shift return x
707
import unittest

from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script


@require_cpu
class UpperCamelCase_(unittest.TestCase):
    """CPU smoke tests: launch the accelerate end-to-end test scripts.

    Fixes two defects in the original block:
    * both methods were named ``lowerCAmelCase`` so the second definition
      silently shadowed the first and the ``test_script`` launch never ran;
    * the return annotations referenced ``List[Any]`` / ``Optional[Any]``
      which were never imported, raising ``NameError`` at class creation.
    """

    def test_launch_script(self) -> None:
        # Run the end-to-end training test script through the debug launcher.
        debug_launcher(test_script.main)

    def test_launch_ops(self) -> None:
        # Run the distributed-ops test module through the debug launcher.
        debug_launcher(test_ops.main)
541
0
"""Vote-based daily data-safety forecaster.

Three models (ordinary least squares, SARIMAX, and an RBF support-vector
regressor) each predict today's user count from historical data; a simple
majority vote over the predictions decides whether today's observed value
looks safe.
"""
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX


def linear_regression_prediction(
    train_dt: list, train_usr: list, train_mtch: list, test_dt: list, test_mtch: list
) -> float:
    """Predict total users with ordinary least squares on [1, date, match].

    Returns the absolute value of the fitted model evaluated at the first
    test point.
    """
    # Design matrix with an explicit intercept column.
    x = np.array([[1, item, train_mtch[i]] for i, item in enumerate(train_dt)])
    y = np.array(train_usr)
    # Normal-equation solution: beta = (X^T X)^-1 X^T y.
    beta = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose(), x)), x.transpose()), y)
    return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] + beta[2])


def sarimax_predictor(train_user: list, train_match: list, test_match: list) -> float:
    """One-step-ahead forecast of users with a weekly-seasonal SARIMAX model."""
    order = (1, 2, 1)
    seasonal_order = (1, 1, 0, 7)  # period of 7 -> weekly seasonality
    model = SARIMAX(
        train_user, exog=train_match, order=order, seasonal_order=seasonal_order
    )
    model_fit = model.fit(disp=False, maxiter=600, method="nm")
    result = model_fit.predict(1, len(test_match), exog=[test_match])
    return result[0]


def support_vector_regressor(x_train: list, x_test: list, train_user: list) -> float:
    """Predict users with an RBF-kernel support vector regressor."""
    regressor = SVR(kernel="rbf", C=1, gamma=0.1, epsilon=0.1)
    regressor.fit(x_train, train_user)
    y_pred = regressor.predict(x_test)
    return y_pred[0]


def interquartile_range_checker(train_user: list) -> float:
    """Return a lower safety limit derived from the interquartile range."""
    train_user.sort()  # NOTE: sorts the caller's list in place
    q1 = np.percentile(train_user, 25)
    q3 = np.percentile(train_user, 75)
    iqr = q3 - q1
    low_lim = q1 - (iqr * 0.1)
    return low_lim


def data_safety_checker(list_vote: list, actual_result: float) -> bool:
    """Majority vote: does the actual value agree with the forecasts?

    A vote counts as "safe" when it does not exceed the actual result and is
    within 0.1 of it in absolute magnitude.
    """
    safe = 0
    not_safe = 0
    for vote in list_vote:
        if vote > actual_result:
            not_safe += 1
        elif abs(abs(vote) - abs(actual_result)) <= 0.1:
            safe += 1
        else:
            not_safe += 1
    return safe > not_safe


if __name__ == "__main__":
    # data_input_df = pd.read_csv("ex_data.csv", header=None)
    data_input = [[18231, 0.0, 1], [22621, 1.0, 2], [15675, 0.0, 3], [23583, 1.0, 4]]
    data_input_df = pd.DataFrame(
        data_input, columns=["total_user", "total_even", "days"]
    )
    normalize_df = Normalizer().fit_transform(data_input_df.values)

    # split data
    total_date = normalize_df[:, 2].tolist()
    total_user = normalize_df[:, 0].tolist()
    total_match = normalize_df[:, 1].tolist()

    # for svr (input variable = total date and total match)
    x = normalize_df[:, [1, 2]].tolist()
    x_train = x[: len(x) - 1]
    x_test = x[len(x) - 1 :]

    # for linear regression & sarimax
    trn_date = total_date[: len(total_date) - 1]
    trn_user = total_user[: len(total_user) - 1]
    trn_match = total_match[: len(total_match) - 1]
    tst_date = total_date[len(total_date) - 1 :]
    tst_user = total_user[len(total_user) - 1 :]
    tst_match = total_match[len(total_match) - 1 :]

    # voting system with forecasting
    res_vote = [
        linear_regression_prediction(trn_date, trn_user, trn_match, tst_date, tst_match),
        sarimax_predictor(trn_user, trn_match, tst_match),
        support_vector_regressor(x_train, x_test, trn_user),
    ]

    # Check the safety of today's data.  Compare against the scalar value,
    # not the one-element list (float > list raises TypeError).
    not_str = "" if data_safety_checker(res_vote, tst_user[0]) else "not "
    # BUG FIX: the original was not an f-string and printed the literal
    # text "{not_str}".
    print(f"Today's data is {not_str}safe.")
422
"""SegFormer model configuration (plus its ONNX export configuration).

NOTE(review): identifiers in this file were machine-mangled — every
``__init__`` parameter below is literally ``A`` (duplicate parameter names
are not valid Python), the classes/globals are named ``__lowercase``, and
the base class ``_lowercase`` is undefined here.  The code cannot run
as-is; comments describe the evident intent, the code tokens themselves
are left untouched.
"""
import warnings
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging

# Module-level logger (name mangled; presumably originally `logger`).
__lowercase : Optional[Any] = logging.get_logger(__name__)

# Checkpoint name -> config-file URL map (presumably originally
# `SEGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP` — TODO confirm).
__lowercase : str = {
    '''nvidia/segformer-b0-finetuned-ade-512-512''': (
        '''https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json'''
    ),
    # See all SegFormer models at https://huggingface.co/models?filter=segformer
}


class __lowercase ( _lowercase ):
    # Configuration class for SegFormer.  Base class is mangled; presumably
    # `PretrainedConfig` given the import above — TODO confirm.

    # `model_type` identifier used by the auto classes.
    lowerCamelCase : Optional[Any] = "segformer"

    def __init__(self , A=3 , A=4 , A=[2, 2, 2, 2] , A=[8, 4, 2, 1] , A=[3_2, 6_4, 1_6_0, 2_5_6] , A=[7, 3, 3, 3] , A=[4, 2, 2, 2] , A=[1, 2, 5, 8] , A=[4, 4, 4, 4] , A="gelu" , A=0.0 , A=0.0 , A=0.1 , A=0.02 , A=0.1 , A=1E-6 , A=2_5_6 , A=2_5_5 , **A , ):
        # All parameters were renamed to `A`; the assignment block below shows
        # the intended names (num_channels, num_encoder_blocks, depths, ...).
        super().__init__(**A )
        # Deprecation shim: `reshape_last_stage=False` will eventually be
        # removed.  NOTE(review): `kwargs` is referenced although the
        # catch-all parameter was mangled to `A`, and the warning's second
        # argument (normally a Warning category) is also mangled.
        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                '''Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be'''
                ''' removed, as the behaviour will default to that of reshape_last_stage = True.''' , A , )
        # Store the configuration values on the instance (left-hand names are
        # mangled; the right-hand names are the intended attribute names).
        lowerCamelCase_ : Any = num_channels
        lowerCamelCase_ : Dict = num_encoder_blocks
        lowerCamelCase_ : str = depths
        lowerCamelCase_ : Optional[Any] = sr_ratios
        lowerCamelCase_ : Dict = hidden_sizes
        lowerCamelCase_ : List[str] = patch_sizes
        lowerCamelCase_ : Any = strides
        lowerCamelCase_ : Any = mlp_ratios
        lowerCamelCase_ : Optional[int] = num_attention_heads
        lowerCamelCase_ : int = hidden_act
        lowerCamelCase_ : List[Any] = hidden_dropout_prob
        lowerCamelCase_ : int = attention_probs_dropout_prob
        lowerCamelCase_ : List[str] = classifier_dropout_prob
        lowerCamelCase_ : Union[str, Any] = initializer_range
        lowerCamelCase_ : Optional[Any] = drop_path_rate
        lowerCamelCase_ : Optional[Any] = layer_norm_eps
        lowerCamelCase_ : Optional[Any] = decoder_hidden_size
        # `reshape_last_stage` is read back out of kwargs (default mangled).
        lowerCamelCase_ : Union[str, Any] = kwargs.get('''reshape_last_stage''' , A )
        lowerCamelCase_ : List[Any] = semantic_loss_ignore_index


class __lowercase ( _lowercase ):
    # ONNX export configuration (presumably originally `SegformerOnnxConfig`,
    # subclassing the imported `OnnxConfig` — TODO confirm).

    # Minimum torch version required for the export.
    lowerCamelCase : List[str] = version.parse("1.11" )

    @property
    def UpperCAmelCase__ (self ):
        # Input axes for ONNX export: a single 4-D `pixel_values` tensor
        # with dynamic batch/channels/height/width axes.
        return OrderedDict(
            [
                ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
            ] )

    @property
    def UpperCAmelCase__ (self ):
        # Absolute tolerance used when validating the exported model.
        return 1E-4

    @property
    def UpperCAmelCase__ (self ):
        # Default ONNX opset version.
        return 1_2
422
1
"""Fetch current worldwide COVID-19 statistics from worldometers.info.

Fixes NameError defects in the original block: the scraping function was
defined under a mangled name while being called as ``covid_stats``, its body
referenced the undefined ``__UpperCAmelCase``, and the format string was
assigned to a mangled global while being used as ``fmt``.
"""
from collections import namedtuple

import requests
from lxml import html  # type: ignore

# Lightweight record for the three headline counters on the page.
covid_data = namedtuple("covid_data", "cases deaths recovered")


def covid_stats(url: str = "https://www.worldometers.info/coronavirus/") -> covid_data:
    """Scrape the headline counters (cases, deaths, recovered) from *url*."""
    # The three headline numbers all live in <div class="maincounter-number"> spans.
    xpath_str = '//div[@class = "maincounter-number"]/span/text()'
    return covid_data(*html.fromstring(requests.get(url).content).xpath(xpath_str))


fmt = (
    "Total COVID-19 cases in the world: {}\n"
    "Total deaths due to COVID-19 in the world: {}\n"
    "Total COVID-19 patients recovered in the world: {}"
)
print(fmt.format(*covid_stats()))
713
"""Rayleigh quotient of a Hermitian matrix: R(a, v) = (v* a v) / (v* v).

Fixes NameError defects in the original block: all three functions were
defined under the same mangled name ``__lowercase`` (so later definitions
shadowed earlier ones), their bodies referenced undefined names, and the
module-level code called ``is_hermitian`` / ``rayleigh_quotient`` /
``tests`` which did not exist.
"""
from typing import Any

import numpy as np


def is_hermitian(matrix: np.ndarray) -> bool:
    """Return True if *matrix* equals its own conjugate transpose.

    >>> import numpy as np
    >>> a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    >>> is_hermitian(a)
    True
    """
    return np.array_equal(matrix, matrix.conjugate().T)


def rayleigh_quotient(a: np.ndarray, v: np.ndarray) -> Any:
    """Return the Rayleigh quotient (v* a v) / (v* v) for matrix *a* at *v*.

    *v* is expected as a column vector; the result is a 1x1 array.
    """
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a)
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))


def tests() -> None:
    """Exercise both helpers on a complex and a real Hermitian matrix."""
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    v = np.array([[1], [2], [3]])
    assert is_hermitian(a), f"{a} is not hermitian."
    print(rayleigh_quotient(a, v))

    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), f"{a} is not hermitian."
    assert rayleigh_quotient(a, v) == float(3)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    tests()
446
0
from ...configuration_utils import PretrainedConfig
from ...utils import logging

# GPT-NeoX model configuration.
#
# NOTE(review): identifiers in this file were machine-mangled — every
# `__init__` parameter below is literally `snake_case__` (duplicate parameter
# names are not valid Python), the class is named `a`, and its base
# `_lowerCAmelCase` is undefined here.  The code cannot run as-is; comments
# describe the evident intent, the code tokens are left untouched.

# Module-level logger (presumably originally `logger`).
UpperCamelCase_ = logging.get_logger(__name__)

# Checkpoint name -> config-file URL map (presumably originally
# `GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP` — TODO confirm).
UpperCamelCase_ = {
    """EleutherAI/gpt-neox-20b""": """https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json""",
    # See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}


class a ( _lowerCAmelCase ):
    # Configuration class for GPT-NeoX.  Base class is mangled; presumably
    # `PretrainedConfig` given the import above — TODO confirm.

    # `model_type` identifier used by the auto classes.
    lowercase_ : Dict = 'gpt_neox'

    def __init__( self : Optional[int] , snake_case__ : str=50_432 , snake_case__ : Optional[Any]=6_144 , snake_case__ : List[Any]=44 , snake_case__ : Union[str, Any]=64 , snake_case__ : List[str]=24_576 , snake_case__ : List[Any]="gelu" , snake_case__ : str=0.2_5 , snake_case__ : List[Any]=10_000 , snake_case__ : str=0.0 , snake_case__ : Any=0.0 , snake_case__ : int=0.1 , snake_case__ : List[str]=2_048 , snake_case__ : Union[str, Any]=0.0_2 , snake_case__ : List[str]=1E-5 , snake_case__ : List[Any]=True , snake_case__ : List[str]=0 , snake_case__ : Dict=2 , snake_case__ : List[str]=False , snake_case__ : List[Any]=True , snake_case__ : Dict=None , **snake_case__ : str , ):
        """Build the configuration.

        All parameters were mangled to `snake_case__`; the assignment block
        below shows the intended names (vocab_size, max_position_embeddings,
        hidden_size, ...).
        """
        super().__init__(bos_token_id=_lowerCAmelCase , eos_token_id=_lowerCAmelCase , **_lowerCAmelCase )
        # Store configuration values on the instance (left-hand names are
        # mangled; right-hand names are the intended attribute names).
        __lowerCAmelCase = vocab_size
        __lowerCAmelCase = max_position_embeddings
        __lowerCAmelCase = hidden_size
        __lowerCAmelCase = num_hidden_layers
        __lowerCAmelCase = num_attention_heads
        __lowerCAmelCase = intermediate_size
        __lowerCAmelCase = hidden_act
        __lowerCAmelCase = rotary_pct
        __lowerCAmelCase = rotary_emb_base
        __lowerCAmelCase = attention_dropout
        __lowerCAmelCase = hidden_dropout
        __lowerCAmelCase = classifier_dropout
        __lowerCAmelCase = initializer_range
        __lowerCAmelCase = layer_norm_eps
        __lowerCAmelCase = use_cache
        __lowerCAmelCase = tie_word_embeddings
        __lowerCAmelCase = use_parallel_residual
        __lowerCAmelCase = rope_scaling
        # Validate the optional rotary-embedding scaling dict up front.
        self._rope_scaling_validation()
        # The attention heads must evenly divide the hidden size.
        # (Error string contains the upstream typo "divisble"; runtime text
        # is deliberately left unmodified in this documentation-only pass.)
        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                "The hidden size is not divisble by the number of attention heads! Make sure to update them!" )

    def UpperCAmelCase__ ( self : Any ):
        """Validate `self.rope_scaling`.

        Expected shape: a dict with exactly two keys — `type` (one of
        "linear"/"dynamic") and `factor` (a float > 1).  No-op when
        `rope_scaling` is None.
        """
        if self.rope_scaling is None:
            return
        # NOTE(review): the error message mentions fields `name`/`factor`
        # but the code reads `type`/`factor` — upstream inconsistency,
        # left untouched here.
        if not isinstance(self.rope_scaling , _lowerCAmelCase ) or len(self.rope_scaling ) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, "
                F"got {self.rope_scaling}" )
        __lowerCAmelCase = self.rope_scaling.get("type" , _lowerCAmelCase )
        __lowerCAmelCase = self.rope_scaling.get("factor" , _lowerCAmelCase )
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                F"`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}" )
        if rope_scaling_factor is None or not isinstance(_lowerCAmelCase , _lowerCAmelCase ) or rope_scaling_factor <= 1.0:
            raise ValueError(F"`rope_scaling`\'s factor field must be an float > 1, got {rope_scaling_factor}" )
611
import json import os from typing import Dict, List, Optional, Tuple import regex as re from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging __UpperCamelCase : str = logging.get_logger(__name__) __UpperCamelCase : Dict = { """vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_config_file""": """tokenizer_config.json""", } __UpperCamelCase : Optional[int] = { """vocab_file""": { """facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json""" }, """merges_file""": { """facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt""" }, """tokenizer_config_file""": { """facebook/blenderbot_small-90M""": ( """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json""" ) }, } __UpperCamelCase : Dict = {"""facebook/blenderbot_small-90M""": 512} def snake_case ( lowerCamelCase ): '''simple docstring''' __lowercase = set() __lowercase = word[0] for char in word[1:]: pairs.add((prev_char, char) ) __lowercase = char __lowercase = set(lowerCamelCase ) return pairs class __UpperCamelCase ( _lowerCAmelCase ): __snake_case :List[Any] = VOCAB_FILES_NAMES __snake_case :Tuple = PRETRAINED_VOCAB_FILES_MAP __snake_case :Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __snake_case :str = ['input_ids', 'attention_mask'] def __init__( self : Any , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Any , _lowerCAmelCase : str="__start__" , _lowerCAmelCase : int="__end__" , _lowerCAmelCase : Any="__unk__" , _lowerCAmelCase : List[Any]="__null__" , **_lowerCAmelCase : Tuple , ) -> str: """simple docstring""" super().__init__(unk_token=_lowerCAmelCase , bos_token=_lowerCAmelCase , eos_token=_lowerCAmelCase , pad_token=_lowerCAmelCase , **_lowerCAmelCase ) with open(_lowerCAmelCase , encoding="""utf-8""" ) as vocab_handle: __lowercase = json.load(_lowerCAmelCase ) __lowercase = {v: k for k, v 
in self.encoder.items()} with open(_lowerCAmelCase , encoding="""utf-8""" ) as merges_handle: __lowercase = merges_handle.read().split("""\n""" )[1:-1] __lowercase = [tuple(merge.split() ) for merge in merges] __lowercase = dict(zip(_lowerCAmelCase , range(len(_lowerCAmelCase ) ) ) ) __lowercase = {} @property def _a ( self : Union[str, Any] ) -> int: """simple docstring""" return len(self.encoder ) def _a ( self : Dict ) -> Dict: """simple docstring""" return dict(self.encoder , **self.added_tokens_encoder ) def _a ( self : str , _lowerCAmelCase : str ) -> str: """simple docstring""" if token in self.cache: return self.cache[token] __lowercase = re.sub("""([.,!?()])""" , r""" \1""" , _lowerCAmelCase ) __lowercase = re.sub("""(')""" , r""" \1 """ , _lowerCAmelCase ) __lowercase = re.sub(r"""\s{2,}""" , """ """ , _lowerCAmelCase ) if "\n" in token: __lowercase = token.replace("""\n""" , """ __newln__""" ) __lowercase = token.split(""" """ ) __lowercase = [] for token in tokens: if not len(_lowerCAmelCase ): continue __lowercase = token.lower() __lowercase = tuple(_lowerCAmelCase ) __lowercase = tuple(list(word[:-1] ) + [word[-1] + """</w>"""] ) __lowercase = get_pairs(_lowerCAmelCase ) if not pairs: words.append(_lowerCAmelCase ) continue while True: __lowercase = min(_lowerCAmelCase , key=lambda _lowerCAmelCase : self.bpe_ranks.get(_lowerCAmelCase , float("""inf""" ) ) ) if bigram not in self.bpe_ranks: break __lowercase , __lowercase = bigram __lowercase = [] __lowercase = 0 while i < len(_lowerCAmelCase ): try: __lowercase = word.index(_lowerCAmelCase , _lowerCAmelCase ) new_word.extend(word[i:j] ) __lowercase = j except ValueError: new_word.extend(word[i:] ) break if word[i] == first and i < len(_lowerCAmelCase ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 __lowercase = tuple(_lowerCAmelCase ) __lowercase = new_word if len(_lowerCAmelCase ) == 1: break else: __lowercase = get_pairs(_lowerCAmelCase 
) __lowercase = """@@ """.join(_lowerCAmelCase ) __lowercase = word[:-4] __lowercase = word words.append(_lowerCAmelCase ) return " ".join(_lowerCAmelCase ) def _a ( self : Optional[Any] , _lowerCAmelCase : str ) -> List[str]: """simple docstring""" __lowercase = [] __lowercase = re.findall(r"""\S+\n?""" , _lowerCAmelCase ) for token in words: split_tokens.extend(list(self.bpe(_lowerCAmelCase ).split(""" """ ) ) ) return split_tokens def _a ( self : Tuple , _lowerCAmelCase : str ) -> int: """simple docstring""" __lowercase = token.lower() return self.encoder.get(_lowerCAmelCase , self.encoder.get(self.unk_token ) ) def _a ( self : Tuple , _lowerCAmelCase : int ) -> str: """simple docstring""" return self.decoder.get(_lowerCAmelCase , self.unk_token ) def _a ( self : Dict , _lowerCAmelCase : List[str] ) -> str: """simple docstring""" __lowercase = """ """.join(_lowerCAmelCase ).replace("""@@ """ , """""" ).strip() return out_string def _a ( self : List[str] , _lowerCAmelCase : str , _lowerCAmelCase : Optional[str] = None ) -> Tuple[str]: """simple docstring""" if not os.path.isdir(_lowerCAmelCase ): logger.error(F'Vocabulary path ({save_directory}) should be a directory' ) return __lowercase = os.path.join( _lowerCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) __lowercase = os.path.join( _lowerCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] ) with open(_lowerCAmelCase , """w""" , encoding="""utf-8""" ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=_lowerCAmelCase , ensure_ascii=_lowerCAmelCase ) + """\n""" ) __lowercase = 0 with open(_lowerCAmelCase , """w""" , encoding="""utf-8""" ) as writer: writer.write("""#version: 0.2\n""" ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda _lowerCAmelCase : kv[1] ): if index != token_index: logger.warning( F'Saving vocabulary to {merge_file}: BPE merge indices are 
not consecutive.' """ Please check that the tokenizer is not corrupted!""" ) __lowercase = token_index writer.write(""" """.join(_lowerCAmelCase ) + """\n""" ) index += 1 return vocab_file, merge_file
80
0
'''simple docstring''' import unittest from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class SCREAMING_SNAKE_CASE__ : @staticmethod def A ( *a_ : Tuple , **a_ : Optional[int] ): """simple docstring""" pass @is_pipeline_test @require_vision @require_torch class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): __SCREAMING_SNAKE_CASE = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING def A ( self : int , a_ : str , a_ : Union[str, Any] , a_ : Tuple ): """simple docstring""" __snake_case = pipeline( "zero-shot-object-detection" , model="hf-internal-testing/tiny-random-owlvit-object-detection" ) __snake_case = [ { """image""": """./tests/fixtures/tests_samples/COCO/000000039769.png""", """candidate_labels""": ["""cat""", """remote""", """couch"""], } ] return object_detector, examples def A ( self : str , a_ : List[str] , a_ : List[str] ): """simple docstring""" __snake_case = object_detector(examples[0] , threshold=0.0 ) __snake_case = len(a_ ) self.assertGreater(a_ , 0 ) self.assertEqual( a_ , [ { "score": ANY(a_ ), "label": ANY(a_ ), "box": {"xmin": ANY(a_ ), "ymin": ANY(a_ ), "xmax": ANY(a_ ), "ymax": ANY(a_ )}, } for i in range(a_ ) ] , ) @require_tf @unittest.skip("Zero Shot Object Detection not implemented in TF" ) def A ( self : List[Any] ): """simple docstring""" pass @require_torch def A ( self : Tuple ): """simple docstring""" __snake_case = pipeline( "zero-shot-object-detection" , model="hf-internal-testing/tiny-random-owlvit-object-detection" ) __snake_case = object_detector( "./tests/fixtures/tests_samples/COCO/000000039769.png" , candidate_labels=["cat", "remote", "couch"] , threshold=0.64 , ) self.assertEqual( nested_simplify(a_ , decimals=4 ) , [ {"score": 0.7235, "label": "cat", "box": 
{"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}}, {"score": 0.7218, "label": "remote", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}}, {"score": 0.7184, "label": "couch", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}}, {"score": 0.6748, "label": "remote", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}}, {"score": 0.6656, "label": "cat", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}}, {"score": 0.6614, "label": "couch", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}}, {"score": 0.6456, "label": "remote", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}}, {"score": 0.642, "label": "remote", "box": {"xmin": 67, "ymin": 274, "xmax": 93, "ymax": 297}}, {"score": 0.6419, "label": "cat", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}}, ] , ) __snake_case = object_detector( [ { "image": "./tests/fixtures/tests_samples/COCO/000000039769.png", "candidate_labels": ["cat", "remote", "couch"], } ] , threshold=0.64 , ) self.assertEqual( nested_simplify(a_ , decimals=4 ) , [ [ {"score": 0.7235, "label": "cat", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}}, {"score": 0.7218, "label": "remote", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}}, {"score": 0.7184, "label": "couch", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}}, {"score": 0.6748, "label": "remote", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}}, {"score": 0.6656, "label": "cat", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}}, {"score": 0.6614, "label": "couch", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}}, {"score": 0.6456, "label": "remote", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}}, {"score": 0.642, "label": "remote", "box": {"xmin": 67, "ymin": 274, "xmax": 93, "ymax": 297}}, {"score": 0.6419, "label": "cat", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}}, ] ] , ) @require_torch @slow def A ( self : 
Optional[Any] ): """simple docstring""" __snake_case = pipeline("zero-shot-object-detection" ) __snake_case = object_detector( "http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , ) self.assertEqual( nested_simplify(a_ , decimals=4 ) , [ {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}}, {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}}, {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}}, {"score": 0.1474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}}, {"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}}, ] , ) __snake_case = object_detector( [ { "image": "http://images.cocodataset.org/val2017/000000039769.jpg", "candidate_labels": ["cat", "remote", "couch"], }, { "image": "http://images.cocodataset.org/val2017/000000039769.jpg", "candidate_labels": ["cat", "remote", "couch"], }, ] , ) self.assertEqual( nested_simplify(a_ , decimals=4 ) , [ [ {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}}, {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}}, {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}}, {"score": 0.1474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}}, {"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}}, ], [ {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}}, {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}}, {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}}, {"score": 0.1474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}}, {"score": 0.1208, "label": 
"couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}}, ], ] , ) @require_tf @unittest.skip("Zero Shot Object Detection not implemented in TF" ) def A ( self : Union[str, Any] ): """simple docstring""" pass @require_torch @slow def A ( self : Optional[int] ): """simple docstring""" __snake_case = 0.2 __snake_case = pipeline("zero-shot-object-detection" ) __snake_case = object_detector( "http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , threshold=a_ , ) self.assertEqual( nested_simplify(a_ , decimals=4 ) , [ {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}}, {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}}, {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}}, ] , ) @require_torch @slow def A ( self : int ): """simple docstring""" __snake_case = 2 __snake_case = pipeline("zero-shot-object-detection" ) __snake_case = object_detector( "http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , top_k=a_ , ) self.assertEqual( nested_simplify(a_ , decimals=4 ) , [ {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}}, {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}}, ] , )
714
"""Fast (Rust-backed) tokenizer class for BlenderbotSmall."""
from typing import List, Optional

from tokenizers import ByteLevelBPETokenizer

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer


logger = logging.get_logger(__name__)

# File names this tokenizer serializes to / loads from.
VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

# Hub URLs of the published checkpoint's vocabulary files.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
    },
    "merges_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
    },
    "tokenizer_config_file": {
        "facebook/blenderbot_small-90M": (
            "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
        )
    },
}

# Maximum input length per published checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/blenderbot_small-90M": 512,
}


class SCREAMING_SNAKE_CASE__(PreTrainedTokenizerFast):
    """A "fast" BlenderbotSmall tokenizer backed by a byte-level BPE model.

    Wraps a :class:`tokenizers.ByteLevelBPETokenizer` and exposes it through
    the ``PreTrainedTokenizerFast`` interface.
    """

    # Attribute names below are the hooks the PreTrainedTokenizerFast
    # machinery reads; they must keep these exact names.
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BlenderbotSmallTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        """Build the fast tokenizer from ``vocab.json`` / ``merges.txt``.

        Args:
            vocab_file: Path to the BPE vocabulary (``vocab.json``).
            merges_file: Path to the BPE merges file (``merges.txt``).
            unk_token / bos_token / eos_token: Special tokens; all default to
                ``<|endoftext|>``.
            add_prefix_space: Whether the backend prepends a space to the input.
            trim_offsets: Whether post-processing trims offsets to exclude
                whitespace.
        """
        super().__init__(
            ByteLevelBPETokenizer(
                vocab=vocab_file,
                merges=merges_file,
                add_prefix_space=add_prefix_space,
                trim_offsets=trim_offsets,
            ),
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            **kwargs,
        )
        # Keep the setting accessible on the instance, mirroring the slow tokenizer.
        self.add_prefix_space = add_prefix_space

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Wrap token id sequence(s) with BOS/EOS.

        Single sequence: ``<bos> A <eos>``.
        Pair:            ``<bos> A <eos> <eos> B <eos>``.
        """
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Return a token-type-id mask for the given sequence(s).

        BlenderbotSmall does not use token types, so every position — including
        the added special tokens — is mapped to 0.
        """
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
680
0