Dataset schema (column name, dtype, observed range in this split):

    code                      string (length 82 to 53.2k characters)
    code_codestyle            int64  (0 to 721)
    style_context             string (length 91 to 41.9k characters)
    style_context_codestyle   int64  (0 to 699)
    label                     int64  (0 or 1)

The rows below follow this column order: each code string is followed by its code_codestyle id, then the style_context string, its style_context_codestyle id, and finally the label.
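Each row pairs one flattened source string (code) with an integer style id, a second source string (style_context) with its own style id, and a binary label. Below is a minimal sketch for loading and inspecting such a split with the datasets library; the Parquet path is a placeholder, since this preview does not show the dataset's actual Hub id:

from datasets import load_dataset

# Placeholder location: substitute the real Parquet file or Hub dataset id.
ds = load_dataset("parquet", data_files="train.parquet", split="train")
print(ds.column_names)
# ['code', 'code_codestyle', 'style_context', 'style_context_codestyle', 'label']

row = ds[0]
print(row["code_codestyle"], row["style_context_codestyle"], row["label"])
print(row["code"][:120])  # each code field is a single flattened source string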
"""simple docstring""" import torch from diffusers import StableDiffusionPipeline __lowerCamelCase = "path-to-your-trained-model" __lowerCamelCase = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.floataa).to("cuda") __lowerCamelCase = "A photo of sks dog in a bucket" __lowerCamelCase = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0] image.save("dog-bucket.png")
code_codestyle: 490
"""simple docstring""" import glob import os import random from string import ascii_lowercase, digits import cva __lowerCamelCase = "" __lowerCamelCase = "" __lowerCamelCase = "" __lowerCamelCase = 1 # (0 is vertical, 1 is horizontal) def lowercase ( ) -> None: __magic_name__ , __magic_name__ = get_dataset(__UpperCamelCase , __UpperCamelCase ) print('''Processing...''' ) __magic_name__ , __magic_name__ , __magic_name__ = update_image_and_anno(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) for index, image in enumerate(__UpperCamelCase ): # Get random string code: '7b7ad245cdff75241935e4dd860f3bad' __magic_name__ = random_chars(32 ) __magic_name__ = paths[index].split(os.sep )[-1].rsplit('''.''' , 1 )[0] __magic_name__ = f'''{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}''' cva.imwrite(f'''/{file_root}.jpg''' , __UpperCamelCase , [cva.IMWRITE_JPEG_QUALITY, 85] ) print(f'''Success {index+1}/{len(__UpperCamelCase )} with {file_name}''' ) __magic_name__ = [] for anno in new_annos[index]: __magic_name__ = f'''{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}''' annos_list.append(__UpperCamelCase ) with open(f'''/{file_root}.txt''' , '''w''' ) as outfile: outfile.write('''\n'''.join(line for line in annos_list ) ) def lowercase ( __UpperCamelCase , __UpperCamelCase ) -> tuple[list, list]: __magic_name__ = [] __magic_name__ = [] for label_file in glob.glob(os.path.join(__UpperCamelCase , '''*.txt''' ) ): __magic_name__ = label_file.split(os.sep )[-1].rsplit('''.''' , 1 )[0] with open(__UpperCamelCase ) as in_file: __magic_name__ = in_file.readlines() __magic_name__ = os.path.join(__UpperCamelCase , f'''{label_name}.jpg''' ) __magic_name__ = [] for obj_list in obj_lists: __magic_name__ = obj_list.rstrip('''\n''' ).split(''' ''' ) boxes.append( [ int(obj[0] ), float(obj[1] ), float(obj[2] ), float(obj[3] ), float(obj[4] ), ] ) if not boxes: continue img_paths.append(__UpperCamelCase ) labels.append(__UpperCamelCase ) return img_paths, labels def lowercase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = 1 ) -> tuple[list, list, list]: __magic_name__ = [] __magic_name__ = [] __magic_name__ = [] for idx in range(len(__UpperCamelCase ) ): __magic_name__ = [] __magic_name__ = img_list[idx] path_list.append(__UpperCamelCase ) __magic_name__ = anno_list[idx] __magic_name__ = cva.imread(__UpperCamelCase ) if flip_type == 1: __magic_name__ = cva.flip(__UpperCamelCase , __UpperCamelCase ) for bbox in img_annos: __magic_name__ = 1 - bbox[1] new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] ) elif flip_type == 0: __magic_name__ = cva.flip(__UpperCamelCase , __UpperCamelCase ) for bbox in img_annos: __magic_name__ = 1 - bbox[2] new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] ) new_annos_lists.append(__UpperCamelCase ) new_imgs_list.append(__UpperCamelCase ) return new_imgs_list, new_annos_lists, path_list def lowercase ( __UpperCamelCase = 32 ) -> str: assert number_char > 1, "The number of character should greater than 1" __magic_name__ = ascii_lowercase + digits return "".join(random.choice(__UpperCamelCase ) for _ in range(__UpperCamelCase ) ) if __name__ == "__main__": main() print("DONE ✅")
style_context_codestyle: 490
label: 1
import heapq as hq import math from collections.abc import Iterator class _lowerCAmelCase : def __init__( self : Tuple , a : Any ) -> str: """simple docstring""" lowercase = str(id_ ) lowercase = None lowercase = None lowercase = [] lowercase = {} # {vertex:distance} def __lt__( self : int , a : List[Any] ) -> Dict: """simple docstring""" return self.key < other.key def __repr__( self : List[str] ) -> List[Any]: """simple docstring""" return self.id def _lowerCAmelCase ( self : List[str] , a : Optional[int] ) -> List[Any]: """simple docstring""" self.neighbors.append(__A ) def _lowerCAmelCase ( self : Optional[int] , a : Optional[int] , a : int ) -> List[str]: """simple docstring""" lowercase = weight def A_ ( __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : int ): graph[a - 1].add_neighbor(graph[b - 1] ) graph[b - 1].add_neighbor(graph[a - 1] ) # add the edges: graph[a - 1].add_edge(graph[b - 1] , _lowerCAmelCase ) graph[b - 1].add_edge(graph[a - 1] , _lowerCAmelCase ) def A_ ( __UpperCamelCase : list , __UpperCamelCase : Vertex ): lowercase = [] for u in graph: lowercase = math.inf lowercase = None lowercase = 0 lowercase = graph[:] while q: lowercase = min(_lowerCAmelCase ) q.remove(_lowerCAmelCase ) for v in u.neighbors: if (v in q) and (u.edges[v.id] < v.key): lowercase = u lowercase = u.edges[v.id] for i in range(1 , len(_lowerCAmelCase ) ): a.append((int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) ) return a def A_ ( __UpperCamelCase : list , __UpperCamelCase : Vertex ): for u in graph: lowercase = math.inf lowercase = None lowercase = 0 lowercase = list(_lowerCAmelCase ) hq.heapify(_lowerCAmelCase ) while h: lowercase = hq.heappop(_lowerCAmelCase ) for v in u.neighbors: if (v in h) and (u.edges[v.id] < v.key): lowercase = u lowercase = u.edges[v.id] hq.heapify(_lowerCAmelCase ) for i in range(1 , len(_lowerCAmelCase ) ): yield (int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) def A_ ( ): pass if __name__ == "__main__": import doctest doctest.testmod()
code_codestyle: 711
"""simple docstring""" import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class _lowerCAmelCase ( __snake_case ): __lowerCAmelCase : Union[str, Any] = ['''image_processor''', '''tokenizer'''] __lowerCAmelCase : List[Any] = '''ViTImageProcessor''' __lowerCAmelCase : Dict = ('''CLIPTokenizer''', '''CLIPTokenizerFast''') def __init__( self : Dict , a : Optional[int]=None , a : str=None , **a : List[Any] ) -> Optional[Any]: """simple docstring""" lowercase = None if "feature_extractor" in kwargs: warnings.warn( '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`''' ''' instead.''' , a , ) lowercase = kwargs.pop('''feature_extractor''' ) lowercase = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('''You need to specify an `image_processor`.''' ) if tokenizer is None: raise ValueError('''You need to specify a `tokenizer`.''' ) super().__init__(a , a ) def __call__( self : Optional[int] , a : str=None , a : List[Any]=None , a : List[Any]=None , a : Optional[Any]=None , **a : Dict ) -> Optional[Any]: """simple docstring""" if text is None and visual_prompt is None and images is None: raise ValueError('''You have to specify either text, visual prompt or images.''' ) if text is not None and visual_prompt is not None: raise ValueError('''You have to specify exactly one type of prompt. Either text or visual prompt.''' ) if text is not None: lowercase = self.tokenizer(a , return_tensors=a , **a ) if visual_prompt is not None: lowercase = self.image_processor(a , return_tensors=a , **a ) if images is not None: lowercase = self.image_processor(a , return_tensors=a , **a ) if visual_prompt is not None and images is not None: lowercase = { '''pixel_values''': image_features.pixel_values, '''conditional_pixel_values''': prompt_features.pixel_values, } return encoding elif text is not None and images is not None: lowercase = image_features.pixel_values return encoding elif text is not None: return encoding elif visual_prompt is not None: lowercase = { '''conditional_pixel_values''': prompt_features.pixel_values, } return encoding else: return BatchEncoding(data=dict(**a ) , tensor_type=a ) def _lowerCAmelCase ( self : Any , *a : str , **a : Tuple ) -> str: """simple docstring""" return self.tokenizer.batch_decode(*a , **a ) def _lowerCAmelCase ( self : Optional[Any] , *a : Tuple , **a : Union[str, Any] ) -> Optional[Any]: """simple docstring""" return self.tokenizer.decode(*a , **a ) @property def _lowerCAmelCase ( self : str ) -> List[str]: """simple docstring""" warnings.warn( '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , a , ) return self.image_processor_class @property def _lowerCAmelCase ( self : List[Any] ) -> List[Any]: """simple docstring""" warnings.warn( '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , a , ) return self.image_processor
style_context_codestyle: 396
label: 0
'''simple docstring''' from typing import List, Optional, Union import torch from transformers import ( XLMRobertaTokenizer, ) from ...models import UNetaDConditionModel, VQModel from ...pipelines import DiffusionPipeline from ...pipelines.pipeline_utils import ImagePipelineOutput from ...schedulers import DDIMScheduler, DDPMScheduler from ...utils import ( is_accelerate_available, is_accelerate_version, logging, randn_tensor, replace_example_docstring, ) from .text_encoder import MultilingualCLIP SCREAMING_SNAKE_CASE_: Tuple =logging.get_logger(__name__) # pylint: disable=invalid-name SCREAMING_SNAKE_CASE_: List[Any] ='\n Examples:\n ```py\n >>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline\n >>> import torch\n\n >>> pipe_prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/Kandinsky-2-1-prior")\n >>> pipe_prior.to("cuda")\n\n >>> prompt = "red cat, 4k photo"\n >>> out = pipe_prior(prompt)\n >>> image_emb = out.image_embeds\n >>> negative_image_emb = out.negative_image_embeds\n\n >>> pipe = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1")\n >>> pipe.to("cuda")\n\n >>> image = pipe(\n ... prompt,\n ... image_embeds=image_emb,\n ... negative_image_embeds=negative_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... ).images\n\n >>> image[0].save("cat.png")\n ```\n' def lowerCAmelCase_ ( snake_case_ : Tuple , snake_case_ : List[str] , snake_case_ : List[str]=8 ) -> Optional[Any]: '''simple docstring''' UpperCAmelCase_ = h // scale_factor**2 if h % scale_factor**2 != 0: new_h += 1 UpperCAmelCase_ = w // scale_factor**2 if w % scale_factor**2 != 0: new_w += 1 return new_h * scale_factor, new_w * scale_factor class __A ( UpperCamelCase__ ): def __init__(self : Dict , __a : MultilingualCLIP , __a : XLMRobertaTokenizer , __a : UNetaDConditionModel , __a : Union[DDIMScheduler, DDPMScheduler] , __a : VQModel , ): super().__init__() self.register_modules( text_encoder=__a , tokenizer=__a , unet=__a , scheduler=__a , movq=__a , ) UpperCAmelCase_ = 2 ** (len(self.movq.config.block_out_channels ) - 1) def _lowercase (self : Union[str, Any] , __a : Optional[Any] , __a : Tuple , __a : Optional[Any] , __a : Dict , __a : Tuple , __a : Dict ): if latents is None: UpperCAmelCase_ = randn_tensor(__a , generator=__a , device=__a , dtype=__a ) else: if latents.shape != shape: raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {shape}""" ) UpperCAmelCase_ = latents.to(__a ) UpperCAmelCase_ = latents * scheduler.init_noise_sigma return latents def _lowercase (self : str , __a : int , __a : Union[str, Any] , __a : List[Any] , __a : Optional[Any] , __a : Any=None , ): UpperCAmelCase_ = len(__a ) if isinstance(__a , __a ) else 1 # get prompt text embeddings UpperCAmelCase_ = self.tokenizer( __a , padding="max_length" , truncation=__a , max_length=77 , return_attention_mask=__a , add_special_tokens=__a , return_tensors="pt" , ) UpperCAmelCase_ = text_inputs.input_ids UpperCAmelCase_ = self.tokenizer(__a , padding="longest" , return_tensors="pt" ).input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(__a , __a ): UpperCAmelCase_ = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] ) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f""" {self.tokenizer.model_max_length} tokens: {removed_text}""" ) UpperCAmelCase_ = text_input_ids.to(__a ) UpperCAmelCase_ = 
text_inputs.attention_mask.to(__a ) UpperCAmelCase_ , UpperCAmelCase_ = self.text_encoder( input_ids=__a , attention_mask=__a ) UpperCAmelCase_ = prompt_embeds.repeat_interleave(__a , dim=0 ) UpperCAmelCase_ = text_encoder_hidden_states.repeat_interleave(__a , dim=0 ) UpperCAmelCase_ = text_mask.repeat_interleave(__a , dim=0 ) if do_classifier_free_guidance: UpperCAmelCase_ = 42 if negative_prompt is None: UpperCAmelCase_ = [""] * batch_size elif type(__a ) is not type(__a ): raise TypeError( f"""`negative_prompt` should be the same type to `prompt`, but got {type(__a )} !=""" f""" {type(__a )}.""" ) elif isinstance(__a , __a ): UpperCAmelCase_ = [negative_prompt] elif batch_size != len(__a ): raise ValueError( f"""`negative_prompt`: {negative_prompt} has batch size {len(__a )}, but `prompt`:""" f""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches""" " the batch size of `prompt`." ) else: UpperCAmelCase_ = negative_prompt UpperCAmelCase_ = self.tokenizer( __a , padding="max_length" , max_length=77 , truncation=__a , return_attention_mask=__a , add_special_tokens=__a , return_tensors="pt" , ) UpperCAmelCase_ = uncond_input.input_ids.to(__a ) UpperCAmelCase_ = uncond_input.attention_mask.to(__a ) UpperCAmelCase_ , UpperCAmelCase_ = self.text_encoder( input_ids=__a , attention_mask=__a ) # duplicate unconditional embeddings for each generation per prompt, using mps friendly method UpperCAmelCase_ = negative_prompt_embeds.shape[1] UpperCAmelCase_ = negative_prompt_embeds.repeat(1 , __a ) UpperCAmelCase_ = negative_prompt_embeds.view(batch_size * num_images_per_prompt , __a ) UpperCAmelCase_ = uncond_text_encoder_hidden_states.shape[1] UpperCAmelCase_ = uncond_text_encoder_hidden_states.repeat(1 , __a , 1 ) UpperCAmelCase_ = uncond_text_encoder_hidden_states.view( batch_size * num_images_per_prompt , __a , -1 ) UpperCAmelCase_ = uncond_text_mask.repeat_interleave(__a , dim=0 ) # done duplicates # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes UpperCAmelCase_ = torch.cat([negative_prompt_embeds, prompt_embeds] ) UpperCAmelCase_ = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states] ) UpperCAmelCase_ = torch.cat([uncond_text_mask, text_mask] ) return prompt_embeds, text_encoder_hidden_states, text_mask def _lowercase (self : List[Any] , __a : Tuple=0 ): if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError("Please install accelerate via `pip install accelerate`" ) UpperCAmelCase_ = torch.device(f"""cuda:{gpu_id}""" ) UpperCAmelCase_ = [ self.unet, self.text_encoder, self.movq, ] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(__a , __a ) def _lowercase (self : Tuple , __a : List[Any]=0 ): if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0" ): from accelerate import cpu_offload_with_hook else: raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." 
) UpperCAmelCase_ = torch.device(f"""cuda:{gpu_id}""" ) if self.device.type != "cpu": self.to("cpu" , silence_dtype_warnings=__a ) torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist) UpperCAmelCase_ = None for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]: UpperCAmelCase_ , UpperCAmelCase_ = cpu_offload_with_hook(__a , __a , prev_module_hook=__a ) if self.safety_checker is not None: UpperCAmelCase_ , UpperCAmelCase_ = cpu_offload_with_hook(self.safety_checker , __a , prev_module_hook=__a ) # We'll offload the last model manually. UpperCAmelCase_ = hook @property # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device def _lowercase (self : int ): if not hasattr(self.unet , "_hf_hook" ): return self.device for module in self.unet.modules(): if ( hasattr(__a , "_hf_hook" ) and hasattr(module._hf_hook , "execution_device" ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device @torch.no_grad() @replace_example_docstring(__a ) def __call__(self : List[str] , __a : Union[str, List[str]] , __a : Union[torch.FloatTensor, List[torch.FloatTensor]] , __a : Union[torch.FloatTensor, List[torch.FloatTensor]] , __a : Optional[Union[str, List[str]]] = None , __a : int = 512 , __a : int = 512 , __a : int = 100 , __a : float = 4.0 , __a : int = 1 , __a : Optional[Union[torch.Generator, List[torch.Generator]]] = None , __a : Optional[torch.FloatTensor] = None , __a : Optional[str] = "pil" , __a : bool = True , ): if isinstance(__a , __a ): UpperCAmelCase_ = 1 elif isinstance(__a , __a ): UpperCAmelCase_ = len(__a ) else: raise ValueError(f"""`prompt` has to be of type `str` or `list` but is {type(__a )}""" ) UpperCAmelCase_ = self._execution_device UpperCAmelCase_ = batch_size * num_images_per_prompt UpperCAmelCase_ = guidance_scale > 1.0 UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = self._encode_prompt( __a , __a , __a , __a , __a ) if isinstance(__a , __a ): UpperCAmelCase_ = torch.cat(__a , dim=0 ) if isinstance(__a , __a ): UpperCAmelCase_ = torch.cat(__a , dim=0 ) if do_classifier_free_guidance: UpperCAmelCase_ = image_embeds.repeat_interleave(__a , dim=0 ) UpperCAmelCase_ = negative_image_embeds.repeat_interleave(__a , dim=0 ) UpperCAmelCase_ = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to( dtype=prompt_embeds.dtype , device=__a ) self.scheduler.set_timesteps(__a , device=__a ) UpperCAmelCase_ = self.scheduler.timesteps UpperCAmelCase_ = self.unet.config.in_channels UpperCAmelCase_ , UpperCAmelCase_ = get_new_h_w(__a , __a , self.movq_scale_factor ) # create initial latent UpperCAmelCase_ = self.prepare_latents( (batch_size, num_channels_latents, height, width) , text_encoder_hidden_states.dtype , __a , __a , __a , self.scheduler , ) for i, t in enumerate(self.progress_bar(__a ) ): # expand the latents if we are doing classifier free guidance UpperCAmelCase_ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents UpperCAmelCase_ = {"text_embeds": prompt_embeds, "image_embeds": image_embeds} UpperCAmelCase_ = self.unet( sample=__a , timestep=__a , encoder_hidden_states=__a , added_cond_kwargs=__a , return_dict=__a , )[0] if do_classifier_free_guidance: UpperCAmelCase_ , UpperCAmelCase_ = noise_pred.split(latents.shape[1] , dim=1 ) UpperCAmelCase_ , UpperCAmelCase_ = noise_pred.chunk(2 ) UpperCAmelCase_ , UpperCAmelCase_ = variance_pred.chunk(2 ) UpperCAmelCase_ = 
noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) UpperCAmelCase_ = torch.cat([noise_pred, variance_pred_text] , dim=1 ) if not ( hasattr(self.scheduler.config , "variance_type" ) and self.scheduler.config.variance_type in ["learned", "learned_range"] ): UpperCAmelCase_ , UpperCAmelCase_ = noise_pred.split(latents.shape[1] , dim=1 ) # compute the previous noisy sample x_t -> x_t-1 UpperCAmelCase_ = self.scheduler.step( __a , __a , __a , generator=__a , ).prev_sample # post-processing UpperCAmelCase_ = self.movq.decode(__a , force_not_quantize=__a )["sample"] if output_type not in ["pt", "np", "pil"]: raise ValueError(f"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" ) if output_type in ["np", "pil"]: UpperCAmelCase_ = image * 0.5 + 0.5 UpperCAmelCase_ = image.clamp(0 , 1 ) UpperCAmelCase_ = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if output_type == "pil": UpperCAmelCase_ = self.numpy_to_pil(__a ) if not return_dict: return (image,) return ImagePipelineOutput(images=__a )
code_codestyle: 78
'''simple docstring''' import copy import re class __A : a__ : Optional[int] = """hp""" a__ : Optional[Any] = {} a__ : List[Any] = None @classmethod def _lowercase (cls : Optional[int] , __a : str , __a : Tuple ): UpperCAmelCase_ = prefix UpperCAmelCase_ = defaults cls.build_naming_info() @staticmethod def _lowercase (__a : List[Any] , __a : List[str] ): if len(__a ) == 0: return "" UpperCAmelCase_ = None if any(char.isdigit() for char in word ): raise Exception(f"""Parameters should not contain numbers: '{word}' contains a number""" ) if word in info["short_word"]: return info["short_word"][word] for prefix_len in range(1 , len(__a ) + 1 ): UpperCAmelCase_ = word[:prefix_len] if prefix in info["reverse_short_word"]: continue else: UpperCAmelCase_ = prefix break if short_word is None: # Paranoid fallback def int_to_alphabetic(__a : Union[str, Any] ): UpperCAmelCase_ = "" while integer != 0: UpperCAmelCase_ = chr(ord("A" ) + integer % 10 ) + s integer //= 10 return s UpperCAmelCase_ = 0 while True: UpperCAmelCase_ = word + "#" + int_to_alphabetic(__a ) if sword in info["reverse_short_word"]: continue else: UpperCAmelCase_ = sword break UpperCAmelCase_ = short_word UpperCAmelCase_ = word return short_word @staticmethod def _lowercase (__a : List[str] , __a : Union[str, Any] ): UpperCAmelCase_ = param_name.split("_" ) UpperCAmelCase_ = [TrialShortNamer.shortname_for_word(__a , __a ) for word in words] # We try to create a separatorless short name, but if there is a collision we have to fallback # to a separated short name UpperCAmelCase_ = ["", "_"] for separator in separators: UpperCAmelCase_ = separator.join(__a ) if shortname not in info["reverse_short_param"]: UpperCAmelCase_ = shortname UpperCAmelCase_ = param_name return shortname return param_name @staticmethod def _lowercase (__a : int , __a : Union[str, Any] ): UpperCAmelCase_ = TrialShortNamer.shortname_for_key(__a , __a ) UpperCAmelCase_ = short_name UpperCAmelCase_ = param_name @classmethod def _lowercase (cls : Any ): if cls.NAMING_INFO is not None: return UpperCAmelCase_ = { "short_word": {}, "reverse_short_word": {}, "short_param": {}, "reverse_short_param": {}, } UpperCAmelCase_ = list(cls.DEFAULTS.keys() ) for k in field_keys: cls.add_new_param_name(__a , __a ) UpperCAmelCase_ = info @classmethod def _lowercase (cls : int , __a : Optional[int] ): cls.build_naming_info() assert cls.PREFIX is not None UpperCAmelCase_ = [copy.copy(cls.PREFIX )] for k, v in params.items(): if k not in cls.DEFAULTS: raise Exception(f"""You should provide a default value for the param name {k} with value {v}""" ) if v == cls.DEFAULTS[k]: # The default value is not added to the name continue UpperCAmelCase_ = cls.NAMING_INFO["short_param"][k] if isinstance(__a , __a ): UpperCAmelCase_ = 1 if v else 0 UpperCAmelCase_ = "" if isinstance(__a , (int, float) ) else "-" UpperCAmelCase_ = f"""{key}{sep}{v}""" name.append(__a ) return "_".join(__a ) @classmethod def _lowercase (cls : Dict , __a : Dict ): UpperCAmelCase_ = repr[len(cls.PREFIX ) + 1 :] if repr == "": UpperCAmelCase_ = [] else: UpperCAmelCase_ = repr.split("_" ) UpperCAmelCase_ = {} for value in values: if "-" in value: UpperCAmelCase_ , UpperCAmelCase_ = value.split("-" ) else: UpperCAmelCase_ = re.sub("[0-9.]" , "" , __a ) UpperCAmelCase_ = float(re.sub("[^0-9.]" , "" , __a ) ) UpperCAmelCase_ = cls.NAMING_INFO["reverse_short_param"][p_k] UpperCAmelCase_ = p_v for k in cls.DEFAULTS: if k not in parameters: UpperCAmelCase_ = cls.DEFAULTS[k] return parameters
style_context_codestyle: 78
label: 1
'''simple docstring''' import timeit import numpy as np import datasets from datasets.arrow_writer import ArrowWriter from datasets.features.features import _ArrayXD def _snake_case (_snake_case : Any) -> Any: def wrapper(*_snake_case : Dict , **_snake_case : Dict): _lowercase =timeit.default_timer() _lowercase =func(*_snake_case , **_snake_case) _lowercase =timeit.default_timer() - starttime return delta _lowercase =func.__name__ return wrapper def _snake_case (_snake_case : dict , _snake_case : Optional[Any]=100 , _snake_case : str=None) -> Dict: _lowercase =[] _lowercase =seq_shapes or {} for i in range(_snake_case): _lowercase ={} for col_id, (k, v) in enumerate(features.items()): if isinstance(_snake_case , _ArrayXD): _lowercase =np.random.rand(*v.shape).astype(v.dtype) elif isinstance(_snake_case , datasets.Value): if v.dtype == "string": _lowercase ='The small grey turtle was surprisingly fast when challenged.' else: _lowercase =np.random.randint(10 , size=1).astype(v.dtype).item() elif isinstance(_snake_case , datasets.Sequence): while isinstance(_snake_case , datasets.Sequence): _lowercase =v.feature _lowercase =seq_shapes[k] _lowercase =np.random.rand(*_snake_case).astype(v.dtype) _lowercase =data dummy_data.append((i, example)) return dummy_data def _snake_case (_snake_case : Any , _snake_case : List[str] , _snake_case : Dict=100 , _snake_case : Any=None) -> Tuple: _lowercase =generate_examples(_snake_case , num_examples=_snake_case , seq_shapes=_snake_case) with ArrowWriter(features=_snake_case , path=_snake_case) as writer: for key, record in dummy_data: _lowercase =features.encode_example(_snake_case) writer.write(_snake_case) _lowercase , _lowercase =writer.finalize() if not num_final_examples == num_examples: raise ValueError( f'''Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}.''') _lowercase =datasets.Dataset.from_file(filename=_snake_case , info=datasets.DatasetInfo(features=_snake_case)) return dataset
code_codestyle: 703
from ...configuration_utils import PretrainedConfig from ...utils import logging _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) _SCREAMING_SNAKE_CASE = { "uclanlp/visualbert-vqa": "https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json", "uclanlp/visualbert-vqa-pre": "https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json", "uclanlp/visualbert-vqa-coco-pre": ( "https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json" ), "uclanlp/visualbert-vcr": "https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json", "uclanlp/visualbert-vcr-pre": "https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json", "uclanlp/visualbert-vcr-coco-pre": ( "https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json" ), "uclanlp/visualbert-nlvr2": "https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json", "uclanlp/visualbert-nlvr2-pre": "https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json", "uclanlp/visualbert-nlvr2-coco-pre": ( "https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json" ) # See all VisualBERT models at https://huggingface.co/models?filter=visual_bert } class SCREAMING_SNAKE_CASE_ ( _a ): """simple docstring""" __lowerCAmelCase : Tuple ='''visual_bert''' def __init__( self :Any, snake_case :Dict=3_0522, snake_case :Optional[Any]=768, snake_case :Optional[Any]=512, snake_case :Tuple=12, snake_case :List[Any]=12, snake_case :List[Any]=3072, snake_case :Optional[int]="gelu", snake_case :Union[str, Any]=0.1, snake_case :List[str]=0.1, snake_case :Optional[Any]=512, snake_case :Tuple=2, snake_case :Optional[int]=0.0_2, snake_case :Union[str, Any]=1e-1_2, snake_case :Union[str, Any]=False, snake_case :Optional[Any]=True, snake_case :Tuple=1, snake_case :int=0, snake_case :Any=2, **snake_case :Union[str, Any], ): """simple docstring""" super().__init__(pad_token_id=snake_case, bos_token_id=snake_case, eos_token_id=snake_case, **snake_case) _lowercase =vocab_size _lowercase =max_position_embeddings _lowercase =hidden_size _lowercase =visual_embedding_dim _lowercase =num_hidden_layers _lowercase =num_attention_heads _lowercase =intermediate_size _lowercase =hidden_act _lowercase =hidden_dropout_prob _lowercase =attention_probs_dropout_prob _lowercase =initializer_range _lowercase =type_vocab_size _lowercase =layer_norm_eps _lowercase =bypass_transformer _lowercase =special_visual_initialize
style_context_codestyle: 557
label: 0
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available snake_case_ = { 'configuration_x_clip': [ 'XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XCLIPConfig', 'XCLIPTextConfig', 'XCLIPVisionConfig', ], 'processing_x_clip': ['XCLIPProcessor'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case_ = [ 'XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST', 'XCLIPModel', 'XCLIPPreTrainedModel', 'XCLIPTextModel', 'XCLIPVisionModel', ] if TYPE_CHECKING: from .configuration_x_clip import ( XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, XCLIPConfig, XCLIPTextConfig, XCLIPVisionConfig, ) from .processing_x_clip import XCLIPProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_x_clip import ( XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST, XCLIPModel, XCLIPPreTrainedModel, XCLIPTextModel, XCLIPVisionModel, ) else: import sys snake_case_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
code_codestyle: 592
import unittest from transformers import AlbertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForPreTraining, AlbertForQuestionAnswering, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertModel, ) from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST class SCREAMING_SNAKE_CASE__ : def __init__(self : Optional[int] , a__ : Any , a__ : Dict=13 , a__ : Tuple=7 , a__ : Optional[Any]=True , a__ : Tuple=True , a__ : Dict=True , a__ : Dict=True , a__ : List[str]=99 , a__ : int=16 , a__ : int=36 , a__ : Any=6 , a__ : Dict=6 , a__ : Optional[int]=6 , a__ : Tuple=37 , a__ : Union[str, Any]="gelu" , a__ : str=0.1 , a__ : Any=0.1 , a__ : int=512 , a__ : List[str]=16 , a__ : Dict=2 , a__ : Any=0.0_2 , a__ : str=3 , a__ : Dict=4 , a__ : int=None , ): """simple docstring""" __snake_case = parent __snake_case = batch_size __snake_case = seq_length __snake_case = is_training __snake_case = use_input_mask __snake_case = use_token_type_ids __snake_case = use_labels __snake_case = vocab_size __snake_case = embedding_size __snake_case = hidden_size __snake_case = num_hidden_layers __snake_case = num_hidden_groups __snake_case = num_attention_heads __snake_case = intermediate_size __snake_case = hidden_act __snake_case = hidden_dropout_prob __snake_case = attention_probs_dropout_prob __snake_case = max_position_embeddings __snake_case = type_vocab_size __snake_case = type_sequence_label_size __snake_case = initializer_range __snake_case = num_labels __snake_case = num_choices __snake_case = scope def a (self : List[str] ): """simple docstring""" __snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __snake_case = None if self.use_input_mask: __snake_case = random_attention_mask([self.batch_size, self.seq_length] ) __snake_case = None if self.use_token_type_ids: __snake_case = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __snake_case = None __snake_case = None __snake_case = None if self.use_labels: __snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __snake_case = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __snake_case = ids_tensor([self.batch_size] , self.num_choices ) __snake_case = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def a (self : Tuple ): """simple docstring""" return AlbertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , ) def a (self : str , a__ : Dict , a__ : Optional[Any] , a__ : Optional[int] , a__ : int , a__ : Optional[Any] , a__ : Tuple , a__ : Tuple ): """simple 
docstring""" __snake_case = AlbertModel(config=a__ ) model.to(a__ ) model.eval() __snake_case = model(a__ , attention_mask=a__ , token_type_ids=a__ ) __snake_case = model(a__ , token_type_ids=a__ ) __snake_case = model(a__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def a (self : Dict , a__ : Optional[int] , a__ : Dict , a__ : Any , a__ : Any , a__ : Dict , a__ : List[str] , a__ : Optional[int] ): """simple docstring""" __snake_case = AlbertForPreTraining(config=a__ ) model.to(a__ ) model.eval() __snake_case = model( a__ , attention_mask=a__ , token_type_ids=a__ , labels=a__ , sentence_order_label=a__ , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) ) def a (self : Tuple , a__ : Any , a__ : Union[str, Any] , a__ : List[Any] , a__ : Union[str, Any] , a__ : Optional[int] , a__ : Union[str, Any] , a__ : Optional[Any] ): """simple docstring""" __snake_case = AlbertForMaskedLM(config=a__ ) model.to(a__ ) model.eval() __snake_case = model(a__ , attention_mask=a__ , token_type_ids=a__ , labels=a__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def a (self : List[Any] , a__ : Dict , a__ : Optional[int] , a__ : Tuple , a__ : List[Any] , a__ : List[str] , a__ : Tuple , a__ : List[str] ): """simple docstring""" __snake_case = AlbertForQuestionAnswering(config=a__ ) model.to(a__ ) model.eval() __snake_case = model( a__ , attention_mask=a__ , token_type_ids=a__ , start_positions=a__ , end_positions=a__ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def a (self : Any , a__ : Optional[Any] , a__ : Union[str, Any] , a__ : Any , a__ : Union[str, Any] , a__ : Any , a__ : int , a__ : List[str] ): """simple docstring""" __snake_case = self.num_labels __snake_case = AlbertForSequenceClassification(a__ ) model.to(a__ ) model.eval() __snake_case = model(a__ , attention_mask=a__ , token_type_ids=a__ , labels=a__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def a (self : str , a__ : Any , a__ : int , a__ : List[str] , a__ : str , a__ : Dict , a__ : Union[str, Any] , a__ : Tuple ): """simple docstring""" __snake_case = self.num_labels __snake_case = AlbertForTokenClassification(config=a__ ) model.to(a__ ) model.eval() __snake_case = model(a__ , attention_mask=a__ , token_type_ids=a__ , labels=a__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def a (self : Union[str, Any] , a__ : str , a__ : Optional[Any] , a__ : int , a__ : Dict , a__ : int , a__ : Optional[int] , a__ : Union[str, Any] ): """simple docstring""" __snake_case = self.num_choices __snake_case = AlbertForMultipleChoice(config=a__ ) model.to(a__ ) model.eval() __snake_case = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __snake_case = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __snake_case = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __snake_case = model( a__ , attention_mask=a__ , token_type_ids=a__ , labels=a__ , ) self.parent.assertEqual(result.logits.shape , 
(self.batch_size, self.num_choices) ) def a (self : Dict ): """simple docstring""" __snake_case = self.prepare_config_and_inputs() ( ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ) = config_and_inputs __snake_case = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ): A_ : Optional[int] = ( ( AlbertModel, AlbertForPreTraining, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertForQuestionAnswering, ) if is_torch_available() else () ) A_ : List[Any] = ( { 'feature-extraction': AlbertModel, 'fill-mask': AlbertForMaskedLM, 'question-answering': AlbertForQuestionAnswering, 'text-classification': AlbertForSequenceClassification, 'token-classification': AlbertForTokenClassification, 'zero-shot': AlbertForSequenceClassification, } if is_torch_available() else {} ) A_ : Any = True def a (self : List[str] , a__ : str , a__ : Union[str, Any] , a__ : Optional[int]=False ): """simple docstring""" __snake_case = super()._prepare_for_class(a__ , a__ , return_labels=a__ ) if return_labels: if model_class in get_values(a__ ): __snake_case = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=a__ ) __snake_case = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=a__ ) return inputs_dict def a (self : Any ): """simple docstring""" __snake_case = AlbertModelTester(self ) __snake_case = ConfigTester(self , config_class=a__ , hidden_size=37 ) def a (self : Union[str, Any] ): """simple docstring""" self.config_tester.run_common_tests() def a (self : Optional[int] ): """simple docstring""" __snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*a__ ) def a (self : List[str] ): """simple docstring""" __snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*a__ ) def a (self : str ): """simple docstring""" __snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*a__ ) def a (self : Optional[int] ): """simple docstring""" __snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*a__ ) def a (self : int ): """simple docstring""" __snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*a__ ) def a (self : Dict ): """simple docstring""" __snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*a__ ) def a (self : Dict ): """simple docstring""" __snake_case = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: __snake_case = type self.model_tester.create_and_check_model(*a__ ) @slow def a (self : Dict ): """simple docstring""" for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __snake_case = AlbertModel.from_pretrained(a__ ) self.assertIsNotNone(a__ ) @require_torch class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): @slow def a (self : Tuple ): """simple docstring""" __snake_case = AlbertModel.from_pretrained('''albert-base-v2''' ) __snake_case = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] ) 
__snake_case = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): __snake_case = model(a__ , attention_mask=a__ )[0] __snake_case = torch.Size((1, 11, 768) ) self.assertEqual(output.shape , a__ ) __snake_case = torch.tensor( [[[-0.6_5_1_3, 1.5_0_3_5, -0.2_7_6_6], [-0.6_5_1_5, 1.5_0_4_6, -0.2_7_8_0], [-0.6_5_1_2, 1.5_0_4_9, -0.2_7_8_4]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , a__ , atol=1E-4 ) )
style_context_codestyle: 592
label: 1
import argparse import re from pathlib import Path import requests import torch from PIL import Image from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor from transformers import ( EfficientFormerConfig, EfficientFormerForImageClassificationWithTeacher, EfficientFormerImageProcessor, ) from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling def lowerCamelCase__ ( __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Tuple ): """simple docstring""" lowerCAmelCase_ = old_name if "patch_embed" in old_name: lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = old_name.split("." ) if layer == "0": lowerCAmelCase_ = old_name.replace("0" , "convolution1" ) elif layer == "1": lowerCAmelCase_ = old_name.replace("1" , "batchnorm_before" ) elif layer == "3": lowerCAmelCase_ = old_name.replace("3" , "convolution2" ) else: lowerCAmelCase_ = old_name.replace("4" , "batchnorm_after" ) if "network" in old_name and re.search(r"\d\.\d" , __lowerCAmelCase ): lowerCAmelCase_ = r"\b\d{2}\b" if bool(re.search(__lowerCAmelCase , __lowerCAmelCase ) ): lowerCAmelCase_ = re.search(r"\d\.\d\d." , __lowerCAmelCase ).group() else: lowerCAmelCase_ = re.search(r"\d\.\d." , __lowerCAmelCase ).group() if int(match[0] ) < 6: lowerCAmelCase_ = old_name.replace(__lowerCAmelCase , "" ) lowerCAmelCase_ = trimmed_name.replace("network" , match[0] + ".meta4D_layers.blocks." + match[2:-1] ) lowerCAmelCase_ = "intermediate_stages." + trimmed_name else: lowerCAmelCase_ = old_name.replace(__lowerCAmelCase , "" ) if int(match[2] ) < num_meta4D_last_stage: lowerCAmelCase_ = trimmed_name.replace("network" , "meta4D_layers.blocks." + match[2] ) else: lowerCAmelCase_ = str(int(match[2] ) - num_meta4D_last_stage ) lowerCAmelCase_ = trimmed_name.replace("network" , "meta3D_layers.blocks." + layer_index ) if "norm1" in old_name: lowerCAmelCase_ = trimmed_name.replace("norm1" , "layernorm1" ) elif "norm2" in old_name: lowerCAmelCase_ = trimmed_name.replace("norm2" , "layernorm2" ) elif "fc1" in old_name: lowerCAmelCase_ = trimmed_name.replace("fc1" , "linear_in" ) elif "fc2" in old_name: lowerCAmelCase_ = trimmed_name.replace("fc2" , "linear_out" ) lowerCAmelCase_ = "last_stage." + trimmed_name elif "network" in old_name and re.search(r".\d." , __lowerCAmelCase ): lowerCAmelCase_ = old_name.replace("network" , "intermediate_stages" ) if "fc" in new_name: lowerCAmelCase_ = new_name.replace("fc" , "convolution" ) elif ("norm1" in new_name) and ("layernorm1" not in new_name): lowerCAmelCase_ = new_name.replace("norm1" , "batchnorm_before" ) elif ("norm2" in new_name) and ("layernorm2" not in new_name): lowerCAmelCase_ = new_name.replace("norm2" , "batchnorm_after" ) if "proj" in new_name: lowerCAmelCase_ = new_name.replace("proj" , "projection" ) if "dist_head" in new_name: lowerCAmelCase_ = new_name.replace("dist_head" , "distillation_classifier" ) elif "head" in new_name: lowerCAmelCase_ = new_name.replace("head" , "classifier" ) elif "patch_embed" in new_name: lowerCAmelCase_ = "efficientformer." + new_name elif new_name == "norm.weight" or new_name == "norm.bias": lowerCAmelCase_ = new_name.replace("norm" , "layernorm" ) lowerCAmelCase_ = "efficientformer." + new_name else: lowerCAmelCase_ = "efficientformer.encoder." 
+ new_name return new_name def lowerCamelCase__ ( __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Optional[int] ): """simple docstring""" for key in checkpoint.copy().keys(): lowerCAmelCase_ = checkpoint.pop(__lowerCAmelCase ) lowerCAmelCase_ = val return checkpoint def lowerCamelCase__ ( ): """simple docstring""" lowerCAmelCase_ = "http://images.cocodataset.org/val2017/000000039769.jpg" lowerCAmelCase_ = Image.open(requests.get(__lowerCAmelCase , stream=__lowerCAmelCase ).raw ) return image def lowerCamelCase__ ( __lowerCAmelCase : Path , __lowerCAmelCase : Path , __lowerCAmelCase : Path , __lowerCAmelCase : bool ): """simple docstring""" lowerCAmelCase_ = torch.load(__lowerCAmelCase , map_location="cpu" )["model"] lowerCAmelCase_ = EfficientFormerConfig.from_json_file(__lowerCAmelCase ) lowerCAmelCase_ = EfficientFormerForImageClassificationWithTeacher(__lowerCAmelCase ) lowerCAmelCase_ = "_".join(checkpoint_path.split("/" )[-1].split("." )[0].split("_" )[:-1] ) lowerCAmelCase_ = config.depths[-1] - config.num_metaad_blocks + 1 lowerCAmelCase_ = convert_torch_checkpoint(__lowerCAmelCase , __lowerCAmelCase ) model.load_state_dict(__lowerCAmelCase ) model.eval() lowerCAmelCase_ = { "bilinear": PILImageResampling.BILINEAR, "bicubic": PILImageResampling.BICUBIC, "nearest": PILImageResampling.NEAREST, } # prepare image lowerCAmelCase_ = prepare_img() lowerCAmelCase_ = 256 lowerCAmelCase_ = 224 lowerCAmelCase_ = EfficientFormerImageProcessor( size={"shortest_edge": image_size} , crop_size={"height": crop_size, "width": crop_size} , resample=pillow_resamplings["bicubic"] , ) lowerCAmelCase_ = processor(images=__lowerCAmelCase , return_tensors="pt" ).pixel_values # original processing pipeline lowerCAmelCase_ = Compose( [ Resize(__lowerCAmelCase , interpolation=pillow_resamplings["bicubic"] ), CenterCrop(__lowerCAmelCase ), ToTensor(), Normalize(__lowerCAmelCase , __lowerCAmelCase ), ] ) lowerCAmelCase_ = image_transforms(__lowerCAmelCase ).unsqueeze(0 ) assert torch.allclose(__lowerCAmelCase , __lowerCAmelCase ) lowerCAmelCase_ = model(__lowerCAmelCase ) lowerCAmelCase_ = outputs.logits lowerCAmelCase_ = (1, 1000) if "l1" in model_name: lowerCAmelCase_ = torch.Tensor( [-0.1_312, 0.4_353, -1.0_499, -0.5_124, 0.4_183, -0.6_793, -1.3_777, -0.0_893, -0.7_358, -2.4_328] ) assert torch.allclose(logits[0, :10] , __lowerCAmelCase , atol=1e-3 ) assert logits.shape == expected_shape elif "l3" in model_name: lowerCAmelCase_ = torch.Tensor( [-1.3_150, -1.5_456, -1.2_556, -0.8_496, -0.7_127, -0.7_897, -0.9_728, -0.3_052, 0.3_751, -0.3_127] ) assert torch.allclose(logits[0, :10] , __lowerCAmelCase , atol=1e-3 ) assert logits.shape == expected_shape elif "l7" in model_name: lowerCAmelCase_ = torch.Tensor( [-1.0_283, -1.4_131, -0.5_644, -1.3_115, -0.5_785, -1.2_049, -0.7_528, 0.1_992, -0.3_822, -0.0_878] ) assert logits.shape == expected_shape else: raise ValueError( F"""Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7""" ) # Save Checkpoints Path(__lowerCAmelCase ).mkdir(exist_ok=__lowerCAmelCase ) model.save_pretrained(__lowerCAmelCase ) print(F"""Checkpoint successfuly converted. Model saved at {pytorch_dump_path}""" ) processor.save_pretrained(__lowerCAmelCase ) print(F"""Processor successfuly saved at {pytorch_dump_path}""" ) if push_to_hub: print("Pushing model to the hub..." 
) model.push_to_hub( repo_id=F"""Bearnardd/{pytorch_dump_path}""" , commit_message="Add model" , use_temp_dir=__lowerCAmelCase , ) processor.push_to_hub( repo_id=F"""Bearnardd/{pytorch_dump_path}""" , commit_message="Add image processor" , use_temp_dir=__lowerCAmelCase , ) if __name__ == "__main__": _A = argparse.ArgumentParser() # Required parameters parser.add_argument( "--pytorch_model_path", default=None, type=str, required=True, help="Path to EfficientFormer pytorch checkpoint.", ) parser.add_argument( "--config_file", default=None, type=str, required=True, help="The json file for EfficientFormer model config.", ) parser.add_argument( "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub") parser.add_argument( "--no-push_to_hub", dest="push_to_hub", action="store_false", help="Do not push model and image processor to the hub", ) parser.set_defaults(push_to_hub=True) _A = parser.parse_args() convert_efficientformer_checkpoint( checkpoint_path=args.pytorch_model_path, efficientformer_config_file=args.config_file, pytorch_dump_path=args.pytorch_dump_path, push_to_hub=args.push_to_hub, )
code_codestyle: 279
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging _A = "▁" _A = {"vocab_file": "spiece.model"} _A = { "vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"} } _A = { "google/pegasus-xsum": 5_12, } _A = logging.get_logger(__name__) class _lowerCAmelCase ( __a ): _lowercase =VOCAB_FILES_NAMES _lowercase =VOCAB_FILES_NAMES _lowercase =PRETRAINED_VOCAB_FILES_MAP _lowercase =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowercase =['''input_ids''', '''attention_mask'''] def __init__( self , _UpperCamelCase , _UpperCamelCase="<pad>" , _UpperCamelCase="</s>" , _UpperCamelCase="<unk>" , _UpperCamelCase="<mask_2>" , _UpperCamelCase="<mask_1>" , _UpperCamelCase=None , _UpperCamelCase=103 , _UpperCamelCase = None , **_UpperCamelCase , ) -> None: lowerCAmelCase_ = offset if additional_special_tokens is not None: if not isinstance(_UpperCamelCase , _UpperCamelCase ): raise TypeError( f"""additional_special_tokens should be of type {type(_UpperCamelCase )}, but is""" f""" {type(_UpperCamelCase )}""" ) lowerCAmelCase_ = ( ([mask_token_sent] + additional_special_tokens) if mask_token_sent not in additional_special_tokens and mask_token_sent is not None else additional_special_tokens ) # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken additional_special_tokens_extended += [ f"""<unk_{i}>""" for i in range(len(_UpperCamelCase ) , self.offset - 1 ) ] if len(set(_UpperCamelCase ) ) != len(_UpperCamelCase ): raise ValueError( "Please make sure that the provided additional_special_tokens do not contain an incorrectly" f""" shifted list of <unk_x> tokens. 
Found {additional_special_tokens_extended}.""" ) lowerCAmelCase_ = additional_special_tokens_extended else: lowerCAmelCase_ = [mask_token_sent] if mask_token_sent is not None else [] additional_special_tokens += [f"""<unk_{i}>""" for i in range(2 , self.offset )] lowerCAmelCase_ = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( eos_token=_UpperCamelCase , unk_token=_UpperCamelCase , mask_token=_UpperCamelCase , pad_token=_UpperCamelCase , mask_token_sent=_UpperCamelCase , offset=_UpperCamelCase , additional_special_tokens=_UpperCamelCase , sp_model_kwargs=self.sp_model_kwargs , **_UpperCamelCase , ) lowerCAmelCase_ = mask_token_sent lowerCAmelCase_ = vocab_file lowerCAmelCase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(_UpperCamelCase ) # add special tokens to encoder dict lowerCAmelCase_ = { 0: self.pad_token, 1: self.eos_token, } if self.mask_token_sent is not None: self.encoder.update( { 2: self.mask_token_sent, 3: self.mask_token, } ) if self.offset > 0: # entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102 # mask_token_sent is already added to list -> so start at 1 self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1 , self.offset - 1 )} ) lowerCAmelCase_ = {v: k for k, v in self.encoder.items()} @property def __a ( self ) -> int: return len(self.sp_model ) + self.offset def __a ( self ) -> Dict[str, int]: lowerCAmelCase_ = {self.convert_ids_to_tokens(_UpperCamelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self ) -> List[Any]: lowerCAmelCase_ = self.__dict__.copy() lowerCAmelCase_ = None return state def __setstate__( self , _UpperCamelCase ) -> Optional[int]: lowerCAmelCase_ = d # for backward compatibility if not hasattr(self , "sp_model_kwargs" ): lowerCAmelCase_ = {} lowerCAmelCase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def __a ( self , _UpperCamelCase ) -> List[str]: return self.sp_model.encode(_UpperCamelCase , out_type=_UpperCamelCase ) def __a ( self , _UpperCamelCase ) -> int: if token in self.decoder: return self.decoder[token] elif token in self.added_tokens_decoder: return self.added_tokens_decoder[token] lowerCAmelCase_ = self.sp_model.piece_to_id(_UpperCamelCase ) return sp_id + self.offset def __a ( self , _UpperCamelCase ) -> str: if index in self.encoder: return self.encoder[index] elif index in self.added_tokens_encoder: return self.added_tokens_encoder[index] else: lowerCAmelCase_ = self.sp_model.IdToPiece(index - self.offset ) return token def __a ( self , _UpperCamelCase ) -> Optional[Any]: lowerCAmelCase_ = [] lowerCAmelCase_ = "" for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(_UpperCamelCase ) + token lowerCAmelCase_ = [] else: current_sub_tokens.append(_UpperCamelCase ) out_string += self.sp_model.decode(_UpperCamelCase ) return out_string.strip() def __a ( self , _UpperCamelCase=False ) -> Optional[int]: return 1 def __a ( self , _UpperCamelCase ) -> int: lowerCAmelCase_ = set(self.all_special_ids ) # call it once instead of inside list comp all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special return [1 if x in all_special_ids else 0 for x in seq] def __a ( self , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = False ) -> List[int]: if already_has_special_tokens: return 
self._special_token_mask(_UpperCamelCase ) elif token_ids_a is None: return self._special_token_mask(_UpperCamelCase ) + [1] else: return self._special_token_mask(token_ids_a + token_ids_a ) + [1] def __a ( self , _UpperCamelCase , _UpperCamelCase=None ) -> List[int]: if token_ids_a is None: return token_ids_a + [self.eos_token_id] # We don't expect to process pairs, but leave the pair logic for API consistency return token_ids_a + token_ids_a + [self.eos_token_id] def __a ( self , _UpperCamelCase , _UpperCamelCase = None ) -> Tuple[str]: if not os.path.isdir(_UpperCamelCase ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return lowerCAmelCase_ = os.path.join( _UpperCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCamelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , _UpperCamelCase ) elif not os.path.isfile(self.vocab_file ): with open(_UpperCamelCase , "wb" ) as fi: lowerCAmelCase_ = self.sp_model.serialized_model_proto() fi.write(_UpperCamelCase ) return (out_vocab_file,)
style_context_codestyle: 279
label: 1
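In every complete row above, the label is 1 exactly when code_codestyle equals style_context_codestyle (490/490 gives 1, 711/396 gives 0, 78/78 gives 1, 703/557 gives 0, 592/592 gives 1, 279/279 gives 1). A tiny helper capturing that observed pattern, offered as an inference from this sample rather than documented dataset semantics:

def same_style_label(code_codestyle: int, style_context_codestyle: int) -> int:
    # Observed in this preview: label == 1 iff the two codestyle ids match.
    return int(code_codestyle == style_context_codestyle)

assert same_style_label(490, 490) == 1  # row 1
assert same_style_label(711, 396) == 0  # row 2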
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path

import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir

sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))

from test_module.custom_configuration import CustomConfig  # noqa E402

SAMPLE_ROBERTA_CONFIG = get_tests_dir("fixtures/dummy-config.json")


class AutoConfigTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_module_spec(self):
        self.assertIsNotNone(transformers.models.auto.__spec__)
        self.assertIsNotNone(importlib.util.find_spec("transformers.models.auto"))

    def test_config_from_model_shortcut(self):
        config = AutoConfig.from_pretrained("bert-base-uncased")
        self.assertIsInstance(config, BertConfig)

    def test_config_model_type_from_local_file(self):
        config = AutoConfig.from_pretrained(SAMPLE_ROBERTA_CONFIG)
        self.assertIsInstance(config, RobertaConfig)

    def test_config_model_type_from_model_identifier(self):
        config = AutoConfig.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(config, RobertaConfig)

    def test_config_for_model_str(self):
        config = AutoConfig.for_model("roberta")
        self.assertIsInstance(config, RobertaConfig)

    def test_pattern_matching_fallback(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            # This model name contains bert and roberta, but roberta ends up being picked.
            folder = os.path.join(tmp_dir, "fake-roberta")
            os.makedirs(folder, exist_ok=True)
            with open(os.path.join(folder, "config.json"), "w") as f:
                f.write(json.dumps({}))
            config = AutoConfig.from_pretrained(folder)
            self.assertEqual(type(config), RobertaConfig)

    def test_new_config_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            # Wrong model type will raise an error
            with self.assertRaises(ValueError):
                AutoConfig.register("model", CustomConfig)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoConfig.register("bert", BertConfig)

            # Now that the config is registered, it can be used as any other config with the auto-API
            config = CustomConfig()
            with tempfile.TemporaryDirectory() as tmp_dir:
                config.save_pretrained(tmp_dir)
                new_config = AutoConfig.from_pretrained(tmp_dir)
                self.assertIsInstance(new_config, CustomConfig)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoConfig.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoConfig.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_configuration_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.",
        ):
            _ = AutoConfig.from_pretrained("hf-internal-testing/no-config-test-repo")

    def test_from_pretrained_dynamic_config(self):
        # If remote code is not set, loading this dynamic config should fail.
        with self.assertRaises(ValueError):
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            config = AutoConfig.from_pretrained(
                "hf-internal-testing/test_dynamic_model", trust_remote_code=False
            )

        config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=True)
        self.assertEqual(config.__class__.__name__, "NewModelConfig")

        # Test config can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir)
            reloaded_config = AutoConfig.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_config.__class__.__name__, "NewModelConfig")

    def test_from_pretrained_dynamic_config_conflict(self):
        class NewModelConfigLocal(BertConfig):
            model_type = "new-model"

        try:
            AutoConfig.register("new-model", NewModelConfigLocal)
            # If remote code is not set, the default is to use local
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model")
            self.assertEqual(config.__class__.__name__, "NewModelConfigLocal")
            # If remote code is disabled, we load the local one.
            config = AutoConfig.from_pretrained(
                "hf-internal-testing/test_dynamic_model", trust_remote_code=False
            )
            self.assertEqual(config.__class__.__name__, "NewModelConfigLocal")
            # If remote is enabled, we load from the Hub
            config = AutoConfig.from_pretrained(
                "hf-internal-testing/test_dynamic_model", trust_remote_code=True
            )
            self.assertEqual(config.__class__.__name__, "NewModelConfig")
        finally:
            if "new-model" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["new-model"]
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch

import numpy as np

from datasets import Dataset

from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer


class RealmRetrieverTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.num_block_records = 5

        # Realm tok
        vocab_tokens = [
            "[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "test", "question", "this", "is", "the",
            "first", "second", "third", "fourth", "fifth", "record", "want", "##want", "##ed", "wa",
            "un", "runn", "##ing", ",", "low", "lowest",
        ]
        realm_tokenizer_path = os.path.join(self.tmpdirname, "realm_tokenizer")
        os.makedirs(realm_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(realm_tokenizer_path, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        realm_block_records_path = os.path.join(self.tmpdirname, "realm_block_records")
        os.makedirs(realm_block_records_path, exist_ok=True)

    def get_tokenizer(self) -> RealmTokenizer:
        return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname, "realm_tokenizer"))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def get_config(self):
        config = RealmConfig(num_block_records=self.num_block_records)
        return config

    def get_dummy_dataset(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "question": ["foo", "bar"],
                "answers": [["Foo", "Bar"], ["Bar"]],
            }
        )
        return dataset

    def get_dummy_block_records(self):
        block_records = np.array(
            [
                b"This is the first record",
                b"This is the second record",
                b"This is the third record",
                b"This is the fourth record",
                b"This is the fifth record",
                b"This is a longer longer longer record",
            ],
            dtype=object,
        )
        return block_records

    def get_dummy_retriever(self):
        retriever = RealmRetriever(
            block_records=self.get_dummy_block_records(),
            tokenizer=self.get_tokenizer(),
        )
        return retriever

    def test_retrieve(self):
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer

        retrieved_block_ids = np.array([0, 3], dtype="long")
        question_input_ids = tokenizer(["Test question"]).input_ids
        answer_ids = tokenizer(
            ["the fourth"],
            add_special_tokens=False,
            return_token_type_ids=False,
            return_attention_mask=False,
        ).input_ids
        max_length = config.reader_seq_len

        has_answers, start_pos, end_pos, concat_inputs = retriever(
            retrieved_block_ids, question_input_ids, answer_ids=answer_ids, max_length=max_length, return_tensors="np"
        )

        self.assertEqual(len(has_answers), 2)
        self.assertEqual(len(start_pos), 2)
        self.assertEqual(len(end_pos), 2)
        self.assertEqual(concat_inputs.input_ids.shape, (2, 10))
        self.assertEqual(concat_inputs.attention_mask.shape, (2, 10))
        self.assertEqual(concat_inputs.token_type_ids.shape, (2, 10))
        self.assertEqual(concat_inputs.special_tokens_mask.shape, (2, 10))
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0]),
            ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "first", "record", "[SEP]"],
        )
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1]),
            ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "fourth", "record", "[SEP]"],
        )

    def test_block_has_answer(self):
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer

        retrieved_block_ids = np.array([0, 3, 5], dtype="long")
        question_input_ids = tokenizer(["Test question"]).input_ids
        answer_ids = tokenizer(
            ["the fourth", "longer longer"],
            add_special_tokens=False,
            return_token_type_ids=False,
            return_attention_mask=False,
        ).input_ids
        max_length = config.reader_seq_len

        has_answers, start_pos, end_pos, concat_inputs = retriever(
            retrieved_block_ids, question_input_ids, answer_ids=answer_ids, max_length=max_length, return_tensors="np"
        )

        self.assertEqual([False, True, True], has_answers)
        self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]], start_pos)
        self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]], end_pos)

    def test_save_load_pretrained(self):
        retriever = self.get_dummy_retriever()
        retriever.save_pretrained(os.path.join(self.tmpdirname, "realm_block_records"))

        # Test local path
        retriever = retriever.from_pretrained(os.path.join(self.tmpdirname, "realm_block_records"))
        self.assertEqual(retriever.block_records[0], b"This is the first record")

        # Test mocked remote path
        with patch("transformers.models.realm.retrieval_realm.hf_hub_download") as mock_hf_hub_download:
            mock_hf_hub_download.return_value = os.path.join(
                os.path.join(self.tmpdirname, "realm_block_records"), _REALM_BLOCK_RECORDS_FILENAME
            )
            retriever = RealmRetriever.from_pretrained("google/realm-cc-news-pretrained-openqa")

        self.assertEqual(retriever.block_records[0], b"This is the first record")
import math


def malus_law(initial_intensity: float, angle: float) -> float:
    # Handle negative values of initial intensity
    if initial_intensity < 0:
        raise ValueError("The value of intensity cannot be negative")
    # Handle values out of the allowed range
    if angle < 0 or angle > 360:
        raise ValueError("In Malus Law, the angle is in the range 0-360 degrees")
    return initial_intensity * (math.cos(math.radians(angle)) ** 2)


if __name__ == "__main__":
    import doctest

    doctest.testmod(name="malus_law")
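# A quick, non-authoritative sanity check of malus_law() above; it assumes the
# function is defined in the same file. At 60 degrees an ideal polarizer passes
# cos(60 deg)^2 = 1/4 of the incident intensity.
if __name__ == "__main__":
    print(malus_law(100.0, 60.0))  # ~25.0, up to floating-point rounding
    print(malus_law(100.0, 0.0))   # a fully aligned polarizer passes everything: 100.0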
def print_max_activities(start: list[int], finish: list[int]) -> None:
    n = len(finish)
    print("The following activities are selected:")

    # The first activity is always selected
    i = 0
    print(i, end=",")

    # Consider rest of the activities
    for j in range(n):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j, end=",")
            i = j


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
    print_max_activities(start, finish)
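# A minimal sketch of using print_max_activities() on unsorted input; it
# assumes the function above is in scope. The greedy argument only holds when
# activities are ordered by finish time, so we sort first.
if __name__ == "__main__":
    start_times = [5, 1, 3]
    finish_times = [9, 2, 4]
    ordered = sorted(zip(start_times, finish_times), key=lambda activity: activity[1])
    print_max_activities([s for s, _ in ordered], [f for _, f in ordered])  # prints: 0,1,2,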
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json",
    "junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json",
    "junnyu/roformer_chinese_char_small": (
        "https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json"
    ),
    "junnyu/roformer_chinese_char_base": (
        "https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json"
    ),
    "junnyu/roformer_small_discriminator": (
        "https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json"
    ),
    "junnyu/roformer_small_generator": (
        "https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json"
    ),
    # See all RoFormer models at https://huggingface.co/models?filter=roformer
}


class RoFormerConfig(PretrainedConfig):
    model_type = "roformer"

    def __init__(
        self,
        vocab_size=50000,
        embedding_size=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1536,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        rotary_value=False,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.embedding_size = hidden_size if embedding_size is None else embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.rotary_value = rotary_value
        self.use_cache = use_cache


class RoFormerOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
from __future__ import annotations

from typing import Dict

from ...configuration_utils import PretrainedConfig


ERNIE_M_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "susnato/ernie-m-base_pytorch": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json",
    "susnato/ernie-m-large_pytorch": "https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json",
}


class ErnieMConfig(PretrainedConfig):
    model_type = "ernie_m"
    attribute_map: Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}

    def __init__(
        self,
        vocab_size: int = 250002,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 514,
        initializer_range: float = 0.02,
        pad_token_id: int = 1,
        layer_norm_eps: float = 1e-05,
        classifier_dropout=None,
        is_decoder=False,
        act_dropout=0.0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.is_decoder = is_decoder
        self.act_dropout = act_dropout
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and
# emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files.
# The latter is done by `fsmt-make-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"

import json
import tempfile
from pathlib import Path

from transformers import FSMTConfig, FSMTForConditionalGeneration, FSMTTokenizer
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES


mname_tiny = "tiny-wmt19-en-ru"

# Build

# borrowed from a test
vocab = [
    "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
    "w</w>", "r</w>", "t</w>", "lo", "low", "er</w>",
    "low</w>", "lowest</w>", "newer</w>", "wider</w>", "<unk>",
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

with tempfile.TemporaryDirectory() as tmpdirname:
    build_dir = Path(tmpdirname)
    src_vocab_file = build_dir / VOCAB_FILES_NAMES["src_vocab_file"]
    tgt_vocab_file = build_dir / VOCAB_FILES_NAMES["tgt_vocab_file"]
    merges_file = build_dir / VOCAB_FILES_NAMES["merges_file"]
    with open(src_vocab_file, "w") as fp:
        fp.write(json.dumps(vocab_tokens))
    with open(tgt_vocab_file, "w") as fp:
        fp.write(json.dumps(vocab_tokens))
    with open(merges_file, "w") as fp:
        fp.write("\n".join(merges))

    tokenizer = FSMTTokenizer(
        langs=["en", "ru"],
        src_vocab_size=len(vocab),
        tgt_vocab_size=len(vocab),
        src_vocab_file=src_vocab_file,
        tgt_vocab_file=tgt_vocab_file,
        merges_file=merges_file,
    )

config = FSMTConfig(
    langs=["ru", "en"],
    src_vocab_size=1000,
    tgt_vocab_size=1000,
    d_model=4,
    encoder_layers=1,
    decoder_layers=1,
    encoder_ffn_dim=4,
    decoder_ffn_dim=4,
    encoder_attention_heads=1,
    decoder_attention_heads=1,
)

tiny_model = FSMTForConditionalGeneration(config)
print(f"num of params {tiny_model.num_parameters()}")

# Test
batch = tokenizer(["Making tiny model"], return_tensors="pt")
outputs = tiny_model(**batch)

print("test output:", len(outputs.logits[0]))

# Save
tiny_model.half()  # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)

print(f"Generated {mname_tiny}")

# Upload
# transformers-cli upload tiny-wmt19-en-ru
def binary_multiply(a: int, b: int) -> int:
    """Multiply a * b using the double-and-add (Russian peasant) method."""
    res = 0
    while b > 0:
        if b & 1:
            res += a
        a += a
        b >>= 1
    return res


def binary_mod_multiply(a: int, b: int, modulus: int) -> int:
    """Compute (a * b) % modulus, reducing each partial sum modulo `modulus`."""
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % modulus) + (a % modulus)) % modulus
        a += a
        b >>= 1
    return res
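# Small checks for the double-and-add helpers above; assumes both functions are
# in scope. The modular variant reduces partial sums as it goes, so the
# accumulated result stays bounded by the modulus.
assert binary_multiply(6, 7) == 42
assert binary_mod_multiply(3, 4, 5) == (3 * 4) % 5  # == 2
assert binary_mod_multiply(123456, 987654, 1_000) == (123456 * 987654) % 1_000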
import math
from collections.abc import Iterator
from itertools import takewhile


def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator() -> Iterator[int]:
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(n: int = 2_000_000) -> int:
    """Sum all the primes strictly below n."""
    return sum(takewhile(lambda x: x < n, prime_generator()))


if __name__ == "__main__":
    print(f"{solution() = }")
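# A cheap correctness probe for solution() above; assumes it is in scope.
# Summing the primes below 10 gives 2 + 3 + 5 + 7 = 17. The default limit of
# 2,000,000 reproduces the Project Euler #10 answer but is slow with this
# trial-division approach.
assert solution(10) == 17
assert solution(2) == 0  # no primes strictly below 2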
import tempfile
import unittest

from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from transformers.testing_utils import (
    is_torch_available,
    require_optimum,
    require_torch,
    slow,
)


if is_torch_available():
    import torch


@require_torch
@require_optimum
@slow
class BetterTransformerIntegrationTest(unittest.TestCase):
    def test_transform_and_reverse(self):
        model_id = "hf-internal-testing/tiny-random-t5"
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)

        inp = tokenizer("This is me", return_tensors="pt")

        model = model.to_bettertransformer()
        self.assertTrue(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))

        output = model.generate(**inp)

        model = model.reverse_bettertransformer()
        self.assertFalse(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model_reloaded = AutoModelForSeq2SeqLM.from_pretrained(tmpdirname)
            self.assertFalse(
                any("BetterTransformer" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules())
            )

            output_from_pretrained = model_reloaded.generate(**inp)
            self.assertTrue(torch.allclose(output, output_from_pretrained))

    def test_error_save_pretrained(self):
        model_id = "hf-internal-testing/tiny-random-t5"
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
        model = model.to_bettertransformer()

        with tempfile.TemporaryDirectory() as tmpdirname:
            with self.assertRaises(ValueError):
                model.save_pretrained(tmpdirname)

            model = model.reverse_bettertransformer()
            model.save_pretrained(tmpdirname)
import sacrebleu as scb
from packaging import version
from sacrebleu import TER

import datasets


_CITATION = '\\n@inproceedings{snover-etal-2006-study,\n title = "A Study of Translation Edit Rate with Targeted Human Annotation",\n author = "Snover, Matthew and\n Dorr, Bonnie and\n Schwartz, Rich and\n Micciulla, Linnea and\n Makhoul, John",\n booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",\n month = aug # " 8-12",\n year = "2006",\n address = "Cambridge, Massachusetts, USA",\n publisher = "Association for Machine Translation in the Americas",\n url = "https://aclanthology.org/2006.amta-papers.25",\n pages = "223--231",\n}\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'

_DESCRIPTION = '\\nTER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a\nhypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu\n(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found\nhere: https://github.com/jhclark/tercom.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.\n'

_KWARGS_DESCRIPTION = '\nProduces TER scores alongside the number of edits and reference length.\n\nArgs:\n predictions (list of str): The system stream (a sequence of segments).\n references (list of list of str): A list of one or more reference streams (each a sequence of segments).\n normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,\n as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.\n Only applies if `normalized = True`. Defaults to `False`.\n case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.\n\nReturns:\n \'score\' (float): TER score (num_edits / sum_ref_lengths * 100)\n \'num_edits\' (int): The cumulative number of edits\n \'ref_length\' (float): The cumulative average reference length\n\nExamples:\n Example 1:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?",\n ... "What did the TER metric user say to the developer?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],\n ... ["Your jokes are...", "...TERrible"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0}\n\n Example 2:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0}\n\n Example 3:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... normalized=True,\n ... case_sensitive=True)\n >>> print(results)\n {\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5}\n\n Example 4:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0}\n\n Example 5:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?",\n ... "What did the TER metric user say to the developer?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],\n ... ["Your jokes are...", "...TERrible"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0}\n'


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Ter(datasets.Metric):
    def _info(self):
        if version.parse(scb.__version__) < version.parse("1.4.12"):
            raise ImportWarning(
                "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
                'You can install it with `pip install "sacrebleu>=1.4.12"`.'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="http://www.cs.umd.edu/~snover/tercom/",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=["https://github.com/mjpost/sacreBLEU#ter"],
            reference_urls=[
                "https://github.com/jhclark/tercom",
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        normalized: bool = False,
        ignore_punct: bool = False,
        support_zh_ja_chars: bool = False,
        case_sensitive: bool = False,
    ):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]

        sb_ter = TER(
            normalized=normalized,
            no_punct=ignore_punct,
            asian_support=support_zh_ja_chars,
            case_sensitive=case_sensitive,
        )
        output = sb_ter.corpus_score(predictions, transformed_references)

        return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch

from ..models.speecht5 import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor
from ..utils import is_datasets_available
from .base import PipelineTool


if is_datasets_available():
    from datasets import load_dataset


class TextToSpeechTool(PipelineTool):
    default_checkpoint = "microsoft/speecht5_tts"
    description = (
        "This is a tool that reads an English text out loud. It takes an input named `text` which should contain the "
        "text to read (in English) and returns a waveform object containing the sound."
    )
    name = "text_reader"
    pre_processor_class = SpeechT5Processor
    model_class = SpeechT5ForTextToSpeech
    post_processor_class = SpeechT5HifiGan

    inputs = ["text"]
    outputs = ["audio"]

    def setup(self):
        if self.post_processor is None:
            self.post_processor = "microsoft/speecht5_hifigan"
        super().setup()

    def encode(self, text, speaker_embeddings=None):
        inputs = self.pre_processor(text=text, return_tensors="pt", truncation=True)

        if speaker_embeddings is None:
            if not is_datasets_available():
                raise ImportError("Datasets needs to be installed if not passing speaker embeddings.")

            embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
            speaker_embeddings = torch.tensor(embeddings_dataset[7305]["xvector"]).unsqueeze(0)

        return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}

    def forward(self, inputs):
        with torch.no_grad():
            return self.model.generate_speech(**inputs)

    def decode(self, outputs):
        with torch.no_grad():
            return self.post_processor(outputs).cpu().detach()
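# A hedged usage sketch for the tool above, not an authoritative recipe: it
# assumes the transformers agents runtime is installed, that the
# "text-to-speech" task name maps to this tool in your transformers version,
# and that the checkpoints can be downloaded.
if __name__ == "__main__":
    from transformers import load_tool

    tts = load_tool("text-to-speech")
    speech = tts("Hello, this is a test")  # returns an audio waveform tensor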
def solution(numerator: int = 3, denominator: int = 7, limit: int = 1000000) -> int:
    """Find the numerator of the fraction immediately to the left of
    numerator/denominator among fractions whose denominator is at most limit."""
    max_numerator = 0
    max_denominator = 1

    for current_denominator in range(1, limit + 1):
        current_numerator = current_denominator * numerator // denominator
        if current_denominator % denominator == 0:
            current_numerator -= 1

        # Compare fractions by cross-multiplication to avoid floating point error.
        if current_numerator * max_denominator > current_denominator * max_numerator:
            max_numerator = current_numerator
            max_denominator = current_denominator
    return max_numerator


if __name__ == "__main__":
    print(solution(numerator=3, denominator=7, limit=1_000_000))
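# A small check of solution() above; assumes it is in scope. With denominators
# capped at 8, the fraction immediately to the left of 3/7 is 2/5, so the
# returned numerator is 2. With the default limit of 1,000,000 the function
# returns 428570 (the Project Euler #71 answer, for 428570/999997).
assert solution(3, 7, 8) == 2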
"""simple docstring""" from timeit import timeit def a__ ( __SCREAMING_SNAKE_CASE ) -> int: if number < 0: raise ValueError("the value of input must not be negative" ) __lowerCAmelCase: str = 0 while number: number &= number - 1 result += 1 return result def a__ ( __SCREAMING_SNAKE_CASE ) -> int: if number < 0: raise ValueError("the value of input must not be negative" ) __lowerCAmelCase: Union[str, Any] = 0 while number: if number % 2 == 1: result += 1 number >>= 1 return result def a__ ( ) -> None: def do_benchmark(__SCREAMING_SNAKE_CASE ) -> None: __lowerCAmelCase: List[Any] = "import __main__ as z" print(F"Benchmark when {number = }:" ) print(F"{get_set_bits_count_using_modulo_operator(__SCREAMING_SNAKE_CASE ) = }" ) __lowerCAmelCase: Optional[int] = timeit("z.get_set_bits_count_using_modulo_operator(25)" , setup=__SCREAMING_SNAKE_CASE ) print(F"timeit() runs in {timing} seconds" ) print(F"{get_set_bits_count_using_brian_kernighans_algorithm(__SCREAMING_SNAKE_CASE ) = }" ) __lowerCAmelCase: Optional[int] = timeit( "z.get_set_bits_count_using_brian_kernighans_algorithm(25)" , setup=__SCREAMING_SNAKE_CASE , ) print(F"timeit() runs in {timing} seconds" ) for number in (2_5, 3_7, 5_8, 0): do_benchmark(__SCREAMING_SNAKE_CASE ) print() if __name__ == "__main__": import doctest doctest.testmod() benchmark()
from __future__ import annotations

import numpy as np


def lower_upper_decomposition(table: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
    """Doolittle LU decomposition: factor a square matrix into a unit lower
    triangular matrix and an upper triangular matrix (no pivoting)."""
    # Ensure that table is a square array
    rows, columns = np.shape(table)
    if rows != columns:
        msg = (
            "'table' has to be of square shaped array but got a "
            f"{rows}x{columns} array:\n{table}"
        )
        raise ValueError(msg)

    lower = np.zeros((rows, columns))
    upper = np.zeros((rows, columns))
    for i in range(columns):
        for j in range(i):
            total = sum(lower[i][k] * upper[k][j] for k in range(j))
            if upper[j][j] == 0:
                raise ArithmeticError("No LU decomposition exists")
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        for j in range(i, columns):
            total = sum(lower[i][k] * upper[k][j] for k in range(i))
            upper[i][j] = table[i][j] - total
    return lower, upper


if __name__ == "__main__":
    import doctest

    doctest.testmod()
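# Round-trip check for lower_upper_decomposition() above; assumes it and numpy
# are in scope. Multiplying the factors back together must recover the input,
# and Doolittle's convention puts ones on the diagonal of the lower factor.
matrix = np.array([[2.0, -2.0, 1.0], [0.0, 1.0, 2.0], [5.0, 3.0, 1.0]])
lower, upper = lower_upper_decomposition(matrix)
assert np.allclose(lower @ upper, matrix)
assert np.allclose(np.diag(lower), 1.0)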
import math


def is_prime(number: int) -> bool:
    assert isinstance(number, int) and (number >= 0), "'number' must been an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False

    odd_numbers = range(3, int(math.sqrt(number) + 1), 2)
    return not any(not number % i for i in odd_numbers)


def next_prime(value, factor=1, **kwargs):
    value = factor * value
    # Remember the starting point so that an already-prime input is advanced
    # to the *next* prime instead of being returned unchanged.
    first_value_val = value

    while not is_prime(value):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1

    if value == first_value_val:
        return next_prime(value + 1, **kwargs)
    return value
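# Behavior checks for next_prime() above; assumes it and is_prime() are in
# scope. The `desc=True` keyword makes the search walk downward instead of up.
assert next_prime(14) == 17              # 14, 15, 16 are composite
assert next_prime(13) == 17              # a prime input is advanced to the next prime
assert next_prime(14, desc=True) == 13   # descending search stops at 13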
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available __UpperCamelCase : Tuple = { '''configuration_rag''': ['''RagConfig'''], '''retrieval_rag''': ['''RagRetriever'''], '''tokenization_rag''': ['''RagTokenizer'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : Optional[int] = [ '''RagModel''', '''RagPreTrainedModel''', '''RagSequenceForGeneration''', '''RagTokenForGeneration''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : Tuple = [ '''TFRagModel''', '''TFRagPreTrainedModel''', '''TFRagSequenceForGeneration''', '''TFRagTokenForGeneration''', ] if TYPE_CHECKING: from .configuration_rag import RagConfig from .retrieval_rag import RagRetriever from .tokenization_rag import RagTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_rag import ( TFRagModel, TFRagPreTrainedModel, TFRagSequenceForGeneration, TFRagTokenForGeneration, ) else: import sys __UpperCamelCase : List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, is_vision_available, ) __UpperCamelCase : Dict = {'''processing_layoutxlm''': ['''LayoutXLMProcessor''']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : int = ['''LayoutXLMTokenizer'''] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : Any = ['''LayoutXLMTokenizerFast'''] if TYPE_CHECKING: from .processing_layoutxlm import LayoutXLMProcessor try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_layoutxlm import LayoutXLMTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast else: import sys __UpperCamelCase : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez-orangesum-title": (
            "https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "moussaKam/mbarthez": 1024,
    "moussaKam/barthez": 1024,
    "moussaKam/barthez-orangesum-title": 1024,
}

SPIECE_UNDERLINE = "▁"


class BarthezTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))

        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) - 1
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        return spm_id if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
import os
import random
import sys

from . import cryptomath_module as cryptomath
from . import rabin_miller


min_primitive_root = 3


def primitive_root(p_val: int) -> int:
    print("Generating primitive root of p")
    while True:
        g = random.randrange(min_primitive_root, p_val)
        if pow(g, 2, p_val) == 1:
            continue
        if pow(g, p_val, p_val) == 1:
            continue
        return g


def generate_key(key_size: int) -> tuple[tuple[int, int, int, int], tuple[int, int]]:
    print("Generating prime p...")
    p = rabin_miller.generate_large_prime(key_size)  # select large prime number.
    e_1 = primitive_root(p)  # one primitive root on modulo p.
    d = random.randrange(3, p)  # private_key -> have to be greater than 2 for safety.
    e_2 = cryptomath.find_mod_inverse(pow(e_1, d, p), p)

    public_key = (key_size, e_1, e_2, p)
    private_key = (key_size, d)

    return public_key, private_key


def make_key_files(name: str, key_size: int) -> None:
    if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"):
        print("\nWARNING:")
        print(
            f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            "Use a different name or delete these files and re-run this program."
        )
        sys.exit()

    public_key, private_key = generate_key(key_size)
    print(f"\nWriting public key to file {name}_pubkey.txt...")
    with open(f"{name}_pubkey.txt", "w") as fo:
        fo.write(f"{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}")

    print(f"Writing private key to file {name}_privkey.txt...")
    with open(f"{name}_privkey.txt", "w") as fo:
        fo.write(f"{private_key[0]},{private_key[1]}")


def main() -> None:
    print("Making key files...")
    make_key_files("elgamal", 2048)
    print("Key files generation successful")


if __name__ == "__main__":
    main()
"""simple docstring""" import re import string import numpy as np import datasets __SCREAMING_SNAKE_CASE = '\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n' __SCREAMING_SNAKE_CASE = '\nArgs:\n predictions: List of predicted texts.\n references: List of reference texts.\n regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n ignore when calculating the exact matches. Note: these regexes are removed\n from the input data before the changes based on the options below (e.g. ignore_case,\n ignore_punctuation, ignore_numbers) are applied.\n ignore_case: Boolean, defaults to False. If true, turns everything\n to lowercase so that capitalization differences are ignored.\n ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\n ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\nReturns:\n exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 25.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 50.0\n\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 75.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n >>> print(round(results["exact_match"], 1))\n 100.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]\n >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 33.3\n\n' __SCREAMING_SNAKE_CASE = '\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class a__ ( datasets.Metric ): def lowerCamelCase_ ( self :Union[str, Any] ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Value('string' , id='sequence' ), 'references': datasets.Value('string' , id='sequence' ), } ) , 
reference_urls=[] , ) def lowerCamelCase_ ( self :List[str] , _lowerCamelCase :str , _lowerCamelCase :Any , _lowerCamelCase :str=None , _lowerCamelCase :Tuple=False , _lowerCamelCase :Dict=False , _lowerCamelCase :Dict=False , ): '''simple docstring''' if regexes_to_ignore is not None: for s in regexes_to_ignore: UpperCamelCase_ : int =np.array([re.sub(_lowerCamelCase , '' , _lowerCamelCase ) for x in predictions] ) UpperCamelCase_ : int =np.array([re.sub(_lowerCamelCase , '' , _lowerCamelCase ) for x in references] ) else: UpperCamelCase_ : int =np.asarray(_lowerCamelCase ) UpperCamelCase_ : str =np.asarray(_lowerCamelCase ) if ignore_case: UpperCamelCase_ : List[str] =np.char.lower(_lowerCamelCase ) UpperCamelCase_ : int =np.char.lower(_lowerCamelCase ) if ignore_punctuation: UpperCamelCase_ : List[Any] =string.punctuation.maketrans('' , '' , string.punctuation ) UpperCamelCase_ : List[str] =np.char.translate(_lowerCamelCase , table=_lowerCamelCase ) UpperCamelCase_ : Tuple =np.char.translate(_lowerCamelCase , table=_lowerCamelCase ) if ignore_numbers: UpperCamelCase_ : Dict =string.digits.maketrans('' , '' , string.digits ) UpperCamelCase_ : str =np.char.translate(_lowerCamelCase , table=_lowerCamelCase ) UpperCamelCase_ : Dict =np.char.translate(_lowerCamelCase , table=_lowerCamelCase ) UpperCamelCase_ : Optional[Any] =predictions == references return {"exact_match": np.mean(_lowerCamelCase ) * 100}
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) __SCREAMING_SNAKE_CASE = { 'configuration_mega': ['MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MegaConfig', 'MegaOnnxConfig'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE = [ 'MEGA_PRETRAINED_MODEL_ARCHIVE_LIST', 'MegaForCausalLM', 'MegaForMaskedLM', 'MegaForMultipleChoice', 'MegaForQuestionAnswering', 'MegaForSequenceClassification', 'MegaForTokenClassification', 'MegaModel', 'MegaPreTrainedModel', ] if TYPE_CHECKING: from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mega import ( MEGA_PRETRAINED_MODEL_ARCHIVE_LIST, MegaForCausalLM, MegaForMaskedLM, MegaForMultipleChoice, MegaForQuestionAnswering, MegaForSequenceClassification, MegaForTokenClassification, MegaModel, MegaPreTrainedModel, ) else: import sys __SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
"""simple docstring""" import inspect import unittest from datasets import load_dataset from packaging import version from transformers import BeitConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_MAPPING, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation, BeitModel, ) from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): import PIL from PIL import Image from transformers import BeitImageProcessor class snake_case_ : def __init__( self , a_ , a_=1_0_0 , a_=1_3 , a_=3_0 , a_=2 , a_=3 , a_=True , a_=True , a_=3_2 , a_=4 , a_=4 , a_=3_7 , a_="gelu" , a_=0.1 , a_=0.1 , a_=1_0 , a_=0.02 , a_=3 , a_=None , a_=[0, 1, 2, 3] , ): a_ : Dict = parent a_ : Optional[int] = 1_0_0 a_ : Dict = batch_size a_ : str = image_size a_ : Optional[int] = patch_size a_ : Optional[int] = num_channels a_ : int = is_training a_ : Dict = use_labels a_ : Dict = hidden_size a_ : Optional[int] = num_hidden_layers a_ : Any = num_attention_heads a_ : List[Any] = intermediate_size a_ : Union[str, Any] = hidden_act a_ : str = hidden_dropout_prob a_ : Tuple = attention_probs_dropout_prob a_ : Optional[int] = type_sequence_label_size a_ : Optional[int] = initializer_range a_ : List[str] = scope a_ : Dict = out_indices a_ : Union[str, Any] = num_labels # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) a_ : Tuple = (image_size // patch_size) ** 2 a_ : List[Any] = num_patches + 1 def snake_case_ ( self ): a_ : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) a_ : Dict = None a_ : Optional[int] = None if self.use_labels: a_ : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size ) a_ : List[Any] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) a_ : Optional[int] = self.get_config() return config, pixel_values, labels, pixel_labels def snake_case_ ( self ): return BeitConfig( vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=a_ , initializer_range=self.initializer_range , out_indices=self.out_indices , ) def snake_case_ ( self , a_ , a_ , a_ , a_ ): a_ : Any = BeitModel(config=a_ ) model.to(a_ ) model.eval() a_ : List[Any] = model(a_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def snake_case_ ( self , a_ , a_ , a_ , a_ ): a_ : int = BeitForMaskedImageModeling(config=a_ ) model.to(a_ ) model.eval() a_ : str = model(a_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) ) def snake_case_ ( self , a_ , a_ , a_ 
, a_ ):
        a_ : Union[str, Any] = self.type_sequence_label_size
        a_ : Dict = BeitForImageClassification(a_ )
        model.to(a_ )
        model.eval()
        a_ : Dict = model(a_ , labels=a_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )

        # test greyscale images
        a_ : List[Any] = 1
        a_ : List[str] = BeitForImageClassification(a_ )
        model.to(a_ )
        model.eval()
        a_ : List[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        a_ : Optional[int] = model(a_ , labels=a_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )

    def snake_case_ ( self , a_ , a_ , a_ , a_ ):
        a_ : List[Any] = self.num_labels
        a_ : str = BeitForSemanticSegmentation(a_ )
        model.to(a_ )
        model.eval()
        a_ : Union[str, Any] = model(a_ )
        self.parent.assertEqual(
            result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
        a_ : Union[str, Any] = model(a_ , labels=a_ )
        self.parent.assertEqual(
            result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )

    def snake_case_ ( self ):
        a_ : List[Any] = self.prepare_config_and_inputs()
        a_ , a_ , a_ , a_ : Dict = config_and_inputs
        a_ : List[Any] = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class snake_case_ ( a_ , a_ , unittest.TestCase ):
    __lowerCAmelCase = (
        (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    __lowerCAmelCase = (
        {
            "feature-extraction": BeitModel,
            "image-classification": BeitForImageClassification,
            "image-segmentation": BeitForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    __lowerCAmelCase = False
    __lowerCAmelCase = False
    __lowerCAmelCase = False

    def snake_case_ ( self ):
        a_ : str = BeitModelTester(self )
        a_ : Dict = ConfigTester(self , config_class=a_ , has_text_modality=a_ , hidden_size=3_7 )

    def snake_case_ ( self ):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="BEiT does not use inputs_embeds" )
    def snake_case_ ( self ):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
    def snake_case_ ( self ):
        pass

    def snake_case_ ( self ):
        a_ , a_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            a_ : List[Any] = model_class(a_ )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            a_ : int = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(a_ , nn.Linear ) )

    def snake_case_ ( self ):
        a_ , a_ : int = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            a_ : Any = model_class(a_ )
            a_ : int = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            a_ : str = [*signature.parameters.keys()]
            a_ : str = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , a_ )

    def snake_case_ ( self ):
        a_ : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*a_ )

    def snake_case_ ( self ):
        a_ : str = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*a_ )

    def snake_case_ ( self ):
        a_ : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*a_ )

    def snake_case_ ( self ):
        a_ : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*a_ )

    def snake_case_ ( self ):
        if not self.model_tester.is_training:
            return
        a_ , a_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
        a_ : Any = True
        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if model_class in [*get_values(a_ ), BeitForMaskedImageModeling]:
                continue
            a_ : Union[str, Any] = model_class(a_ )
            model.to(a_ )
            model.train()
            a_ : List[str] = self._prepare_for_class(a_ , a_ , return_labels=a_ )
            a_ : Tuple = model(**a_ ).loss
            loss.backward()

    def snake_case_ ( self ):
        a_ , a_ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return
        a_ : List[Any] = False
        a_ : int = True
        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if (
                model_class in [*get_values(a_ ), BeitForMaskedImageModeling]
                or not model_class.supports_gradient_checkpointing
            ):
                continue
            a_ : Optional[int] = model_class(a_ )
            model.gradient_checkpointing_enable()
            model.to(a_ )
            model.train()
            a_ : str = self._prepare_for_class(a_ , a_ , return_labels=a_ )
            a_ : Optional[Any] = model(**a_ ).loss
            loss.backward()

    def snake_case_ ( self ):
        a_ , a_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
        a_ : Optional[int] = _config_zero_init(a_ )
        for model_class in self.all_model_classes:
            a_ : Dict = model_class(config=a_ )
            for name, param in model.named_parameters():
                # we skip lambda parameters as these require special initial values
                # determined by config.layer_scale_init_value
                if "lambda" in name:
                    continue
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item() ,
                        [0.0, 1.0] ,
                        msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" ,
                    )

    @slow
    def snake_case_ ( self ):
        for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            a_ : Any = BeitModel.from_pretrained(a_ )
            self.assertIsNotNone(a_ )


def lowerCAmelCase_ ( ) -> Tuple:
    a_ : Tuple = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image


@require_torch
@require_vision
class snake_case_ ( unittest.TestCase ):
    @cached_property
    def snake_case_ ( self ):
        return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224" ) if is_vision_available() else None

    @slow
    def snake_case_ ( self ):
        a_ : Any = BeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k" ).to(a_ )

        a_ : Tuple = self.default_image_processor
        a_ : List[Any] = prepare_img()
        a_ : int = image_processor(images=a_ , return_tensors="pt" ).pixel_values.to(a_ )

        # prepare bool_masked_pos
        a_ : List[str] = torch.ones((1, 1_9_6) , dtype=torch.bool ).to(a_ )

        # forward pass
        with torch.no_grad():
            a_ : Optional[Any] = model(pixel_values=a_ , bool_masked_pos=a_ )
        a_ : List[str] = outputs.logits

        # verify the logits
        a_ : Optional[int] = torch.Size((1, 1_9_6, 8_1_9_2) )
        self.assertEqual(logits.shape , a_ )
        a_ : str = torch.tensor(
            [[-3.2_437, 0.5_072, -13.9_174], [-3.2_456, 0.4_948, -13.9_401], [-3.2_033, 0.5_121, -13.8_550]] ).to(a_ )
        self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , a_ , atol=1e-2 ) )

    @slow
    def snake_case_ ( self ):
        a_ : List[str] = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224" ).to(a_ )

        a_ : Union[str, Any] = self.default_image_processor
        a_ : Tuple = prepare_img()
        a_ : Optional[Any] = image_processor(images=a_ , return_tensors="pt" ).to(a_ )

        # forward pass
        with torch.no_grad():
            a_ : List[Any] = model(**a_ )
        a_ : Optional[int] = outputs.logits

        # verify the logits
        a_ : List[str] = torch.Size((1, 1_0_0_0) )
        self.assertEqual(logits.shape , a_ )
        a_ : Dict = torch.tensor([-1.2_385, -1.0_987, -1.0_108] ).to(a_ )
        self.assertTrue(torch.allclose(logits[0, :3] , a_ , atol=1e-4 ) )
        a_ : List[Any] = 2_8_1
        self.assertEqual(logits.argmax(-1 ).item() , a_ )

    @slow
    def snake_case_ ( self ):
        a_ : Tuple = BeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k" ).to(
            a_ )

        a_ : Tuple = self.default_image_processor
        a_ : Optional[Any] = prepare_img()
        a_ : int = image_processor(images=a_ , return_tensors="pt" ).to(a_ )

        # forward pass
        with torch.no_grad():
            a_ : Union[str, Any] = model(**a_ )
        a_ : List[str] = outputs.logits

        # verify the logits
        a_ : str = torch.Size((1, 2_1_8_4_1) )
        self.assertEqual(logits.shape , a_ )
        a_ : Tuple = torch.tensor([1.6_881, -0.2_787, 0.5_901] ).to(a_ )
        self.assertTrue(torch.allclose(logits[0, :3] , a_ , atol=1e-4 ) )
        a_ : List[str] = 2_3_9_6
        self.assertEqual(logits.argmax(-1 ).item() , a_ )

    @slow
    def snake_case_ ( self ):
        a_ : Union[str, Any] = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" )
        a_ : int = model.to(a_ )

        a_ : Union[str, Any] = BeitImageProcessor(do_resize=a_ , size=6_4_0 , do_center_crop=a_ )

        a_ : Dict = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" )
        a_ : Tuple = Image.open(ds[0]["file"] )
        a_ : List[str] = image_processor(images=a_ , return_tensors="pt" ).to(a_ )

        # forward pass
        with torch.no_grad():
            a_ : Union[str, Any] = model(**a_ )
        a_ : int = outputs.logits

        # verify the logits
        a_ : Union[str, Any] = torch.Size((1, 1_5_0, 1_6_0, 1_6_0) )
        self.assertEqual(logits.shape , a_ )

        a_ : Tuple = version.parse(PIL.__version__ ) < version.parse("9.0.0" )
        if is_pillow_less_than_a:
            a_ : Dict = torch.tensor(
                [
                    [[-4.9_225, -2.3_954, -3.0_522], [-2.8_822, -1.0_046, -1.7_561], [-2.9_549, -1.3_228, -2.1_347]],
                    [[-5.8_168, -3.4_129, -4.0_778], [-3.8_651, -2.2_214, -3.0_277], [-3.8_356, -2.4_643, -3.3_535]],
                    [[-0.0_078, 3.9_952, 4.0_754], [2.9_856, 4.6_944, 5.0_035], [3.2_413, 4.7_813, 4.9_969]],
                ] ,
                device=a_ ,
            )
        else:
            a_ : int = torch.tensor(
                [
                    [[-4.8_960, -2.3_688, -3.0_355], [-2.8_478, -0.9_836, -1.7_418], [-2.9_449, -1.3_332, -2.1_456]],
                    [[-5.8_081, -3.4_124, -4.1_006], [-3.8_561, -2.2_081, -3.0_323], [-3.8_365, -2.4_601, -3.3_669]],
                    [[-0.0_309, 3.9_868, 4.0_540], [2.9_640, 4.6_877, 4.9_976], [3.2_081, 4.7_690, 4.9_942]],
                ] ,
                device=a_ ,
            )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , a_ , atol=1e-4 ) )

    @slow
    def snake_case_ ( self ):
        a_ : Tuple = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" )
        a_ : Union[str, Any] = model.to(a_ )

        a_ : int = BeitImageProcessor(do_resize=a_ , size=6_4_0 , do_center_crop=a_ )

        a_ : Optional[Any] = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" )
        a_ : Union[str, Any] = Image.open(ds[0]["file"] )
        a_ : Dict = image_processor(images=a_ , return_tensors="pt" ).to(a_ )

        # forward pass
        with torch.no_grad():
            a_ : Optional[Any] = model(**a_ )
        a_ : List[str] = outputs.logits.detach().cpu()

        a_ : Any = image_processor.post_process_semantic_segmentation(outputs=a_ , target_sizes=[(5_0_0, 3_0_0)] )
        a_ : Optional[Any] = torch.Size((5_0_0, 3_0_0) )
        self.assertEqual(segmentation[0].shape , a_ )

        a_ : str = image_processor.post_process_semantic_segmentation(outputs=a_ )
        a_ : Any = torch.Size((1_6_0, 1_6_0) )
        self.assertEqual(segmentation[0].shape , a_ )
370
"""simple docstring""" SCREAMING_SNAKE_CASE_ = 0 # The first color of the flag. SCREAMING_SNAKE_CASE_ = 1 # The second color of the flag. SCREAMING_SNAKE_CASE_ = 2 # The third color of the flag. SCREAMING_SNAKE_CASE_ = (red, white, blue) def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE__ ) -> list: if not sequence: return [] if len(SCREAMING_SNAKE_CASE__ ) == 1: return list(SCREAMING_SNAKE_CASE__ ) a_ : Dict = 0 a_ : List[Any] = len(SCREAMING_SNAKE_CASE__ ) - 1 a_ : str = 0 while mid <= high: if sequence[mid] == colors[0]: a_ , a_ : int = sequence[mid], sequence[low] low += 1 mid += 1 elif sequence[mid] == colors[1]: mid += 1 elif sequence[mid] == colors[2]: a_ , a_ : List[Any] = sequence[high], sequence[mid] high -= 1 else: a_ : Dict = F"""The elements inside the sequence must contains only {colors} values""" raise ValueError(SCREAMING_SNAKE_CASE__ ) return sequence if __name__ == "__main__": import doctest doctest.testmod() SCREAMING_SNAKE_CASE_ = input("""Enter numbers separated by commas:\n""").strip() SCREAMING_SNAKE_CASE_ = [int(item.strip()) for item in user_input.split(""",""")] print(F"""{dutch_national_flag_sort(unsorted)}""")
370
1
import json
import os
import unittest

from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class lowercase ( UpperCAmelCase_ , unittest.TestCase ):
    """simple docstring"""

    snake_case_ = CLIPTokenizer
    snake_case_ = CLIPTokenizerFast
    snake_case_ = True
    snake_case_ = {}
    snake_case_ = False

    def _UpperCamelCase ( self : List[Any] ):
        """simple docstring"""
        super().setUp()

        # fmt: off
        lowerCamelCase__ = ["""l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""]
        # fmt: on
        lowerCamelCase__ = dict(zip(a_ , range(len(a_ ) ) ) )
        lowerCamelCase__ = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>"""]
        lowerCamelCase__ = {"""unk_token""": """<unk>"""}

        lowerCamelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        lowerCamelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
        with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write(json.dumps(a_ ) + """\n""" )
        with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write("""\n""".join(a_ ) )

    def _UpperCamelCase ( self : Any , **a_ : Tuple ):
        """simple docstring"""
        kwargs.update(self.special_tokens_map )
        return CLIPTokenizer.from_pretrained(self.tmpdirname , **a_ )

    def _UpperCamelCase ( self : Optional[int] , **a_ : Dict ):
        """simple docstring"""
        kwargs.update(self.special_tokens_map )
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **a_ )

    def _UpperCamelCase ( self : Optional[int] , a_ : Optional[int] ):
        """simple docstring"""
        lowerCamelCase__ = """lower newer"""
        lowerCamelCase__ = """lower newer"""
        return input_text, output_text

    def _UpperCamelCase ( self : Optional[int] ):
        """simple docstring"""
        lowerCamelCase__ = CLIPTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
        lowerCamelCase__ = """lower newer"""
        lowerCamelCase__ = ["""lo""", """w""", """er</w>""", """n""", """e""", """w""", """er</w>"""]
        lowerCamelCase__ = tokenizer.tokenize(a_ )
        self.assertListEqual(a_ , a_ )

        lowerCamelCase__ = tokens + [tokenizer.unk_token]
        lowerCamelCase__ = [10, 2, 16, 9, 3, 2, 16, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(a_ ) , a_ )

    @require_ftfy
    def _UpperCamelCase ( self : int ):
        """simple docstring"""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                lowerCamelCase__ = self.tokenizer_class.from_pretrained(a_ , **a_ )
                lowerCamelCase__ = self.rust_tokenizer_class.from_pretrained(a_ , **a_ )

                lowerCamelCase__ = """A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."""
                lowerCamelCase__ = tokenizer_s.tokenize(a_ )
                lowerCamelCase__ = tokenizer_r.tokenize(a_ )
                self.assertListEqual(a_ , a_ )

                # Test that the tokenization is identical on an example containing a character (Latin Small Letter A
                # with Tilde) encoded in 2 different ways
                lowerCamelCase__ = """xa\u0303y""" + """ """ + """x\xe3y"""
                lowerCamelCase__ = tokenizer_s.tokenize(a_ )
                lowerCamelCase__ = tokenizer_r.tokenize(a_ )
                self.assertListEqual(a_ , a_ )

                # Test that the tokenization is identical on unicode of space type
                lowerCamelCase__ = [
                    """\u0009""",  # (horizontal tab, '\t')
                    """\u000B""",  # (vertical tab)
                    """\u000C""",  # (form feed)
                    """\u0020""",  # (space, ' ')
                    """\u200E""",  # (left-to-right mark):w
                    """\u200F""",  # (right-to-left mark)
                ]
                for unicode_seq in spaces_unicodes:
                    lowerCamelCase__ = tokenizer_s.tokenize(a_ )
                    lowerCamelCase__ = tokenizer_r.tokenize(a_ )
                    self.assertListEqual(a_ , a_ )

                # Test that the tokenization is identical on unicode of line break type
                lowerCamelCase__ = [
                    """\u000A""",  # (line feed, '\n')
                    """\r\n""",  # (carriage return and line feed, '\r\n')
                    """\u000D""",  # (carriage return, '\r')
                    """\r""",  # (carriage return, '\r')
                    """\u000D""",  # (carriage return, '\r')
                    """\u2028""",  # (line separator)
                    """\u2029""",  # (paragraph separator)
                    # "\u0085", # (next line)
                ]

                # The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
                # it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
                # space (and thus into an empty list).
                for unicode_seq in line_break_unicodes:
                    lowerCamelCase__ = tokenizer_s.tokenize(a_ )
                    lowerCamelCase__ = tokenizer_r.tokenize(a_ )
                    self.assertListEqual(a_ , a_ )

    def _UpperCamelCase ( self : Any ):
        """simple docstring"""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                lowerCamelCase__ = """hello"""  # `hello` is a token in the vocabulary of `pretrained_name`
                lowerCamelCase__ = F'''{text_of_1_token} {text_of_1_token}'''

                lowerCamelCase__ = self.rust_tokenizer_class.from_pretrained(
                    a_ , use_fast=a_ , )
                lowerCamelCase__ = tokenizer_r(a_ , return_offsets_mapping=a_ , add_special_tokens=a_ )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(a_ )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(a_ ) + 1, len(a_ ) + 1 + len(a_ )) , )

                lowerCamelCase__ = F''' {text}'''

                lowerCamelCase__ = self.rust_tokenizer_class.from_pretrained(
                    a_ , use_fast=a_ , )
                lowerCamelCase__ = tokenizer_r(a_ , return_offsets_mapping=a_ , add_special_tokens=a_ )
                self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(a_ )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(a_ ) + 1, 1 + len(a_ ) + 1 + len(a_ )) , )

    def _UpperCamelCase ( self : Optional[Any] ):
        """simple docstring"""
        with self.assertRaises(a_ ) as context:
            self.rust_tokenizer_class.from_pretrained("""robot-test/old-clip-tokenizer""" )

        self.assertTrue(
            context.exception.args[0].startswith(
                """The `backend_tokenizer` provided does not match the expected format.""" ) )

    @require_ftfy
    def _UpperCamelCase ( self : Optional[Any] ):
        """simple docstring"""
        super().test_tokenization_python_rust_equals()

    def _UpperCamelCase ( self : Union[str, Any] ):
        """simple docstring"""
        pass
165
def solution(n : int = 2000000 ):
    '''simple docstring'''
    primality_list = [0 for i in range(n + 1 )]
    primality_list[0] = 1
    primality_list[1] = 1
    for i in range(2 , int(n**0.5 ) + 1 ):
        if primality_list[i] == 0:
            for j in range(i * i , n + 1 , i ):
                primality_list[j] = 1
    sum_of_primes = 0
    for i in range(n ):
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes


if __name__ == "__main__":
    print(f'''{solution() = }''')
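A small hand-checked test of the sieve above: the primes strictly below 10 are 2, 3, 5 and 7, which sum to 17.

# Sketch only: verify solution() on a tiny bound before the 2_000_000 default.
assert solution(10) == 17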
165
1
"""simple docstring""" import json import os import re import unicodedata from json.encoder import INFINITY from typing import Any, Dict, List, Optional, Tuple, Union import numpy as np import regex from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...tokenization_utils_base import BatchEncoding from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging from ...utils.generic import _is_jax, _is_numpy __lowercase = logging.get_logger(__name__) __lowercase = { """artists_file""": """artists.json""", """lyrics_file""": """lyrics.json""", """genres_file""": """genres.json""", } __lowercase = { """artists_file""": { """jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json""", }, """genres_file""": { """jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json""", }, """lyrics_file""": { """jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json""", }, } __lowercase = { """jukebox""": 512, } class _A ( _a ): """simple docstring""" UpperCAmelCase : str = VOCAB_FILES_NAMES UpperCAmelCase : Optional[int] = PRETRAINED_VOCAB_FILES_MAP UpperCAmelCase : Union[str, Any] = PRETRAINED_LYRIC_TOKENS_SIZES UpperCAmelCase : Dict = ["""input_ids""", """attention_mask"""] def __init__( self : str , __UpperCAmelCase : str , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : int=["v3", "v2", "v2"] , __UpperCAmelCase : Any=512 , __UpperCAmelCase : Any=5 , __UpperCAmelCase : str="<|endoftext|>" , **__UpperCAmelCase : str , ): a : str = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase) if isinstance(__UpperCAmelCase , __UpperCAmelCase) else unk_token super().__init__( unk_token=__UpperCAmelCase , n_genres=__UpperCAmelCase , version=__UpperCAmelCase , max_n_lyric_tokens=__UpperCAmelCase , **__UpperCAmelCase , ) a : Union[str, Any] = version a : List[Any] = max_n_lyric_tokens a : List[str] = n_genres with open(__UpperCAmelCase , encoding="utf-8") as vocab_handle: a : Optional[Any] = json.load(__UpperCAmelCase) with open(__UpperCAmelCase , encoding="utf-8") as vocab_handle: a : str = json.load(__UpperCAmelCase) with open(__UpperCAmelCase , encoding="utf-8") as vocab_handle: a : Optional[int] = json.load(__UpperCAmelCase) a : List[Any] = r"[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+" # In v2, we had a n_vocab=80 and in v3 we missed + and so n_vocab=79 of characters. 
if len(self.lyrics_encoder) == 79: a : Optional[Any] = oov.replace(r"\-'" , r"\-+'") a : List[str] = regex.compile(__UpperCAmelCase) a : Tuple = {v: k for k, v in self.artists_encoder.items()} a : str = {v: k for k, v in self.genres_encoder.items()} a : int = {v: k for k, v in self.lyrics_encoder.items()} @property def __snake_case ( self : Dict): return len(self.artists_encoder) + len(self.genres_encoder) + len(self.lyrics_encoder) def __snake_case ( self : Tuple): return dict(self.artists_encoder , self.genres_encoder , self.lyrics_encoder) def __snake_case ( self : str , __UpperCAmelCase : int , __UpperCAmelCase : Dict , __UpperCAmelCase : List[Any]): a : Dict = [self.artists_encoder.get(__UpperCAmelCase , 0) for artist in list_artists] for genres in range(len(__UpperCAmelCase)): a : int = [self.genres_encoder.get(__UpperCAmelCase , 0) for genre in list_genres[genres]] a : List[str] = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres])) a : Dict = [[self.lyrics_encoder.get(__UpperCAmelCase , 0) for character in list_lyrics[0]], [], []] return artists_id, list_genres, lyric_ids def __snake_case ( self : Optional[Any] , __UpperCAmelCase : Any): return list(__UpperCAmelCase) def __snake_case ( self : Dict , __UpperCAmelCase : Any , __UpperCAmelCase : int , __UpperCAmelCase : int , **__UpperCAmelCase : Any): a : Dict = self.prepare_for_tokenization(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) a : str = self._tokenize(__UpperCAmelCase) return artist, genre, lyrics def __snake_case ( self : Optional[int] , __UpperCAmelCase : str , __UpperCAmelCase : str , __UpperCAmelCase : str , __UpperCAmelCase : bool = False): for idx in range(len(self.version)): if self.version[idx] == "v3": a : str = artists[idx].lower() a : Optional[int] = [genres[idx].lower()] else: a : Union[str, Any] = self._normalize(artists[idx]) + ".v2" a : int = [ self._normalize(__UpperCAmelCase) + ".v2" for genre in genres[idx].split("_") ] # split is for the full dictionary with combined genres if self.version[0] == "v2": a : List[Any] = regex.compile(r"[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+") a : Tuple = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+'\"()[] \t\n" a : Optional[int] = {vocab[index]: index + 1 for index in range(len(__UpperCAmelCase))} a : List[Any] = 0 a : List[Any] = len(__UpperCAmelCase) + 1 a : Any = self.vocab a : Any = {v: k for k, v in self.vocab.items()} a : str = "" else: a : Optional[int] = regex.compile(r"[^A-Za-z0-9.,:;!?\-+'\"()\[\] \t\n]+") a : Union[str, Any] = self._run_strip_accents(__UpperCAmelCase) a : Union[str, Any] = lyrics.replace("\\" , "\n") a : List[Any] = self.out_of_vocab.sub("" , __UpperCAmelCase), [], [] return artists, genres, lyrics def __snake_case ( self : str , __UpperCAmelCase : Any): a : Dict = unicodedata.normalize("NFD" , __UpperCAmelCase) a : Any = [] for char in text: a : Union[str, Any] = unicodedata.category(__UpperCAmelCase) if cat == "Mn": continue output.append(__UpperCAmelCase) return "".join(__UpperCAmelCase) def __snake_case ( self : Any , __UpperCAmelCase : str): a : Optional[int] = ( [chr(__UpperCAmelCase) for i in range(ord("a") , ord("z") + 1)] + [chr(__UpperCAmelCase) for i in range(ord("A") , ord("Z") + 1)] + [chr(__UpperCAmelCase) for i in range(ord("0") , ord("9") + 1)] + ["."] ) a : Dict = frozenset(__UpperCAmelCase) a : str = re.compile(r"_+") a : int = "".join([c if c in accepted else "_" for c in text.lower()]) a : Optional[int] = pattern.sub("_" , __UpperCAmelCase).strip("_") return text def 
__snake_case ( self : Optional[Any] , __UpperCAmelCase : List[str]): return " ".join(__UpperCAmelCase) def __snake_case ( self : str , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Optional[Union[str, TensorType]] = None , __UpperCAmelCase : bool = False): # Convert to TensorType if not isinstance(__UpperCAmelCase , __UpperCAmelCase): a : str = TensorType(__UpperCAmelCase) # Get a function reference for the correct framework if tensor_type == TensorType.TENSORFLOW: if not is_tf_available(): raise ImportError( "Unable to convert output to TensorFlow tensors format, TensorFlow is not installed.") import tensorflow as tf a : Tuple = tf.constant a : Dict = tf.is_tensor elif tensor_type == TensorType.PYTORCH: if not is_torch_available(): raise ImportError("Unable to convert output to PyTorch tensors format, PyTorch is not installed.") import torch a : Tuple = torch.tensor a : int = torch.is_tensor elif tensor_type == TensorType.JAX: if not is_flax_available(): raise ImportError("Unable to convert output to JAX tensors format, JAX is not installed.") import jax.numpy as jnp # noqa: F811 a : Optional[int] = jnp.array a : str = _is_jax else: a : Optional[Any] = np.asarray a : List[str] = _is_numpy # Do the tensor conversion in batch try: if prepend_batch_axis: a : List[Any] = [inputs] if not is_tensor(__UpperCAmelCase): a : Optional[Any] = as_tensor(__UpperCAmelCase) except: # noqa E722 raise ValueError( "Unable to create tensor, you should probably activate truncation and/or padding " "with 'padding=True' 'truncation=True' to have batched tensors with the same length.") return inputs def __call__( self : Optional[int] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : int="" , __UpperCAmelCase : Union[str, Any]="pt"): a : str = [0, 0, 0] a : str = [artist] * len(self.version) a : List[str] = [genres] * len(self.version) a : str = self.tokenize(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) a : Dict = self._convert_token_to_id(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) a : str = [-INFINITY] * len(full_tokens[-1]) a : Union[str, Any] = [ self.convert_to_tensors( [input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]] , tensor_type=__UpperCAmelCase) for i in range(len(self.version)) ] return BatchEncoding({"input_ids": input_ids, "attention_masks": attention_masks}) def __snake_case ( self : Tuple , __UpperCAmelCase : str , __UpperCAmelCase : Optional[str] = None): if not os.path.isdir(__UpperCAmelCase): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''') return a : int = os.path.join( __UpperCAmelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["artists_file"]) with open(__UpperCAmelCase , "w" , encoding="utf-8") as f: f.write(json.dumps(self.artists_encoder , ensure_ascii=__UpperCAmelCase)) a : Dict = os.path.join( __UpperCAmelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["genres_file"]) with open(__UpperCAmelCase , "w" , encoding="utf-8") as f: f.write(json.dumps(self.genres_encoder , ensure_ascii=__UpperCAmelCase)) a : Optional[int] = os.path.join( __UpperCAmelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["lyrics_file"]) with open(__UpperCAmelCase , "w" , encoding="utf-8") as f: f.write(json.dumps(self.lyrics_encoder , ensure_ascii=__UpperCAmelCase)) return (artists_file, genres_file, lyrics_file) def __snake_case ( self : str , __UpperCAmelCase : List[Any] , __UpperCAmelCase : str , __UpperCAmelCase : 
int): a : int = self.artists_decoder.get(__UpperCAmelCase) a : Optional[Any] = [self.genres_decoder.get(__UpperCAmelCase) for genre in genres_index] a : List[Any] = [self.lyrics_decoder.get(__UpperCAmelCase) for character in lyric_index] return artist, genres, lyrics
706
"""simple docstring""" def lowercase ( )-> Union[str, Any]: '''simple docstring''' a : Tuple = 0 for i in range(1 , 1_001 ): total += i**i return str(A_ )[-10:] if __name__ == "__main__": print(solution())
135
0
import math
from typing import Any, Callable, List, Optional, Tuple, Union

import numpy as np
import torch

from ...models import TaFilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor


if is_onnx_available():
    from ..onnx_utils import OnnxRuntimeModel

from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder


UpperCAmelCase__ = logging.get_logger(__name__)  # pylint: disable=invalid-name

UpperCAmelCase__ = 256


class a ( __SCREAMING_SNAKE_CASE ):
    """simple docstring"""

    UpperCamelCase_ : Union[str, Any] = ['melgan']

    def __init__( self : Dict , lowerCamelCase__ : SpectrogramNotesEncoder , lowerCamelCase__ : SpectrogramContEncoder , lowerCamelCase__ : TaFilmDecoder , lowerCamelCase__ : DDPMScheduler , lowerCamelCase__ : OnnxRuntimeModel if is_onnx_available() else Any , ) -> None:
        """simple docstring"""
        super().__init__()

        # From MELGAN
        __lowercase = math.log(1e-5 )  # Matches MelGAN training.
        __lowercase = 4.0  # Largest value for most examples
        __lowercase = 128

        self.register_modules(
            notes_encoder=lowerCamelCase__ , continuous_encoder=lowerCamelCase__ , decoder=lowerCamelCase__ , scheduler=lowerCamelCase__ , melgan=lowerCamelCase__ , )

    def UpperCAmelCase_ ( self : int , lowerCamelCase__ : str , lowerCamelCase__ : Optional[Any]=(-1.0, 1.0) , lowerCamelCase__ : Tuple=False ) -> List[Any]:
        """simple docstring"""
        __lowercase , __lowercase = output_range
        if clip:
            __lowercase = torch.clip(lowerCamelCase__ , self.min_value , self.max_value )
        # Scale to [0, 1].
        __lowercase = (features - self.min_value) / (self.max_value - self.min_value)
        # Scale to [min_out, max_out].
        return zero_one * (max_out - min_out) + min_out

    def UpperCAmelCase_ ( self : Tuple , lowerCamelCase__ : Tuple , lowerCamelCase__ : Dict=(-1.0, 1.0) , lowerCamelCase__ : int=False ) -> List[Any]:
        """simple docstring"""
        __lowercase , __lowercase = input_range
        __lowercase = torch.clip(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) if clip else outputs
        # Scale to [0, 1].
        __lowercase = (outputs - min_out) / (max_out - min_out)
        # Scale to [self.min_value, self.max_value].
        return zero_one * (self.max_value - self.min_value) + self.min_value

    def UpperCAmelCase_ ( self : Union[str, Any] , lowerCamelCase__ : str , lowerCamelCase__ : Any , lowerCamelCase__ : Dict ) -> List[str]:
        """simple docstring"""
        __lowercase = input_tokens > 0
        __lowercase , __lowercase = self.notes_encoder(
            encoder_input_tokens=lowerCamelCase__ , encoder_inputs_mask=lowerCamelCase__ )

        __lowercase , __lowercase = self.continuous_encoder(
            encoder_inputs=lowerCamelCase__ , encoder_inputs_mask=lowerCamelCase__ )

        return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]

    def UpperCAmelCase_ ( self : Any , lowerCamelCase__ : Dict , lowerCamelCase__ : str , lowerCamelCase__ : Any ) -> Tuple:
        """simple docstring"""
        __lowercase = noise_time
        if not torch.is_tensor(lowerCamelCase__ ):
            __lowercase = torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device )
        elif torch.is_tensor(lowerCamelCase__ ) and len(timesteps.shape ) == 0:
            __lowercase = timesteps[None].to(input_tokens.device )

        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        __lowercase = timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device )

        __lowercase = self.decoder(
            encodings_and_masks=lowerCamelCase__ , decoder_input_tokens=lowerCamelCase__ , decoder_noise_time=lowerCamelCase__ )
        return logits

    @torch.no_grad()
    def __call__( self : Optional[Any] , lowerCamelCase__ : List[List[int]] , lowerCamelCase__ : Optional[torch.Generator] = None , lowerCamelCase__ : int = 100 , lowerCamelCase__ : bool = True , lowerCamelCase__ : str = "numpy" , lowerCamelCase__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowerCamelCase__ : int = 1 , ) -> Union[AudioPipelineOutput, Tuple]:
        """simple docstring"""
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(lowerCamelCase__ , lowerCamelCase__ ) or callback_steps <= 0)
        ):
            raise ValueError(
                f'`callback_steps` has to be a positive integer but is {callback_steps} of type'
                f' {type(lowerCamelCase__ )}.' )

        __lowercase = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.floataa )
        __lowercase = np.zeros([1, 0, self.n_dims] , np.floataa )
        __lowercase = torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=lowerCamelCase__ , device=self.device )

        for i, encoder_input_tokens in enumerate(lowerCamelCase__ ):
            if i == 0:
                __lowercase = torch.from_numpy(pred_mel[:1].copy() ).to(
                    device=self.device , dtype=self.decoder.dtype )
                # The first chunk has no previous context.
                __lowercase = torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=lowerCamelCase__ , device=self.device )
            else:
                # The full song pipeline does not feed in a context feature, so the mask
                # will be all 0s after the feature converter. Because we know we're
                # feeding in a full context chunk from the previous prediction, set it
                # to all 1s.
                __lowercase = ones

            __lowercase = self.scale_features(
                lowerCamelCase__ , output_range=[-1.0, 1.0] , clip=lowerCamelCase__ )
            __lowercase = self.encode(
                input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ) , continuous_inputs=lowerCamelCase__ , continuous_mask=lowerCamelCase__ , )

            # Sample encoder_continuous_inputs shaped gaussian noise to begin loop
            __lowercase = randn_tensor(
                shape=encoder_continuous_inputs.shape , generator=lowerCamelCase__ , device=self.device , dtype=self.decoder.dtype , )

            # set step values
            self.scheduler.set_timesteps(lowerCamelCase__ )

            # Denoising diffusion loop
            for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
                __lowercase = self.decode(
                    encodings_and_masks=lowerCamelCase__ , input_tokens=lowerCamelCase__ , noise_time=t / self.scheduler.config.num_train_timesteps , )

                # Compute previous output: x_t -> x_t-1
                __lowercase = self.scheduler.step(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , generator=lowerCamelCase__ ).prev_sample

            __lowercase = self.scale_to_features(lowerCamelCase__ , input_range=[-1.0, 1.0] )
            __lowercase = mel[:1]
            __lowercase = mel.cpu().float().numpy()

            __lowercase = np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1 )

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(lowerCamelCase__ , lowerCamelCase__ )

            logger.info('''Generated segment''' , lowerCamelCase__ )

        if output_type == "numpy" and not is_onnx_available():
            raise ValueError(
                '''Cannot return output in \'np\' format if ONNX is not available. Make sure to have ONNX installed or set \'output_type\' to \'mel\'.''' )
        elif output_type == "numpy" and self.melgan is None:
            raise ValueError(
                '''Cannot return output in \'np\' format if melgan component is not defined. Make sure to define `self.melgan` or set \'output_type\' to \'mel\'.''' )

        if output_type == "numpy":
            __lowercase = self.melgan(input_features=full_pred_mel.astype(np.floataa ) )
        else:
            __lowercase = full_pred_mel

        if not return_dict:
            return (output,)

        return AudioPipelineOutput(audios=lowerCamelCase__ )
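For orientation, a hypothetical end-to-end call of this pipeline. The class name and checkpoint id below come from the published diffusers spectrogram-diffusion release, and the note-token list is invented; none of it is taken from the row above.

# Sketch only: assumes diffusers' SpectrogramDiffusionPipeline and a hosted checkpoint.
from diffusers import SpectrogramDiffusionPipeline

pipe = SpectrogramDiffusionPipeline.from_pretrained("google/music-spectrogram-diffusion")
output = pipe([[1, 2, 3, 0], [4, 5, 6, 0]], num_inference_steps=10)
audio = output.audios[0]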
332
import requests


def send_slack_message(message_body : str , slack_url : str ) -> None:
    '''simple docstring'''
    headers = {'''Content-Type''': '''application/json'''}
    response = requests.post(slack_url , json={'''text''': message_body} , headers=headers )
    if response.status_code != 200:
        msg = (
            '''Request to slack returned an error '''
            F'{response.status_code}, the response is:\n{response.text}'
        )
        raise ValueError(msg )


if __name__ == "__main__":
    # Set the slack url to the one provided by Slack when you create the webhook at
    # https://my.slack.com/services/new/incoming-webhook/
    send_slack_message("<YOUR MESSAGE BODY>", "<SLACK CHANNEL URL>")
332
1
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging


__a: Optional[int] = logging.get_logger(__name__)

__a: List[Any] = {
    """microsoft/cvt-13""": """https://huggingface.co/microsoft/cvt-13/resolve/main/config.json""",
    # See all Cvt models at https://huggingface.co/models?filter=cvt
}


class UpperCAmelCase ( a__ ):
    '''simple docstring'''

    SCREAMING_SNAKE_CASE = "cvt"

    def __init__( self , __lowerCAmelCase=3 , __lowerCAmelCase=[7, 3, 3] , __lowerCAmelCase=[4, 2, 2] , __lowerCAmelCase=[2, 1, 1] , __lowerCAmelCase=[64, 192, 384] , __lowerCAmelCase=[1, 3, 6] , __lowerCAmelCase=[1, 2, 10] , __lowerCAmelCase=[4.0, 4.0, 4.0] , __lowerCAmelCase=[0.0, 0.0, 0.0] , __lowerCAmelCase=[0.0, 0.0, 0.0] , __lowerCAmelCase=[0.0, 0.0, 0.1] , __lowerCAmelCase=[True, True, True] , __lowerCAmelCase=[False, False, True] , __lowerCAmelCase=["dw_bn", "dw_bn", "dw_bn"] , __lowerCAmelCase=[3, 3, 3] , __lowerCAmelCase=[1, 1, 1] , __lowerCAmelCase=[2, 2, 2] , __lowerCAmelCase=[1, 1, 1] , __lowerCAmelCase=[1, 1, 1] , __lowerCAmelCase=0.0_2 , __lowerCAmelCase=1E-12 , **__lowerCAmelCase , ) -> List[Any]:
        super().__init__(**__lowerCAmelCase )
        lowercase__ : Any = num_channels
        lowercase__ : str = patch_sizes
        lowercase__ : Optional[Any] = patch_stride
        lowercase__ : int = patch_padding
        lowercase__ : Union[str, Any] = embed_dim
        lowercase__ : List[Any] = num_heads
        lowercase__ : int = depth
        lowercase__ : List[str] = mlp_ratio
        lowercase__ : Any = attention_drop_rate
        lowercase__ : List[Any] = drop_rate
        lowercase__ : int = drop_path_rate
        lowercase__ : List[Any] = qkv_bias
        lowercase__ : Optional[Any] = cls_token
        lowercase__ : Tuple = qkv_projection_method
        lowercase__ : Union[str, Any] = kernel_qkv
        lowercase__ : Dict = padding_kv
        lowercase__ : Union[str, Any] = stride_kv
        lowercase__ : Optional[Any] = padding_q
        lowercase__ : Optional[Any] = stride_q
        lowercase__ : int = initializer_range
        lowercase__ : Any = layer_norm_eps
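The row above has the shape of transformers' CvtConfig; a minimal sketch under that assumption (class name and defaults come from the upstream library, not from this row).

# Hypothetical: instantiate a CvT-13-style configuration and inspect a field.
from transformers import CvtConfig

config = CvtConfig(embed_dim=[64, 192, 384], num_heads=[1, 3, 6], depth=[1, 2, 10])
print(config.num_channels)  # 3 by default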
428
'''simple docstring'''
import argparse
import glob
import logging
import os
import time
from argparse import Namespace

import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset

from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors


__a: List[Any] = logging.getLogger(__name__)


class UpperCAmelCase ( a__ ):
    '''simple docstring'''

    SCREAMING_SNAKE_CASE = "sequence-classification"

    def __init__( self , __lowerCAmelCase ) -> Optional[Any]:
        if type(__lowerCAmelCase ) == dict:
            lowercase__ : str = Namespace(**__lowerCAmelCase )
        lowercase__ : str = glue_output_modes[hparams.task]
        lowercase__ : Dict = glue_tasks_num_labels[hparams.task]

        super().__init__(__lowerCAmelCase , __lowerCAmelCase , self.mode )

    def _lowerCAmelCase( self , **__lowerCAmelCase ) -> List[str]:
        return self.model(**__lowerCAmelCase )

    def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase ) -> int:
        lowercase__ : Any = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]}

        if self.config.model_type not in ["distilbert", "bart"]:
            lowercase__ : str = batch[2] if self.config.model_type in ['''bert''', '''xlnet''', '''albert'''] else None

        lowercase__ : int = self(**__lowerCAmelCase )
        lowercase__ : Optional[Any] = outputs[0]

        lowercase__ : List[str] = self.trainer.lr_schedulers[0]['''scheduler''']
        lowercase__ : str = {'''loss''': loss, '''rate''': lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss, "log": tensorboard_logs}

    def _lowerCAmelCase( self ) -> Union[str, Any]:
        lowercase__ : int = self.hparams
        lowercase__ : Tuple = processors[args.task]()
        lowercase__ : List[str] = processor.get_labels()

        for mode in ["train", "dev"]:
            lowercase__ : Union[str, Any] = self._feature_file(__lowerCAmelCase )
            if os.path.exists(__lowerCAmelCase ) and not args.overwrite_cache:
                logger.info('''Loading features from cached file %s''' , __lowerCAmelCase )
            else:
                logger.info('''Creating features from dataset file at %s''' , args.data_dir )
                lowercase__ : Union[str, Any] = (
                    processor.get_dev_examples(args.data_dir )
                    if mode == '''dev'''
                    else processor.get_train_examples(args.data_dir )
                )
                lowercase__ : List[Any] = convert_examples_to_features(
                    __lowerCAmelCase , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , )
                logger.info('''Saving features into cached file %s''' , __lowerCAmelCase )
                torch.save(__lowerCAmelCase , __lowerCAmelCase )

    def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = False ) -> DataLoader:
        lowercase__ : Dict = '''dev''' if mode == '''test''' else mode

        lowercase__ : List[str] = self._feature_file(__lowerCAmelCase )
        logger.info('''Loading features from cached file %s''' , __lowerCAmelCase )
        lowercase__ : Dict = torch.load(__lowerCAmelCase )
        lowercase__ : List[Any] = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
        lowercase__ : List[Any] = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
        lowercase__ : Dict = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
        if self.hparams.glue_output_mode == "classification":
            lowercase__ : str = torch.tensor([f.label for f in features] , dtype=torch.long )
        elif self.hparams.glue_output_mode == "regression":
            lowercase__ : Tuple = torch.tensor([f.label for f in features] , dtype=torch.float )

        return DataLoader(
            TensorDataset(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) ,
            batch_size=__lowerCAmelCase ,
            shuffle=__lowerCAmelCase ,
        )

    def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase ) -> Any:
        lowercase__ : Union[str, Any] = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]}

        if self.config.model_type not in ["distilbert", "bart"]:
            lowercase__ : Union[str, Any] = batch[2] if self.config.model_type in ['''bert''', '''xlnet''', '''albert'''] else None

        lowercase__ : List[Any] = self(**__lowerCAmelCase )
        lowercase__ , lowercase__ : int = outputs[:2]
        lowercase__ : List[str] = logits.detach().cpu().numpy()
        lowercase__ : Any = inputs['''labels'''].detach().cpu().numpy()

        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}

    def _lowerCAmelCase( self , __lowerCAmelCase ) -> tuple:
        lowercase__ : List[Any] = torch.stack([x['''val_loss'''] for x in outputs] ).mean().detach().cpu().item()
        lowercase__ : Dict = np.concatenate([x['''pred'''] for x in outputs] , axis=0 )

        if self.hparams.glue_output_mode == "classification":
            lowercase__ : Any = np.argmax(__lowerCAmelCase , axis=1 )
        elif self.hparams.glue_output_mode == "regression":
            lowercase__ : Optional[Any] = np.squeeze(__lowerCAmelCase )

        lowercase__ : Optional[Any] = np.concatenate([x['''target'''] for x in outputs] , axis=0 )
        lowercase__ : Union[str, Any] = [[] for _ in range(out_label_ids.shape[0] )]
        lowercase__ : int = [[] for _ in range(out_label_ids.shape[0] )]

        lowercase__ : List[Any] = {**{'''val_loss''': val_loss_mean}, **compute_metrics(self.hparams.task , __lowerCAmelCase , __lowerCAmelCase )}

        lowercase__ : int = dict(results.items() )
        lowercase__ : Optional[int] = results
        return ret, preds_list, out_label_list

    def _lowerCAmelCase( self , __lowerCAmelCase ) -> dict:
        lowercase__ , lowercase__ , lowercase__ : Dict = self._eval_end(__lowerCAmelCase )
        lowercase__ : List[Any] = ret['''log''']
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    def _lowerCAmelCase( self , __lowerCAmelCase ) -> dict:
        lowercase__ , lowercase__ , lowercase__ : Union[str, Any] = self._eval_end(__lowerCAmelCase )
        lowercase__ : Union[str, Any] = ret['''log''']
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    @staticmethod
    def _lowerCAmelCase( __lowerCAmelCase , __lowerCAmelCase ) -> str:
        BaseTransformer.add_model_specific_args(__lowerCAmelCase , __lowerCAmelCase )
        parser.add_argument(
            '''--max_seq_length''' ,
            default=128 ,
            type=__lowerCAmelCase ,
            help=(
                '''The maximum total input sequence length after tokenization. Sequences longer '''
                '''than this will be truncated, sequences shorter will be padded.'''
            ) ,
        )

        parser.add_argument(
            '''--task''' ,
            default='''''' ,
            type=__lowerCAmelCase ,
            required=__lowerCAmelCase ,
            help='''The GLUE task to run''' ,
        )
        parser.add_argument(
            '''--gpus''' ,
            default=0 ,
            type=__lowerCAmelCase ,
            help='''The number of GPUs allocated for this, it is by default 0 meaning none''' ,
        )

        parser.add_argument(
            '''--overwrite_cache''' , action='''store_true''' , help='''Overwrite the cached training and evaluation sets''' )

        return parser


def __UpperCamelCase ( ):
    lowercase__ : int = argparse.ArgumentParser()
    add_generic_args(UpperCAmelCase , os.getcwd() )
    lowercase__ : List[Any] = GLUETransformer.add_model_specific_args(UpperCAmelCase , os.getcwd() )
    lowercase__ : Tuple = parser.parse_args()

    # If output_dir not provided, a folder will be generated in pwd
    if args.output_dir is None:
        lowercase__ : List[Any] = os.path.join(
            '''./results''' ,
            F"""{args.task}_{time.strftime('%Y%m%d_%H%M%S' )}""" ,
        )
        os.makedirs(args.output_dir )

    lowercase__ : Optional[int] = GLUETransformer(UpperCAmelCase )
    lowercase__ : Any = generic_train(UpperCAmelCase , UpperCAmelCase )

    # Optionally, predict on dev set and write to output_dir
    if args.do_predict:
        lowercase__ : List[str] = sorted(glob.glob(os.path.join(args.output_dir , '''checkpoint-epoch=*.ckpt''' ) , recursive=UpperCAmelCase ) )
        lowercase__ : Tuple = model.load_from_checkpoint(checkpoints[-1] )
        return trainer.test(UpperCAmelCase )


if __name__ == "__main__":
    main()
428
1
"""simple docstring""" from manim import * class _SCREAMING_SNAKE_CASE ( __UpperCAmelCase ): """simple docstring""" def UpperCAmelCase__( self ) -> Dict: lowercase__ : Optional[int] = Rectangle(height=0.5 , width=0.5 ) lowercase__ : List[Any] = Rectangle(height=0.25 , width=0.25 ) lowercase__ : int = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 ) lowercase__ : Dict = [mem.copy() for i in range(6 )] lowercase__ : List[str] = [mem.copy() for i in range(6 )] lowercase__ : Optional[int] = VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 ) lowercase__ : List[Any] = VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 ) lowercase__ : Union[str, Any] = VGroup(lowerCamelCase__ , lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 ) lowercase__ : List[str] = Text("""CPU""" , font_size=24 ) lowercase__ : Any = Group(lowerCamelCase__ , lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0.5 , aligned_edge=lowerCamelCase__ ) cpu.move_to([-2.5, -0.5, 0] ) self.add(lowerCamelCase__ ) lowercase__ : int = [mem.copy() for i in range(4 )] lowercase__ : List[Any] = VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 ) lowercase__ : Optional[int] = Text("""GPU""" , font_size=24 ) lowercase__ : Optional[Any] = Group(lowerCamelCase__ , lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0.5 , aligned_edge=lowerCamelCase__ ) gpu.move_to([-1, -1, 0] ) self.add(lowerCamelCase__ ) lowercase__ : Union[str, Any] = [mem.copy() for i in range(6 )] lowercase__ : Tuple = VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 ) lowercase__ : str = Text("""Model""" , font_size=24 ) lowercase__ : str = Group(lowerCamelCase__ , lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0.5 , aligned_edge=lowerCamelCase__ ) model.move_to([3, -1.0, 0] ) self.add(lowerCamelCase__ ) lowercase__ : str = [] lowercase__ : Optional[int] = [] lowercase__ : Any = [] for i, rect in enumerate(lowerCamelCase__ ): rect.set_stroke(lowerCamelCase__ ) lowercase__ : List[Any] = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(lowerCamelCase__ , opacity=0.7 ) if i == 0: cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=lowerCamelCase__ ) cpu_target.set_x(cpu_target.get_x() + 0.1 ) elif i == 3: cpu_target.next_to(model_cpu_arr[0] , direction=lowerCamelCase__ , buff=0.0 ) else: cpu_target.next_to(model_cpu_arr[i - 1] , direction=lowerCamelCase__ , buff=0.0 ) self.add(lowerCamelCase__ ) model_cpu_arr.append(lowerCamelCase__ ) self.add(*lowerCamelCase__ , *lowerCamelCase__ , *lowerCamelCase__ ) lowercase__ : Dict = [mem.copy() for i in range(6 )] lowercase__ : Dict = VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 ) lowercase__ : Optional[int] = Text("""Loaded Checkpoint""" , font_size=24 ) lowercase__ : Any = Group(lowerCamelCase__ , lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0.5 , aligned_edge=lowerCamelCase__ ) checkpoint.move_to([3, 0.5, 0] ) self.add(lowerCamelCase__ ) lowercase__ : List[Any] = [] lowercase__ : Optional[int] = [] for i, rect in enumerate(lowerCamelCase__ ): lowercase__ : int = fill.copy().set_fill(lowerCamelCase__ , opacity=0.7 ) target.move_to(lowerCamelCase__ ) ckpt_arr.append(lowerCamelCase__ ) lowercase__ : Union[str, Any] = target.copy() if i < 5: cpu_target.move_to(cpu_left_col_base[i + 1] ) else: cpu_target.move_to(cpu_right_col_base[i - 5] ) ckpt_cpu_arr.append(lowerCamelCase__ ) self.add(*lowerCamelCase__ , *lowerCamelCase__ ) lowercase__ : List[str] = Square(side_length=2.2 ) key.move_to([-5, 
2, 0] ) lowercase__ : Union[str, Any] = MarkupText( F'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , ) key_text.move_to([-5, 2.4, 0] ) self.add(lowerCamelCase__ , lowerCamelCase__ ) lowercase__ : Optional[int] = MarkupText( F'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=18 , ) blue_text.next_to(lowerCamelCase__ , DOWN * 2.4 , aligned_edge=key_text.get_left() ) self.add(lowerCamelCase__ ) lowercase__ : Tuple = MarkupText( F'''Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.''' , font_size=24 , ) step_a.move_to([2, 2, 0] ) lowercase__ : Dict = [meta_mem.copy() for i in range(6 )] lowercase__ : Union[str, Any] = [meta_mem.copy() for i in range(6 )] lowercase__ : Optional[Any] = VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 ) lowercase__ : Optional[int] = VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 ) lowercase__ : Optional[Any] = VGroup(lowerCamelCase__ , lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 ) lowercase__ : Optional[int] = Text("""Disk""" , font_size=24 ) lowercase__ : List[str] = Group(lowerCamelCase__ , lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0.5 , aligned_edge=lowerCamelCase__ ) disk.move_to([-4.0, -1.25, 0] ) self.play(Write(lowerCamelCase__ , run_time=3 ) , Write(lowerCamelCase__ , run_time=1 ) , Create(lowerCamelCase__ , run_time=1 ) ) lowercase__ : Any = [] for i, rect in enumerate(lowerCamelCase__ ): lowercase__ : Any = rect.copy() target.generate_target() target.target.move_to(disk_left_col_base[i] ).scale(0.5 ) animations.append(MoveToTarget(lowerCamelCase__ , run_time=1.5 ) ) self.play(*lowerCamelCase__ ) self.play(FadeOut(lowerCamelCase__ ) ) lowercase__ : Optional[Any] = MarkupText(F'''Then, the checkpoint is removed from memory\nthrough garbage collection.''' , font_size=24 ) step_a.move_to([2, 2, 0] ) self.play(Write(lowerCamelCase__ , run_time=3 ) ) self.play( FadeOut(lowerCamelCase__ , lowerCamelCase__ , *lowerCamelCase__ , *lowerCamelCase__ ) , ) self.wait()
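To render a scene like the one above outside the manim CLI, one option is manim's tempconfig context manager; this is a sketch only, and it assumes the class's base is restored to manim's Scene so the definition actually loads.

# Hypothetical: render the Scene subclass defined above programmatically.
from manim import tempconfig

with tempconfig({"quality": "low_quality", "preview": False}):
    _SCREAMING_SNAKE_CASE().render()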
200
"""simple docstring""" import argparse from pathlib import Path from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration def _lowerCamelCase ( lowerCamelCase__ : List[Any] , lowerCamelCase__ : str , lowerCamelCase__ : str , lowerCamelCase__ : Path , lowerCamelCase__ : str = None , lowerCamelCase__ : str = None , lowerCamelCase__ : str = None , ): if config_name_or_path is None: lowercase__ : Tuple = """facebook/rag-token-base""" if model_type == """rag_token""" else """facebook/rag-sequence-base""" if generator_tokenizer_name_or_path is None: lowercase__ : List[Any] = generator_name_or_path if question_encoder_tokenizer_name_or_path is None: lowercase__ : List[Any] = question_encoder_name_or_path lowercase__ : Any = RagTokenForGeneration if model_type == """rag_token""" else RagSequenceForGeneration # Save model. lowercase__ : Union[str, Any] = RagConfig.from_pretrained(lowerCamelCase__ ) lowercase__ : List[Any] = AutoConfig.from_pretrained(lowerCamelCase__ ) lowercase__ : Dict = AutoConfig.from_pretrained(lowerCamelCase__ ) lowercase__ : Union[str, Any] = gen_config lowercase__ : Any = question_encoder_config lowercase__ : str = model_class.from_pretrained_question_encoder_generator( lowerCamelCase__ , lowerCamelCase__ , config=lowerCamelCase__ ) rag_model.save_pretrained(lowerCamelCase__ ) # Sanity check. model_class.from_pretrained(lowerCamelCase__ ) # Save tokenizers. lowercase__ : Optional[Any] = AutoTokenizer.from_pretrained(lowerCamelCase__ ) gen_tokenizer.save_pretrained(dest_dir / """generator_tokenizer/""" ) lowercase__ : Optional[int] = AutoTokenizer.from_pretrained(lowerCamelCase__ ) question_encoder_tokenizer.save_pretrained(dest_dir / """question_encoder_tokenizer/""" ) if __name__ == "__main__": __snake_case = argparse.ArgumentParser() parser.add_argument( '--model_type', choices=['rag_sequence', 'rag_token'], required=True, type=str, help='RAG model type: rag_sequence, rag_token', ) parser.add_argument('--dest', type=str, required=True, help='Path to the output checkpoint directory.') parser.add_argument('--generator_name_or_path', type=str, required=True, help='Generator model identifier') parser.add_argument( '--question_encoder_name_or_path', type=str, required=True, help='Question encoder model identifier' ) parser.add_argument( '--generator_tokenizer_name_or_path', type=str, help='Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``', ) parser.add_argument( '--question_encoder_tokenizer_name_or_path', type=str, help='Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``', ) parser.add_argument( '--config_name_or_path', type=str, help=( 'Identifier of the model config to use, if not provided, resolves to a base config for a given' ' ``model_type``' ), ) __snake_case = parser.parse_args() __snake_case = Path(args.dest) dest_dir.mkdir(exist_ok=True) consolidate( args.model_type, args.generator_name_or_path, args.question_encoder_name_or_path, dest_dir, args.config_name_or_path, args.generator_tokenizer_name_or_path, args.question_encoder_tokenizer_name_or_path, )
200
1
"""simple docstring""" # Usage: # ./gen-card-allenai-wmt16.py import os from pathlib import Path def _lowercase ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : int ) -> Any: '''simple docstring''' __A : str = { 'en': 'Machine learning is great, isn\'t it?', 'ru': 'Машинное обучение - это здорово, не так ли?', 'de': 'Maschinelles Lernen ist großartig, nicht wahr?', } # BLUE scores as follows: # "pair": [fairseq, transformers] __A : str = { 'wmt16-en-de-dist-12-1': [28.3, 27.52], 'wmt16-en-de-dist-6-1': [27.4, 27.11], 'wmt16-en-de-12-1': [26.9, 25.75], } __A : List[str] = F'{src_lang}-{tgt_lang}' __A : List[Any] = F'\n---\nlanguage:\n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt16\n- allenai\nlicense: apache-2.0\ndatasets:\n- wmt16\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.\n\nFor more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).\n\nAll 3 models are available:\n\n* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)\n* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)\n* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)\n\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = "allenai/{model_name}"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = "{texts[src_lang]}"\ninput_ids = tokenizer.encode(input, return_tensors="pt")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n\n## Training data\n\nPretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).\n\n## Eval results\n\nHere are the BLEU scores:\n\nmodel | fairseq | transformers\n-------|---------|----------\n{model_name} | {scores[model_name][0]} | {scores[model_name][1]}\n\nThe score is slightly below the score reported in the paper, as the researchers don\'t use `sacrebleu` and measure the score on tokenized outputs. 
`transformers` score was measured using `sacrebleu` on detokenized outputs.\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=5\nmkdir -p $DATA_DIR\nsacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt16/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)\n\n\n### BibTeX entry and citation info\n\n```\n@misc{{kasai2020deep,\n title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},\n author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},\n year={{2020}},\n eprint={{2006.10369}},\n archivePrefix={{arXiv}},\n primaryClass={{cs.CL}}\n}}\n```\n\n' model_card_dir.mkdir(parents=__snake_case , exist_ok=__snake_case ) __A : Dict = os.path.join(__snake_case , 'README.md' ) print(F'Generating {path}' ) with open(__snake_case , 'w' , encoding='utf-8' ) as f: f.write(__snake_case ) # make sure we are under the root of the project lowerCamelCase : Any =Path(__file__).resolve().parent.parent.parent lowerCamelCase : Dict =repo_dir / '''model_cards''' for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]: lowerCamelCase : Dict =model_cards_dir / '''allenai''' / model_name write_model_card(model_card_dir, src_lang='''en''', tgt_lang='''de''', model_name=model_name)
707
"""simple docstring""" import argparse from pathlib import Path import torch from transformers import OPTConfig, OPTModel from transformers.utils import logging logging.set_verbosity_info() lowerCamelCase : Tuple =logging.get_logger(__name__) def _lowercase ( _SCREAMING_SNAKE_CASE : Dict ) -> Optional[int]: '''simple docstring''' __A : List[str] = torch.load(_SCREAMING_SNAKE_CASE , map_location='cpu' ) if "model" in sd.keys(): __A : int = torch.load(_SCREAMING_SNAKE_CASE , map_location='cpu' )['model'] # pop unnecessary weights __A : str = [ 'decoder.version', 'decoder.output_projection.weight', ] for key in keys_to_delete: if key in sd: sd.pop(_SCREAMING_SNAKE_CASE ) __A : List[str] = { 'decoder.project_in_dim.weight': 'decoder.project_in.weight', 'decoder.project_out_dim.weight': 'decoder.project_out.weight', 'decoder.layer_norm.weight': 'decoder.final_layer_norm.weight', 'decoder.layer_norm.bias': 'decoder.final_layer_norm.bias', } for old_key, new_key in keys_to_rename.items(): if old_key in sd: __A : Any = sd.pop(_SCREAMING_SNAKE_CASE ) __A : Union[str, Any] = list(sd.keys() ) for key in keys: if ".qkv_proj." in key: __A : Tuple = sd[key] # We split QKV in separate Q,K,V __A : Any = key.replace('.qkv_proj.' , '.q_proj.' ) __A : Any = key.replace('.qkv_proj.' , '.k_proj.' ) __A : Any = key.replace('.qkv_proj.' , '.v_proj.' ) __A : List[Any] = value.shape[0] assert depth % 3 == 0 # `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming: # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97 __A , __A , __A : List[str] = torch.split(_SCREAMING_SNAKE_CASE , depth // 3 , dim=0 ) __A : Optional[int] = q __A : int = k __A : List[str] = v del sd[key] return sd @torch.no_grad() def _lowercase ( _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Optional[int]=None ) -> List[str]: '''simple docstring''' __A : Dict = load_checkpoint(_SCREAMING_SNAKE_CASE ) if config is not None: __A : Any = OPTConfig.from_pretrained(_SCREAMING_SNAKE_CASE ) else: __A : Tuple = OPTConfig() __A : Any = OPTModel(_SCREAMING_SNAKE_CASE ).half().eval() model.load_state_dict(_SCREAMING_SNAKE_CASE ) # Check results Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE ) model.save_pretrained(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": lowerCamelCase : Optional[Any] =argparse.ArgumentParser() # Required parameters parser.add_argument( '''--fairseq_path''', type=str, help=( '''path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:''' ''' https://huggingface.co/models?other=opt_metasq''' ), ) parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--hf_config''', default=None, type=str, help='''Define HF config.''') lowerCamelCase : Dict =parser.parse_args() convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
237
0
'''simple docstring'''


def snake_to_camel_case(input_str : str , use_pascal : bool = False ) -> str:
    '''simple docstring'''
    if not isinstance(input_str , str ):
        msg = f'''Expected string as input, found {type(input_str )}'''
        raise ValueError(msg )
    if not isinstance(use_pascal , bool ):
        msg = f'''Expected boolean as use_pascal parameter, found {type(use_pascal )}'''
        raise ValueError(msg )

    words = input_str.split('''_''' )

    start_index = 0 if use_pascal else 1

    words_to_capitalize = words[start_index:]

    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]

    initial_word = '''''' if use_pascal else words[0]

    return "".join([initial_word, *capitalized_words] )


if __name__ == "__main__":
    from doctest import testmod

    testmod()
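Quick checks for the converter above; the input strings are invented examples.

print(snake_to_camel_case("some_random_string"))                   # someRandomString
print(snake_to_camel_case("some_random_string", use_pascal=True))  # SomeRandomString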
189
'''simple docstring'''
import argparse

from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection

from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')

    parser.add_argument(
        '''--txt2img_unclip''',
        default='''kakaobrain/karlo-v1-alpha''',
        type=str,
        required=False,
        help='''The pretrained txt2img unclip.''',
    )

    args = parser.parse_args()

    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained('''openai/clip-vit-large-patch14''')

    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )

    img2img.save_pretrained(args.dump_path)
189
1
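A quick usage sketch for the converter repaired above, assuming snake_to_camel_case is in scope.

# Illustrative checks (outputs derived from the code above).
assert snake_to_camel_case("some_random_string") == "someRandomString"
assert snake_to_camel_case("some_random_string", use_pascal=True) == "SomeRandomString"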
'''simple docstring''' import os import re import warnings from shutil import copyfile from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer if TYPE_CHECKING: from ...tokenization_utils_base import TextInput from ...utils import logging snake_case : str = logging.get_logger(__name__) snake_case : List[str] = {'vocab_file': 'spiece.model'} snake_case : Tuple = { 'vocab_file': { 't5-small': 'https://huggingface.co/t5-small/resolve/main/spiece.model', 't5-base': 'https://huggingface.co/t5-base/resolve/main/spiece.model', 't5-large': 'https://huggingface.co/t5-large/resolve/main/spiece.model', 't5-3b': 'https://huggingface.co/t5-3b/resolve/main/spiece.model', 't5-11b': 'https://huggingface.co/t5-11b/resolve/main/spiece.model', } } # TODO(PVP) - this should be removed in Transformers v5 snake_case : Tuple = { 't5-small': 512, 't5-base': 512, 't5-large': 512, 't5-3b': 512, 't5-11b': 512, } snake_case : int = '▁' class lowerCamelCase__( snake_case_ ): UpperCamelCase : int = VOCAB_FILES_NAMES UpperCamelCase : int = PRETRAINED_VOCAB_FILES_MAP UpperCamelCase : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase : Dict = ["input_ids", "attention_mask"] def __init__( self , __UpperCAmelCase , __UpperCAmelCase="</s>" , __UpperCAmelCase="<unk>" , __UpperCAmelCase="<pad>" , __UpperCAmelCase=1_0_0 , __UpperCAmelCase=None , __UpperCAmelCase = None , __UpperCAmelCase=True , **__UpperCAmelCase , ): """simple docstring""" if extra_ids > 0 and additional_special_tokens is None: __lowercase = [F'''<extra_id_{i}>''' for i in range(__UpperCAmelCase )] elif extra_ids > 0 and additional_special_tokens is not None: # Check that we have the right number of extra_id special tokens __lowercase = len(set(filter(lambda __UpperCAmelCase : bool("""extra_id""" in str(__UpperCAmelCase ) ) , __UpperCAmelCase ) ) ) if extra_tokens != extra_ids: raise ValueError( F'''Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are''' """ provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids""" """ tokens""" ) if legacy: logger.warning_once( F'''You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. 
We recommend you to''' """ read the related pull request available at https://github.com/huggingface/transformers/pull/24565""" ) __lowercase = legacy __lowercase = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , extra_ids=__UpperCAmelCase , additional_special_tokens=__UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , legacy=__UpperCAmelCase , **__UpperCAmelCase , ) __lowercase = vocab_file __lowercase = extra_ids __lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(__UpperCAmelCase ) @staticmethod def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): """simple docstring""" if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes: __lowercase = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path] if init_max_model_length is not None and init_max_model_length != max_model_length: return init_max_model_length elif init_max_model_length is None: warnings.warn( """This tokenizer was incorrectly instantiated with a model max length of""" F''' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this''' """ behavior is kept to avoid breaking backwards compatibility when padding/encoding with""" """ `truncation is True`.\n- Be aware that you SHOULD NOT rely on""" F''' {pretrained_model_name_or_path} automatically truncating your input to''' F''' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences''' F''' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with''' """ `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please""" """ instantiate this tokenizer with `model_max_length` set to your preferred value.""" , __UpperCAmelCase , ) return max_model_length @property def __magic_name__ ( self ): """simple docstring""" return self.sp_model.get_piece_size() + self._extra_ids def __magic_name__ ( self ): """simple docstring""" __lowercase = {self.convert_ids_to_tokens(__UpperCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __magic_name__ ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = False ): """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__UpperCAmelCase , token_ids_a=__UpperCAmelCase , already_has_special_tokens=__UpperCAmelCase ) # normal case: some special tokens if token_ids_a is None: return ([0] * len(__UpperCAmelCase )) + [1] return ([0] * len(__UpperCAmelCase )) + [1] + ([0] * len(__UpperCAmelCase )) + [1] def __magic_name__ ( self ): """simple docstring""" return list( set(filter(lambda __UpperCAmelCase : bool(re.search(R"""<extra_id_\d+>""" , __UpperCAmelCase ) ) is not None , self.additional_special_tokens ) ) ) def __magic_name__ ( self ): """simple docstring""" return [self._convert_token_to_id(__UpperCAmelCase ) for token in self.get_sentinel_tokens()] def __magic_name__ ( self , __UpperCAmelCase ): """simple docstring""" if len(__UpperCAmelCase ) > 0 and token_ids[-1] == self.eos_token_id: warnings.warn( F'''This sequence already has {self.eos_token}. 
In future versions this behavior may lead to duplicated''' """ eos tokens being added.""" ) return token_ids else: return token_ids + [self.eos_token_id] def __magic_name__ ( self , __UpperCAmelCase , __UpperCAmelCase = None ): """simple docstring""" __lowercase = [self.eos_token_id] if token_ids_a is None: return len(token_ids_a + eos ) * [0] return len(token_ids_a + eos + token_ids_a + eos ) * [0] def __magic_name__ ( self , __UpperCAmelCase , __UpperCAmelCase = None ): """simple docstring""" __lowercase = self._add_eos_if_not_present(__UpperCAmelCase ) if token_ids_a is None: return token_ids_a else: __lowercase = self._add_eos_if_not_present(__UpperCAmelCase ) return token_ids_a + token_ids_a def __getstate__( self ): """simple docstring""" __lowercase = self.__dict__.copy() __lowercase = None return state def __setstate__( self , __UpperCAmelCase ): """simple docstring""" __lowercase = d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): __lowercase = {} __lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def __magic_name__ ( self , __UpperCAmelCase , **__UpperCAmelCase ): """simple docstring""" if not self.legacy: __lowercase = SPIECE_UNDERLINE + text.replace(__UpperCAmelCase , """ """ ) return super().tokenize(__UpperCAmelCase , **__UpperCAmelCase ) def __magic_name__ ( self , __UpperCAmelCase , **__UpperCAmelCase ): """simple docstring""" if not self.legacy: __lowercase = text.startswith(__UpperCAmelCase ) if is_first: __lowercase = text[1:] __lowercase = self.sp_model.encode(__UpperCAmelCase , out_type=__UpperCAmelCase ) if not self.legacy and not is_first and not text.startswith(""" """ ) and tokens[0].startswith(__UpperCAmelCase ): __lowercase = ([tokens[0][1:]] if len(tokens[0] ) > 1 else []) + tokens[1:] return tokens def __magic_name__ ( self , __UpperCAmelCase ): """simple docstring""" if token.startswith("""<extra_id_""" ): __lowercase = re.match(R"""<extra_id_(\d+)>""" , __UpperCAmelCase ) __lowercase = int(match.group(1 ) ) return self.vocab_size - num - 1 return self.sp_model.piece_to_id(__UpperCAmelCase ) def __magic_name__ ( self , __UpperCAmelCase ): """simple docstring""" if index < self.sp_model.get_piece_size(): __lowercase = self.sp_model.IdToPiece(__UpperCAmelCase ) else: __lowercase = F'''<extra_id_{self.vocab_size - 1 - index}>''' return token def __magic_name__ ( self , __UpperCAmelCase ): """simple docstring""" __lowercase = [] __lowercase = """""" __lowercase = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(__UpperCAmelCase ) + token __lowercase = True __lowercase = [] else: current_sub_tokens.append(__UpperCAmelCase ) __lowercase = False out_string += self.sp_model.decode(__UpperCAmelCase ) return out_string.strip() def __magic_name__ ( self , __UpperCAmelCase , __UpperCAmelCase = None ): """simple docstring""" if not os.path.isdir(__UpperCAmelCase ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return __lowercase = os.path.join( __UpperCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , __UpperCAmelCase ) elif not os.path.isfile(self.vocab_file ): with open(__UpperCAmelCase , """wb""" ) 
as fi: __lowercase = self.sp_model.serialized_model_proto() fi.write(__UpperCAmelCase ) return (out_vocab_file,)
708
'''Equivalent resistance of resistors connected in parallel or in series.'''

from __future__ import annotations


def resistor_parallel(resistors: list[float]) -> float:
    first_sum = 0.00
    index = 0
    for resistor in resistors:
        if resistor <= 0:
            msg = f"Resistor at index {index} has a negative or zero value!"
            raise ValueError(msg)
        first_sum += 1 / float(resistor)
        index += 1
    return 1 / first_sum


def resistor_series(resistors: list[float]) -> float:
    sum_r = 0.00
    index = 0
    for resistor in resistors:
        sum_r += resistor
        if resistor < 0:
            msg = f"Resistor at index {index} has a negative value!"
            raise ValueError(msg)
        index += 1
    return sum_r


if __name__ == "__main__":
    import doctest

    doctest.testmod()
339
0
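A short sketch exercising the series/parallel helpers fixed above; the 6-ohm values are illustrative.

# Three 6-ohm resistors: 2 ohms in parallel, 18 ohms in series.
print(resistor_parallel([6.0, 6.0, 6.0]))  # 2.0
print(resistor_series([6.0, 6.0, 6.0]))    # 18.0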
"""simple docstring""" from ..utils import DummyObject, requires_backends class UpperCAmelCase_ ( metaclass=_a): lowerCamelCase__ : int = ["transformers", "torch", "note_seq"] def __init__( self , *a , **a ) -> int: requires_backends(self , ['transformers', 'torch', 'note_seq'] ) @classmethod def _UpperCAmelCase ( cls , *a , **a ) -> Optional[int]: requires_backends(cls , ['transformers', 'torch', 'note_seq'] ) @classmethod def _UpperCAmelCase ( cls , *a , **a ) -> int: requires_backends(cls , ['transformers', 'torch', 'note_seq'] )
599
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _UpperCamelCase : Optional[Any] = { "configuration_blip_2": [ "BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Blip2Config", "Blip2QFormerConfig", "Blip2VisionConfig", ], "processing_blip_2": ["Blip2Processor"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCamelCase : List[Any] = [ "BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST", "Blip2Model", "Blip2QFormerModel", "Blip2PreTrainedModel", "Blip2ForConditionalGeneration", "Blip2VisionModel", ] if TYPE_CHECKING: from .configuration_blip_a import ( BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipaConfig, BlipaQFormerConfig, BlipaVisionConfig, ) from .processing_blip_a import BlipaProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_blip_a import ( BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST, BlipaForConditionalGeneration, BlipaModel, BlipaPreTrainedModel, BlipaQFormerModel, BlipaVisionModel, ) else: import sys _UpperCamelCase : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
599
1
'''simple docstring''' import json import os import unittest from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class _lowerCAmelCase ( UpperCamelCase_ , unittest.TestCase ): """simple docstring""" lowerCAmelCase = OpenAIGPTTokenizer lowerCAmelCase = OpenAIGPTTokenizerFast lowerCAmelCase = True lowerCAmelCase = False def __A ( self : Tuple ) -> Any: """simple docstring""" super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt lowerCAmelCase = [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "w</w>", "r</w>", "t</w>", "lo", "low", "er</w>", "low</w>", "lowest</w>", "newer</w>", "wider</w>", "<unk>", ] lowerCAmelCase = dict(zip(SCREAMING_SNAKE_CASE , range(len(SCREAMING_SNAKE_CASE ) ) ) ) lowerCAmelCase = ["#version: 0.2", "l o", "lo w", "e r</w>", ""] lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" ) as fp: fp.write(json.dumps(SCREAMING_SNAKE_CASE ) ) with open(self.merges_file , "w" ) as fp: fp.write("\n".join(SCREAMING_SNAKE_CASE ) ) def __A ( self : Optional[int] , SCREAMING_SNAKE_CASE : Dict ) -> Dict: """simple docstring""" return "lower newer", "lower newer" def __A ( self : Optional[Any] ) -> List[str]: """simple docstring""" lowerCAmelCase = OpenAIGPTTokenizer(self.vocab_file , self.merges_file ) lowerCAmelCase = "lower" lowerCAmelCase = ["low", "er</w>"] lowerCAmelCase = tokenizer.tokenize(SCREAMING_SNAKE_CASE ) self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) lowerCAmelCase = tokens + ["<unk>"] lowerCAmelCase = [1_4, 1_5, 2_0] self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ) def __A ( self : Dict , SCREAMING_SNAKE_CASE : str=1_5 ) -> Union[str, Any]: """simple docstring""" for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ): lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) # Simple input lowerCAmelCase = "This is a simple input" lowerCAmelCase = ["This is a simple input 1", "This is a simple input 2"] lowerCAmelCase = ("This is a simple input", "This is a pair") lowerCAmelCase = [ ("This is a simple input 1", "This is a simple input 2"), ("This is a simple pair 1", "This is a simple pair 2"), ] # Simple input tests self.assertRaises(SCREAMING_SNAKE_CASE , tokenizer_r.encode , SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , padding="max_length" ) # Simple input self.assertRaises(SCREAMING_SNAKE_CASE , tokenizer_r.encode_plus , SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , padding="max_length" ) # Simple input self.assertRaises( SCREAMING_SNAKE_CASE , tokenizer_r.batch_encode_plus , SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , padding="max_length" , ) # Pair input self.assertRaises(SCREAMING_SNAKE_CASE , tokenizer_r.encode , SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , padding="max_length" ) # Pair input self.assertRaises(SCREAMING_SNAKE_CASE , tokenizer_r.encode_plus , SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , padding="max_length" ) # Pair input self.assertRaises( 
SCREAMING_SNAKE_CASE , tokenizer_r.batch_encode_plus , SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , padding="max_length" , ) def __A ( self : List[Any] ) -> int: """simple docstring""" pass @require_ftfy @require_spacy @require_tokenizers class _lowerCAmelCase ( UpperCamelCase_ ): """simple docstring""" pass
717
'''simple docstring''' import gc import random import unittest import numpy as np import torch from PIL import Image from diffusers import ( DDIMScheduler, KandinskyVaaControlnetImgaImgPipeline, KandinskyVaaPriorEmbaEmbPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class _lowerCAmelCase ( UpperCamelCase_ , unittest.TestCase ): """simple docstring""" lowerCAmelCase = KandinskyVaaControlnetImgaImgPipeline lowerCAmelCase = ['image_embeds', 'negative_image_embeds', 'image', 'hint'] lowerCAmelCase = ['image_embeds', 'negative_image_embeds', 'image', 'hint'] lowerCAmelCase = [ 'generator', 'height', 'width', 'strength', 'guidance_scale', 'num_inference_steps', 'return_dict', 'guidance_scale', 'num_images_per_prompt', 'output_type', 'return_dict', ] lowerCAmelCase = False @property def __A ( self : Dict ) -> Optional[Any]: """simple docstring""" return 3_2 @property def __A ( self : Any ) -> List[str]: """simple docstring""" return 3_2 @property def __A ( self : Optional[Any] ) -> List[str]: """simple docstring""" return self.time_input_dim @property def __A ( self : Optional[int] ) -> Dict: """simple docstring""" return self.time_input_dim * 4 @property def __A ( self : int ) -> List[Any]: """simple docstring""" return 1_0_0 @property def __A ( self : str ) -> Dict: """simple docstring""" torch.manual_seed(0 ) lowerCAmelCase = { "in_channels": 8, # Out channels is double in channels because predicts mean and variance "out_channels": 8, "addition_embed_type": "image_hint", "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"), "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"), "mid_block_type": "UNetMidBlock2DSimpleCrossAttn", "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2), "layers_per_block": 1, "encoder_hid_dim": self.text_embedder_hidden_size, "encoder_hid_dim_type": "image_proj", "cross_attention_dim": self.cross_attention_dim, "attention_head_dim": 4, "resnet_time_scale_shift": "scale_shift", "class_embed_type": None, } lowerCAmelCase = UNetaDConditionModel(**SCREAMING_SNAKE_CASE ) return model @property def __A ( self : List[str] ) -> Optional[int]: """simple docstring""" return { "block_out_channels": [3_2, 3_2, 6_4, 6_4], "down_block_types": [ "DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D", "AttnDownEncoderBlock2D", ], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 1_2, "out_channels": 3, "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"], "vq_embed_dim": 4, } @property def __A ( self : Any ) -> Dict: """simple docstring""" torch.manual_seed(0 ) lowerCAmelCase = VQModel(**self.dummy_movq_kwargs ) return model def __A ( self : Any ) -> Dict: """simple docstring""" lowerCAmelCase = self.dummy_unet lowerCAmelCase = self.dummy_movq lowerCAmelCase = { "num_train_timesteps": 1_0_0_0, "beta_schedule": "linear", "beta_start": 0.0_0_0_8_5, "beta_end": 0.0_1_2, "clip_sample": False, "set_alpha_to_one": False, "steps_offset": 0, "prediction_type": "epsilon", "thresholding": False, } lowerCAmelCase = DDIMScheduler(**SCREAMING_SNAKE_CASE ) lowerCAmelCase = { "unet": unet, "scheduler": scheduler, 
"movq": movq, } return components def __A ( self : Optional[int] , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : str=0 ) -> Optional[int]: """simple docstring""" lowerCAmelCase = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(SCREAMING_SNAKE_CASE ) ).to(SCREAMING_SNAKE_CASE ) lowerCAmelCase = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to( SCREAMING_SNAKE_CASE ) # create init_image lowerCAmelCase = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(SCREAMING_SNAKE_CASE ) ).to(SCREAMING_SNAKE_CASE ) lowerCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0] lowerCAmelCase = Image.fromarray(np.uinta(SCREAMING_SNAKE_CASE ) ).convert("RGB" ).resize((2_5_6, 2_5_6) ) # create hint lowerCAmelCase = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(SCREAMING_SNAKE_CASE ) ).to(SCREAMING_SNAKE_CASE ) if str(SCREAMING_SNAKE_CASE ).startswith("mps" ): lowerCAmelCase = torch.manual_seed(SCREAMING_SNAKE_CASE ) else: lowerCAmelCase = torch.Generator(device=SCREAMING_SNAKE_CASE ).manual_seed(SCREAMING_SNAKE_CASE ) lowerCAmelCase = { "image": init_image, "image_embeds": image_embeds, "negative_image_embeds": negative_image_embeds, "hint": hint, "generator": generator, "height": 6_4, "width": 6_4, "num_inference_steps": 1_0, "guidance_scale": 7.0, "strength": 0.2, "output_type": "np", } return inputs def __A ( self : Dict ) -> List[str]: """simple docstring""" lowerCAmelCase = "cpu" lowerCAmelCase = self.get_dummy_components() lowerCAmelCase = self.pipeline_class(**SCREAMING_SNAKE_CASE ) lowerCAmelCase = pipe.to(SCREAMING_SNAKE_CASE ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE ) lowerCAmelCase = pipe(**self.get_dummy_inputs(SCREAMING_SNAKE_CASE ) ) lowerCAmelCase = output.images lowerCAmelCase = pipe( **self.get_dummy_inputs(SCREAMING_SNAKE_CASE ) , return_dict=SCREAMING_SNAKE_CASE , )[0] lowerCAmelCase = image[0, -3:, -3:, -1] lowerCAmelCase = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 6_4, 6_4, 3) lowerCAmelCase = np.array( [0.5_4_9_8_5_0_3_4, 0.5_5_5_0_9_3_6_5, 0.5_2_5_6_1_5_0_4, 0.5_5_7_0_4_9_4, 0.5_5_9_3_8_1_8, 0.5_2_6_3_9_7_9, 0.5_0_2_8_5_6_4_3, 0.5_0_6_9_8_4_6, 0.5_1_1_9_6_7_3_6] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" @slow @require_torch_gpu class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" def __A ( self : Tuple ) -> List[Any]: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def __A ( self : int ) -> Optional[int]: """simple docstring""" lowerCAmelCase = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy" ) lowerCAmelCase = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" ) lowerCAmelCase = init_image.resize((5_1_2, 5_1_2) ) lowerCAmelCase = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinskyv22/hint_image_cat.png" ) lowerCAmelCase = torch.from_numpy(np.array(SCREAMING_SNAKE_CASE ) ).float() / 2_5_5.0 lowerCAmelCase = hint.permute(2 , 0 , 1 ).unsqueeze(0 ) lowerCAmelCase = "A robot, 4k photo" lowerCAmelCase = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained( 
"kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.floataa ) pipe_prior.to(SCREAMING_SNAKE_CASE ) lowerCAmelCase = KandinskyVaaControlnetImgaImgPipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-controlnet-depth" , torch_dtype=torch.floataa ) lowerCAmelCase = pipeline.to(SCREAMING_SNAKE_CASE ) pipeline.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE ) lowerCAmelCase = torch.Generator(device="cpu" ).manual_seed(0 ) lowerCAmelCase , lowerCAmelCase = pipe_prior( SCREAMING_SNAKE_CASE , image=SCREAMING_SNAKE_CASE , strength=0.8_5 , generator=SCREAMING_SNAKE_CASE , negative_prompt="" , ).to_tuple() lowerCAmelCase = pipeline( image=SCREAMING_SNAKE_CASE , image_embeds=SCREAMING_SNAKE_CASE , negative_image_embeds=SCREAMING_SNAKE_CASE , hint=SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE , num_inference_steps=1_0_0 , height=5_1_2 , width=5_1_2 , strength=0.5 , output_type="np" , ) lowerCAmelCase = output.images[0] assert image.shape == (5_1_2, 5_1_2, 3) assert_mean_pixel_difference(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
159
0
from __future__ import annotations


def ohms_law(voltage: float, current: float, resistance: float) -> dict[str, float]:
    """
    Apply Ohm's Law on any two given electrical values (voltage, current, resistance)
    and return the name/value pair of the one that was passed as zero.
    """
    if (voltage, current, resistance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance < 0:
        raise ValueError("Resistance cannot be negative")
    if voltage == 0:
        return {"voltage": float(current * resistance)}
    elif current == 0:
        return {"current": voltage / resistance}
    elif resistance == 0:
        return {"resistance": voltage / current}
    else:
        raise ValueError("Exactly one argument must be 0")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
81
from queue import PriorityQueue
from typing import Any

import numpy as np


def pass_and_relaxation(
    graph: dict,
    v: str,
    visited_forward: set,
    visited_backward: set,
    cst_fwd: dict,
    cst_bwd: dict,
    queue: PriorityQueue,
    parent: dict,
    shortest_distance: float,
) -> float:
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance


def bidirectional_dij(source: str, destination: str, graph_forward: dict, graph_backward: dict) -> int:
    shortest_path_distance = -1

    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward: PriorityQueue[Any] = PriorityQueue()
    queue_backward: PriorityQueue[Any] = PriorityQueue()

    shortest_distance = np.inf

    queue_forward.put((0, source))
    queue_backward.put((0, destination))

    if source == destination:
        return 0

    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)

        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)

        shortest_distance = pass_and_relaxation(
            graph_forward,
            v_fwd,
            visited_forward,
            visited_backward,
            cst_fwd,
            cst_bwd,
            queue_forward,
            parent_forward,
            shortest_distance,
        )

        shortest_distance = pass_and_relaxation(
            graph_backward,
            v_bwd,
            visited_backward,
            visited_forward,
            cst_bwd,
            cst_fwd,
            queue_backward,
            parent_backward,
            shortest_distance,
        )

        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break

    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance


graph_fwd = {
    "B": [["C", 1]],
    "C": [["D", 1]],
    "D": [["F", 1]],
    "E": [["B", 1], ["G", 2]],
    "F": [],
    "G": [["F", 1]],
}
graph_bwd = {
    "B": [["E", 1]],
    "C": [["B", 1]],
    "D": [["C", 1]],
    "F": [["D", 1], ["G", 1]],
    "E": [[None, np.inf]],
    "G": [["E", 2]],
}

if __name__ == "__main__":
    import doctest

    doctest.testmod()
226
0
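Usage sketches for the two samples above, assuming both are importable; the expected outputs are derived by hand from the repaired code and its example graphs.

# Solve for the single zero-valued quantity via Ohm's law.
print(ohms_law(voltage=10, current=0, resistance=5))  # {'current': 2.0}
# Shortest E -> F distance over the example graphs: E -> G -> F = 3.
print(bidirectional_dij("E", "F", graph_fwd, graph_bwd))  # 3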
import torch from diffusers import DDIMParallelScheduler from .test_schedulers import SchedulerCommonTest class _a ( UpperCamelCase_ ): """simple docstring""" A = (DDIMParallelScheduler,) A = (('eta', 0.0), ('num_inference_steps', 50)) def __a ( self ,**__SCREAMING_SNAKE_CASE ): SCREAMING_SNAKE_CASE : str = { '''num_train_timesteps''': 1000, '''beta_start''': 0.0001, '''beta_end''': 0.02, '''beta_schedule''': '''linear''', '''clip_sample''': True, } config.update(**__SCREAMING_SNAKE_CASE ) return config def __a ( self ,**__SCREAMING_SNAKE_CASE ): SCREAMING_SNAKE_CASE : List[Any] = self.scheduler_classes[0] SCREAMING_SNAKE_CASE : int = self.get_scheduler_config(**__SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE : str = scheduler_class(**__SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE : str = 10, 0.0 SCREAMING_SNAKE_CASE : Tuple = self.dummy_model() SCREAMING_SNAKE_CASE : int = self.dummy_sample_deter scheduler.set_timesteps(__SCREAMING_SNAKE_CASE ) for t in scheduler.timesteps: SCREAMING_SNAKE_CASE : Optional[Any] = model(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE : List[str] = scheduler.step(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ).prev_sample return sample def __a ( self ): for timesteps in [100, 500, 1000]: self.check_over_configs(num_train_timesteps=__SCREAMING_SNAKE_CASE ) def __a ( self ): for steps_offset in [0, 1]: self.check_over_configs(steps_offset=__SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE : Any = self.scheduler_classes[0] SCREAMING_SNAKE_CASE : Optional[int] = self.get_scheduler_config(steps_offset=1 ) SCREAMING_SNAKE_CASE : Optional[int] = scheduler_class(**__SCREAMING_SNAKE_CASE ) scheduler.set_timesteps(5 ) assert torch.equal(scheduler.timesteps ,torch.LongTensor([801, 601, 401, 201, 1] ) ) def __a ( self ): for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] ,[0.002, 0.02, 0.2, 2] ): self.check_over_configs(beta_start=__SCREAMING_SNAKE_CASE ,beta_end=__SCREAMING_SNAKE_CASE ) def __a ( self ): for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=__SCREAMING_SNAKE_CASE ) def __a ( self ): for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=__SCREAMING_SNAKE_CASE ) def __a ( self ): for clip_sample in [True, False]: self.check_over_configs(clip_sample=__SCREAMING_SNAKE_CASE ) def __a ( self ): for timestep_spacing in ["trailing", "leading"]: self.check_over_configs(timestep_spacing=__SCREAMING_SNAKE_CASE ) def __a ( self ): for rescale_betas_zero_snr in [True, False]: self.check_over_configs(rescale_betas_zero_snr=__SCREAMING_SNAKE_CASE ) def __a ( self ): self.check_over_configs(thresholding=__SCREAMING_SNAKE_CASE ) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs( thresholding=__SCREAMING_SNAKE_CASE ,prediction_type=__SCREAMING_SNAKE_CASE ,sample_max_value=__SCREAMING_SNAKE_CASE ,) def __a ( self ): for t in [1, 10, 49]: self.check_over_forward(time_step=__SCREAMING_SNAKE_CASE ) def __a ( self ): for t, num_inference_steps in zip([1, 10, 50] ,[10, 50, 500] ): self.check_over_forward(time_step=__SCREAMING_SNAKE_CASE ,num_inference_steps=__SCREAMING_SNAKE_CASE ) def __a ( self ): for t, eta in zip([1, 10, 49] ,[0.0, 0.5, 1.0] ): self.check_over_forward(time_step=__SCREAMING_SNAKE_CASE ,eta=__SCREAMING_SNAKE_CASE ) def __a ( self ): SCREAMING_SNAKE_CASE : Tuple = self.scheduler_classes[0] SCREAMING_SNAKE_CASE : str = self.get_scheduler_config() 
SCREAMING_SNAKE_CASE : List[str] = scheduler_class(**__SCREAMING_SNAKE_CASE ) assert torch.sum(torch.abs(scheduler._get_variance(0 ,0 ) - 0.0 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(420 ,400 ) - 0.1_4771 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(980 ,960 ) - 0.3_2460 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(0 ,0 ) - 0.0 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(487 ,486 ) - 0.0_0979 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(999 ,998 ) - 0.02 ) ) < 1e-5 def __a ( self ): SCREAMING_SNAKE_CASE : Optional[Any] = self.scheduler_classes[0] SCREAMING_SNAKE_CASE : List[Any] = self.get_scheduler_config() SCREAMING_SNAKE_CASE : List[str] = scheduler_class(**__SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE : Dict = 10, 0.0 scheduler.set_timesteps(__SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE : List[str] = self.dummy_model() SCREAMING_SNAKE_CASE : Tuple = self.dummy_sample_deter SCREAMING_SNAKE_CASE : Tuple = self.dummy_sample_deter + 0.1 SCREAMING_SNAKE_CASE : str = self.dummy_sample_deter - 0.1 SCREAMING_SNAKE_CASE : List[Any] = samplea.shape[0] SCREAMING_SNAKE_CASE : Any = torch.stack([samplea, samplea, samplea] ,dim=0 ) SCREAMING_SNAKE_CASE : List[str] = torch.arange(__SCREAMING_SNAKE_CASE )[0:3, None].repeat(1 ,__SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE : Optional[Any] = model(samples.flatten(0 ,1 ) ,timesteps.flatten(0 ,1 ) ) SCREAMING_SNAKE_CASE : List[Any] = scheduler.batch_step_no_noise(__SCREAMING_SNAKE_CASE ,timesteps.flatten(0 ,1 ) ,samples.flatten(0 ,1 ) ,__SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE : Any = torch.sum(torch.abs(__SCREAMING_SNAKE_CASE ) ) SCREAMING_SNAKE_CASE : int = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE ) ) assert abs(result_sum.item() - 1147.7904 ) < 1e-2 assert abs(result_mean.item() - 0.4982 ) < 1e-3 def __a ( self ): SCREAMING_SNAKE_CASE : Any = self.full_loop() SCREAMING_SNAKE_CASE : Any = torch.sum(torch.abs(__SCREAMING_SNAKE_CASE ) ) SCREAMING_SNAKE_CASE : List[str] = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE ) ) assert abs(result_sum.item() - 172.0067 ) < 1e-2 assert abs(result_mean.item() - 0.22_3967 ) < 1e-3 def __a ( self ): SCREAMING_SNAKE_CASE : Any = self.full_loop(prediction_type='v_prediction' ) SCREAMING_SNAKE_CASE : str = torch.sum(torch.abs(__SCREAMING_SNAKE_CASE ) ) SCREAMING_SNAKE_CASE : Union[str, Any] = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE ) ) assert abs(result_sum.item() - 52.5302 ) < 1e-2 assert abs(result_mean.item() - 0.0684 ) < 1e-3 def __a ( self ): # We specify different beta, so that the first alpha is 0.99 SCREAMING_SNAKE_CASE : int = self.full_loop(set_alpha_to_one=__SCREAMING_SNAKE_CASE ,beta_start=0.01 ) SCREAMING_SNAKE_CASE : Optional[Any] = torch.sum(torch.abs(__SCREAMING_SNAKE_CASE ) ) SCREAMING_SNAKE_CASE : List[Any] = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE ) ) assert abs(result_sum.item() - 149.8295 ) < 1e-2 assert abs(result_mean.item() - 0.1951 ) < 1e-3 def __a ( self ): # We specify different beta, so that the first alpha is 0.99 SCREAMING_SNAKE_CASE : Any = self.full_loop(set_alpha_to_one=__SCREAMING_SNAKE_CASE ,beta_start=0.01 ) SCREAMING_SNAKE_CASE : int = torch.sum(torch.abs(__SCREAMING_SNAKE_CASE ) ) SCREAMING_SNAKE_CASE : Tuple = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE ) ) assert abs(result_sum.item() - 149.0784 ) < 1e-2 assert abs(result_mean.item() - 0.1941 ) < 1e-3
719
'''simple docstring''' from typing import Dict, List, Optional, Tuple, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, flip_channel_order, get_resize_output_image_size, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging if is_vision_available(): import PIL if is_torch_available(): import torch __UpperCAmelCase = logging.get_logger(__name__) class _a ( SCREAMING_SNAKE_CASE ): """simple docstring""" A = ['pixel_values'] def __init__( self ,__SCREAMING_SNAKE_CASE = True ,__SCREAMING_SNAKE_CASE = None ,__SCREAMING_SNAKE_CASE = PILImageResampling.BILINEAR ,__SCREAMING_SNAKE_CASE = True ,__SCREAMING_SNAKE_CASE = 1 / 255 ,__SCREAMING_SNAKE_CASE = True ,__SCREAMING_SNAKE_CASE = None ,__SCREAMING_SNAKE_CASE = True ,**__SCREAMING_SNAKE_CASE ,): super().__init__(**__SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE : Tuple = size if size is not None else {'shortest_edge': 224} SCREAMING_SNAKE_CASE : Union[str, Any] = get_size_dict(__SCREAMING_SNAKE_CASE ,default_to_square=__SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE : Any = crop_size if crop_size is not None else {'height': 256, 'width': 256} SCREAMING_SNAKE_CASE : str = get_size_dict(__SCREAMING_SNAKE_CASE ,param_name='crop_size' ) SCREAMING_SNAKE_CASE : str = do_resize SCREAMING_SNAKE_CASE : Dict = size SCREAMING_SNAKE_CASE : int = resample SCREAMING_SNAKE_CASE : List[str] = do_rescale SCREAMING_SNAKE_CASE : List[Any] = rescale_factor SCREAMING_SNAKE_CASE : Optional[Any] = do_center_crop SCREAMING_SNAKE_CASE : Any = crop_size SCREAMING_SNAKE_CASE : List[str] = do_flip_channel_order def __a ( self ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = PIL.Image.BILINEAR ,__SCREAMING_SNAKE_CASE = None ,**__SCREAMING_SNAKE_CASE ,): SCREAMING_SNAKE_CASE : int = get_size_dict(__SCREAMING_SNAKE_CASE ,default_to_square=__SCREAMING_SNAKE_CASE ) if "shortest_edge" not in size: raise ValueError(f"""The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}""" ) SCREAMING_SNAKE_CASE : str = get_resize_output_image_size(__SCREAMING_SNAKE_CASE ,size=size['shortest_edge'] ,default_to_square=__SCREAMING_SNAKE_CASE ) return resize(__SCREAMING_SNAKE_CASE ,size=__SCREAMING_SNAKE_CASE ,resample=__SCREAMING_SNAKE_CASE ,data_format=__SCREAMING_SNAKE_CASE ,**__SCREAMING_SNAKE_CASE ) def __a ( self ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = None ,**__SCREAMING_SNAKE_CASE ,): SCREAMING_SNAKE_CASE : List[Any] = get_size_dict(__SCREAMING_SNAKE_CASE ) if "height" not in size or "width" not in size: raise ValueError(f"""The `size` dictionary must contain the keys `height` and `width`. 
Got {size.keys()}""" ) return center_crop(__SCREAMING_SNAKE_CASE ,size=(size['height'], size['width']) ,data_format=__SCREAMING_SNAKE_CASE ,**__SCREAMING_SNAKE_CASE ) def __a ( self ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = None ,**__SCREAMING_SNAKE_CASE ,): return rescale(__SCREAMING_SNAKE_CASE ,scale=__SCREAMING_SNAKE_CASE ,data_format=__SCREAMING_SNAKE_CASE ,**__SCREAMING_SNAKE_CASE ) def __a ( self ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = None ): return flip_channel_order(__SCREAMING_SNAKE_CASE ,data_format=__SCREAMING_SNAKE_CASE ) def __a ( self ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = None ,__SCREAMING_SNAKE_CASE = None ,__SCREAMING_SNAKE_CASE = None ,__SCREAMING_SNAKE_CASE = None ,__SCREAMING_SNAKE_CASE = None ,__SCREAMING_SNAKE_CASE = None ,__SCREAMING_SNAKE_CASE = None ,__SCREAMING_SNAKE_CASE = None ,__SCREAMING_SNAKE_CASE = None ,__SCREAMING_SNAKE_CASE = ChannelDimension.FIRST ,**__SCREAMING_SNAKE_CASE ,): SCREAMING_SNAKE_CASE : Tuple = do_resize if do_resize is not None else self.do_resize SCREAMING_SNAKE_CASE : Dict = resample if resample is not None else self.resample SCREAMING_SNAKE_CASE : int = do_rescale if do_rescale is not None else self.do_rescale SCREAMING_SNAKE_CASE : List[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor SCREAMING_SNAKE_CASE : Optional[int] = do_center_crop if do_center_crop is not None else self.do_center_crop SCREAMING_SNAKE_CASE : Union[str, Any] = ( do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order ) SCREAMING_SNAKE_CASE : Optional[int] = size if size is not None else self.size SCREAMING_SNAKE_CASE : Dict = get_size_dict(__SCREAMING_SNAKE_CASE ,default_to_square=__SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE : List[str] = crop_size if crop_size is not None else self.crop_size SCREAMING_SNAKE_CASE : Tuple = get_size_dict(__SCREAMING_SNAKE_CASE ,param_name='crop_size' ) SCREAMING_SNAKE_CASE : Union[str, Any] = make_list_of_images(__SCREAMING_SNAKE_CASE ) if not valid_images(__SCREAMING_SNAKE_CASE ): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) if do_resize and size is None: raise ValueError('Size must be specified if do_resize is True.' ) if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.' ) if do_center_crop and crop_size is None: raise ValueError('Crop size must be specified if do_center_crop is True.' ) # All transformations expect numpy arrays. 
SCREAMING_SNAKE_CASE : Optional[Any] = [to_numpy_array(__SCREAMING_SNAKE_CASE ) for image in images] if do_resize: SCREAMING_SNAKE_CASE : Tuple = [self.resize(image=__SCREAMING_SNAKE_CASE ,size=__SCREAMING_SNAKE_CASE ,resample=__SCREAMING_SNAKE_CASE ) for image in images] if do_center_crop: SCREAMING_SNAKE_CASE : List[str] = [self.center_crop(image=__SCREAMING_SNAKE_CASE ,size=__SCREAMING_SNAKE_CASE ) for image in images] if do_rescale: SCREAMING_SNAKE_CASE : Dict = [self.rescale(image=__SCREAMING_SNAKE_CASE ,scale=__SCREAMING_SNAKE_CASE ) for image in images] # the pretrained checkpoints assume images are BGR, not RGB if do_flip_channel_order: SCREAMING_SNAKE_CASE : Optional[int] = [self.flip_channel_order(image=__SCREAMING_SNAKE_CASE ) for image in images] SCREAMING_SNAKE_CASE : Dict = [to_channel_dimension_format(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ) for image in images] SCREAMING_SNAKE_CASE : Optional[int] = {'pixel_values': images} return BatchFeature(data=__SCREAMING_SNAKE_CASE ,tensor_type=__SCREAMING_SNAKE_CASE ) def __a ( self ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = None ): SCREAMING_SNAKE_CASE : Tuple = outputs.logits # Resize logits and compute semantic segmentation maps if target_sizes is not None: if len(__SCREAMING_SNAKE_CASE ) != len(__SCREAMING_SNAKE_CASE ): raise ValueError( 'Make sure that you pass in as many target sizes as the batch dimension of the logits' ) if is_torch_tensor(__SCREAMING_SNAKE_CASE ): SCREAMING_SNAKE_CASE : Tuple = target_sizes.numpy() SCREAMING_SNAKE_CASE : Optional[Any] = [] for idx in range(len(__SCREAMING_SNAKE_CASE ) ): SCREAMING_SNAKE_CASE : Optional[Any] = torch.nn.functional.interpolate( logits[idx].unsqueeze(dim=0 ) ,size=target_sizes[idx] ,mode='bilinear' ,align_corners=__SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE : str = resized_logits[0].argmax(dim=0 ) semantic_segmentation.append(__SCREAMING_SNAKE_CASE ) else: SCREAMING_SNAKE_CASE : Union[str, Any] = logits.argmax(dim=1 ) SCREAMING_SNAKE_CASE : Dict = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )] return semantic_segmentation
220
0
import json import logging import os import re import sys from dataclasses import dataclass, field from typing import Any, Dict, List, Optional, Union import datasets import numpy as np import torch import torchaudio from packaging import version from torch import nn import transformers from transformers import ( HfArgumentParser, Trainer, TrainingArguments, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaForCTC, WavaVecaProcessor, is_apex_available, set_seed, ) from transformers.trainer_utils import get_last_checkpoint, is_main_process if is_apex_available(): from apex import amp if version.parse(version.parse(torch.__version__).base_version) >= version.parse('''1.6'''): _lowerCamelCase : List[Any] = True from torch.cuda.amp import autocast _lowerCamelCase : List[str] = logging.getLogger(__name__) def A__ ( __A : Optional[Any]=None , __A : Optional[int]=None ) ->List[Any]: return field(default_factory=lambda: default , metadata=snake_case__ ) @dataclass class lowerCAmelCase__ : '''simple docstring''' lowercase_ = field( metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} ) lowercase_ = field( default=_SCREAMING_SNAKE_CASE , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , ) lowercase_ = field( default=_SCREAMING_SNAKE_CASE , metadata={"""help""": """Whether to freeze the feature extractor layers of the model."""} ) lowercase_ = field( default=0.1 , metadata={"""help""": """The dropout ratio for the attention probabilities."""} ) lowercase_ = field( default=0.1 , metadata={"""help""": """The dropout ratio for activations inside the fully connected layer."""} ) lowercase_ = field( default=0.1 , metadata={ """help""": """The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler.""" } , ) lowercase_ = field( default=0.1 , metadata={"""help""": """The dropout probabilitiy for all 1D convolutional layers in feature extractor."""} , ) lowercase_ = field( default=0.05 , metadata={ """help""": ( """Propability of each feature vector along the time axis to be chosen as the start of the vector""" """span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature""" """vectors will be masked along the time axis. This is only relevant if ``apply_spec_augment is True``.""" ) } , ) lowercase_ = field(default=0.0 , metadata={"""help""": """The LayerDrop probability."""} ) @dataclass class lowerCAmelCase__ : '''simple docstring''' lowercase_ = field( default=_SCREAMING_SNAKE_CASE , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} ) lowercase_ = field( default="""train+validation""" , metadata={ """help""": """The name of the training data set split to use (via the datasets library). 
Defaults to 'train'""" } , ) lowercase_ = field( default=_SCREAMING_SNAKE_CASE , metadata={"""help""": """Overwrite the cached preprocessed datasets or not."""} ) lowercase_ = field( default=_SCREAMING_SNAKE_CASE , metadata={"""help""": """The number of processes to use for the preprocessing."""} , ) lowercase_ = field( default=_SCREAMING_SNAKE_CASE , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of training examples to this """ """value if set.""" ) } , ) lowercase_ = field( default=_SCREAMING_SNAKE_CASE , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of validation examples to this """ """value if set.""" ) } , ) lowercase_ = list_field( default=[""",""", """?""", """.""", """!""", """-""", """;""", """:""", """\"\"""", """%""", """'""", """\"""", """�"""] , metadata={"""help""": """A list of characters to remove from the transcripts."""} , ) @dataclass class lowerCAmelCase__ : '''simple docstring''' lowercase_ = 42 lowercase_ = True lowercase_ = None lowercase_ = None lowercase_ = None lowercase_ = None def __call__( self , lowercase__ ): '''simple docstring''' __A =[{"""input_values""": feature["""input_values"""]} for feature in features] __A =[{"""input_ids""": feature["""labels"""]} for feature in features] __A =self.processor.pad( lowerCAmelCase_ , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='''pt''' , ) __A =self.processor.pad( labels=lowerCAmelCase_ , padding=self.padding , max_length=self.max_length_labels , pad_to_multiple_of=self.pad_to_multiple_of_labels , return_tensors='''pt''' , ) # replace padding with -100 to ignore loss correctly __A =labels_batch["""input_ids"""].masked_fill(labels_batch.attention_mask.ne(1 ) , -1_0_0 ) __A =labels return batch class lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE ): '''simple docstring''' def __UpperCamelCase ( self , lowercase__ , lowercase__ ): '''simple docstring''' model.train() __A =self._prepare_inputs(lowerCAmelCase_ ) if self.use_amp: with autocast(): __A =self.compute_loss(lowerCAmelCase_ , lowerCAmelCase_ ) else: __A =self.compute_loss(lowerCAmelCase_ , lowerCAmelCase_ ) if self.args.n_gpu > 1: if model.module.config.ctc_loss_reduction == "mean": __A =loss.mean() elif model.module.config.ctc_loss_reduction == "sum": __A =loss.sum() / (inputs["""labels"""] >= 0).sum() else: raise ValueError(f'''{model.config.ctc_loss_reduction} is not valid. Choose one of [\'mean\', \'sum\']''' ) if self.args.gradient_accumulation_steps > 1: __A =loss / self.args.gradient_accumulation_steps if self.use_amp: self.scaler.scale(lowerCAmelCase_ ).backward() elif self.use_apex: with amp.scale_loss(lowerCAmelCase_ , self.optimizer ) as scaled_loss: scaled_loss.backward() elif self.deepspeed: self.deepspeed.backward(lowerCAmelCase_ ) else: loss.backward() return loss.detach() def A__ ( ) ->Union[str, Any]: # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. __A =HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. __A =parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: __A =parser.parse_args_into_dataclasses() # Detecting last checkpoint. 
__A =None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: __A =get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F'''Output directory ({training_args.output_dir}) already exists and is not empty. ''' '''Use --overwrite_output_dir to overcome.''' ) elif last_checkpoint is not None: logger.info( F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change ''' '''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' ) # Setup logging logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , ) logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN ) # Log on each process the small summary: logger.warning( F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}''' + F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() logger.info('''Training/evaluation parameters %s''' , snake_case__ ) # Set seed before initializing model. set_seed(training_args.seed ) # Get the datasets: __A =datasets.load_dataset( '''common_voice''' , data_args.dataset_config_name , split=data_args.train_split_name ) __A =datasets.load_dataset('''common_voice''' , data_args.dataset_config_name , split='''test''' ) # Create and save tokenizer __A =F'''[{''.join(data_args.chars_to_ignore )}]''' def remove_special_characters(__A : List[Any] ): __A =re.sub(snake_case__ , '''''' , batch['''sentence'''] ).lower() + """ """ return batch __A =train_dataset.map(snake_case__ , remove_columns=['''sentence'''] ) __A =eval_dataset.map(snake_case__ , remove_columns=['''sentence'''] ) def extract_all_chars(__A : Tuple ): __A =""" """.join(batch['''text'''] ) __A =list(set(snake_case__ ) ) return {"vocab": [vocab], "all_text": [all_text]} __A =train_dataset.map( snake_case__ , batched=snake_case__ , batch_size=-1 , keep_in_memory=snake_case__ , remove_columns=train_dataset.column_names , ) __A =train_dataset.map( snake_case__ , batched=snake_case__ , batch_size=-1 , keep_in_memory=snake_case__ , remove_columns=eval_dataset.column_names , ) __A =list(set(vocab_train['''vocab'''][0] ) | set(vocab_test['''vocab'''][0] ) ) __A ={v: k for k, v in enumerate(snake_case__ )} __A =vocab_dict[""" """] del vocab_dict[" "] __A =len(snake_case__ ) __A =len(snake_case__ ) with open('''vocab.json''' , '''w''' ) as vocab_file: json.dump(snake_case__ , snake_case__ ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
__A =WavaVecaCTCTokenizer( '''vocab.json''' , unk_token='''[UNK]''' , pad_token='''[PAD]''' , word_delimiter_token='''|''' , ) __A =WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=1_60_00 , padding_value=0.0 , do_normalize=snake_case__ , return_attention_mask=snake_case__ ) __A =WavaVecaProcessor(feature_extractor=snake_case__ , tokenizer=snake_case__ ) __A =WavaVecaForCTC.from_pretrained( model_args.model_name_or_path , cache_dir=model_args.cache_dir , activation_dropout=model_args.activation_dropout , attention_dropout=model_args.attention_dropout , hidden_dropout=model_args.hidden_dropout , feat_proj_dropout=model_args.feat_proj_dropout , mask_time_prob=model_args.mask_time_prob , gradient_checkpointing=training_args.gradient_checkpointing , layerdrop=model_args.layerdrop , ctc_loss_reduction='''mean''' , pad_token_id=processor.tokenizer.pad_token_id , vocab_size=len(processor.tokenizer ) , ) if data_args.max_train_samples is not None: __A =min(len(snake_case__ ) , data_args.max_train_samples ) __A =train_dataset.select(range(snake_case__ ) ) if data_args.max_val_samples is not None: __A =eval_dataset.select(range(data_args.max_val_samples ) ) __A =torchaudio.transforms.Resample(4_80_00 , 1_60_00 ) # Preprocessing the datasets. # We need to read the aduio files as arrays and tokenize the targets. def speech_file_to_array_fn(__A : Any ): __A =torchaudio.load(batch['''path'''] ) __A =resampler(snake_case__ ).squeeze().numpy() __A =1_60_00 __A =batch["""text"""] return batch __A =train_dataset.map( snake_case__ , remove_columns=train_dataset.column_names , num_proc=data_args.preprocessing_num_workers , ) __A =eval_dataset.map( snake_case__ , remove_columns=eval_dataset.column_names , num_proc=data_args.preprocessing_num_workers , ) def prepare_dataset(__A : Tuple ): # check that all files have the correct sampling rate assert ( len(set(batch['''sampling_rate'''] ) ) == 1 ), F'''Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}.''' __A =processor( audio=batch['''speech'''] , text=batch['''target_text'''] , sampling_rate=batch['''sampling_rate'''][0] ) batch.update(snake_case__ ) return batch __A =train_dataset.map( snake_case__ , remove_columns=train_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=snake_case__ , num_proc=data_args.preprocessing_num_workers , ) __A =eval_dataset.map( snake_case__ , remove_columns=eval_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=snake_case__ , num_proc=data_args.preprocessing_num_workers , ) # Metric __A =datasets.load_metric('''wer''' ) def compute_metrics(__A : Union[str, Any] ): __A =pred.predictions __A =np.argmax(snake_case__ , axis=-1 ) __A =processor.tokenizer.pad_token_id __A =processor.batch_decode(snake_case__ ) # we do not want to group tokens when computing the metrics __A =processor.batch_decode(pred.label_ids , group_tokens=snake_case__ ) __A =wer_metric.compute(predictions=snake_case__ , references=snake_case__ ) return {"wer": wer} if model_args.freeze_feature_extractor: model.freeze_feature_extractor() # Data collator __A =DataCollatorCTCWithPadding(processor=snake_case__ , padding=snake_case__ ) # Initialize our Trainer __A =CTCTrainer( model=snake_case__ , data_collator=snake_case__ , args=snake_case__ , compute_metrics=snake_case__ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=processor.feature_extractor , ) # 
Training if training_args.do_train: if last_checkpoint is not None: __A =last_checkpoint elif os.path.isdir(model_args.model_name_or_path ): __A =model_args.model_name_or_path else: __A =None # Save the feature_extractor and the tokenizer if is_main_process(training_args.local_rank ): processor.save_pretrained(training_args.output_dir ) __A =trainer.train(resume_from_checkpoint=snake_case__ ) trainer.save_model() __A =train_result.metrics __A =( data_args.max_train_samples if data_args.max_train_samples is not None else len(snake_case__ ) ) __A =min(snake_case__ , len(snake_case__ ) ) trainer.log_metrics('''train''' , snake_case__ ) trainer.save_metrics('''train''' , snake_case__ ) trainer.save_state() # Evaluation __A ={} if training_args.do_eval: logger.info('''*** Evaluate ***''' ) __A =trainer.evaluate() __A =data_args.max_val_samples if data_args.max_val_samples is not None else len(snake_case__ ) __A =min(snake_case__ , len(snake_case__ ) ) trainer.log_metrics('''eval''' , snake_case__ ) trainer.save_metrics('''eval''' , snake_case__ ) return results if __name__ == "__main__": main()
184
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _lowerCAmelCase = { "configuration_timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase = [ "TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "TimesformerModel", "TimesformerForVideoClassification", "TimesformerPreTrainedModel", ] if TYPE_CHECKING: from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_timesformer import ( TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TimesformerForVideoClassification, TimesformerModel, TimesformerPreTrainedModel, ) else: import sys _lowerCAmelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
180
0
from __future__ import annotations


def one_letter_uppercase_variants(txt: str) -> list[str]:
    """Return every variant of `txt` with exactly one alphabetic character uppercased."""
    return [txt[:a] + txt[a].upper() + txt[a + 1 :] for a in range(len(txt)) if txt[a].isalpha()]


if __name__ == "__main__":
    __import__("doctest").testmod()
658
def binary_insertion_sort(collection):
    """Sort ``collection`` in place with insertion sort, using binary search
    to locate each element's insertion point."""
    n = len(collection)
    for i in range(1, n):
        val = collection[i]
        low = 0
        high = i - 1
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        # shift the larger elements one slot to the right, then insert
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(binary_insertion_sort(unsorted))
658
1
"""simple docstring""" def _a ( _snake_case = 200_0000 ): """simple docstring""" UpperCAmelCase = [0 for i in range(n + 1 )] UpperCAmelCase = 1 UpperCAmelCase = 1 for i in range(2 , int(n**0.5 ) + 1 ): if primality_list[i] == 0: for j in range(i * i , n + 1 , __lowercase ): UpperCAmelCase = 1 UpperCAmelCase = 0 for i in range(__lowercase ): if primality_list[i] == 0: sum_of_primes += i return sum_of_primes if __name__ == "__main__": print(F"""{solution() = }""")
341
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) __magic_name__ = {"""configuration_reformer""": ["""REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ReformerConfig"""]} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __magic_name__ = ["""ReformerTokenizer"""] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __magic_name__ = ["""ReformerTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __magic_name__ = [ """REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""", """ReformerAttention""", """ReformerForMaskedLM""", """ReformerForQuestionAnswering""", """ReformerForSequenceClassification""", """ReformerLayer""", """ReformerModel""", """ReformerModelWithLMHead""", """ReformerPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_reformer import ReformerTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_reformer_fast import ReformerTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_reformer import ( REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ReformerAttention, ReformerForMaskedLM, ReformerForQuestionAnswering, ReformerForSequenceClassification, ReformerLayer, ReformerModel, ReformerModelWithLMHead, ReformerPreTrainedModel, ) else: import sys __magic_name__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
129
0
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING


logger = logging.get_logger(__name__)

DETA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "ut/deta": "https://huggingface.co/ut/deta/resolve/main/config.json",
}


class DetaConfig(PretrainedConfig):
    model_type = "deta"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        backbone_config=None,
        num_queries=900,
        max_position_embeddings=2048,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=1024,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,
        auxiliary_loss=False,
        position_embedding_type="sine",
        num_feature_levels=5,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=True,
        two_stage_num_proposals=300,
        with_box_refine=True,
        assign_first_stage=True,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        **kwargs,
    ):
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage2", "stage3", "stage4"])
        else:
            if isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.pop("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        self.assign_first_stage = assign_first_stage
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
557
def lucas_lehmer_test(p: int) -> bool:
    """Return True if the Mersenne number 2**p - 1 is prime."""
    if p < 2:
        raise ValueError("p should not be less than 2!")
    elif p == 2:
        return True

    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2):
        s = ((s * s) - 2) % m
    return s == 0


if __name__ == "__main__":
    print(lucas_lehmer_test(7))
    print(lucas_lehmer_test(11))
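The test iterates s_{k+1} = s_k**2 - 2 modulo M_p = 2**p - 1; for odd prime p, M_p is prime exactly when the final term is 0 (so the demo above prints True for p = 7 and False for p = 11, since 2047 = 23 x 89). A hedged quick check over the odd primes below 100:

odd_primes = [3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97]
print([p for p in odd_primes if lucas_lehmer_test(p)])  # [3, 5, 7, 13, 17, 19, 31, 61, 89]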
557
1
"""simple docstring""" import inspect from typing import List, Optional, Tuple, Union import numpy as np import PIL import torch import torch.utils.checkpoint from ...models import UNetaDModel, VQModel from ...schedulers import ( DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, ) from ...utils import PIL_INTERPOLATION, randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput def SCREAMING_SNAKE_CASE ( __UpperCAmelCase ) -> List[Any]: SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = image.size SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32 SCREAMING_SNAKE_CASE__ = image.resize((w, h) , resample=PIL_INTERPOLATION["lanczos"] ) SCREAMING_SNAKE_CASE__ = np.array(__snake_case ).astype(np.floataa ) / 255.0 SCREAMING_SNAKE_CASE__ = image[None].transpose(0 , 3 , 1 , 2 ) SCREAMING_SNAKE_CASE__ = torch.from_numpy(__snake_case ) return 2.0 * image - 1.0 class lowerCamelCase (_SCREAMING_SNAKE_CASE ): '''simple docstring''' def __init__( self : Optional[int] , _snake_case : VQModel , _snake_case : UNetaDModel , _snake_case : Union[ DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler, EulerDiscreteScheduler, EulerAncestralDiscreteScheduler, DPMSolverMultistepScheduler, ] , ) -> Any: super().__init__() self.register_modules(vqvae=_snake_case , unet=_snake_case , scheduler=_snake_case ) @torch.no_grad() def __call__( self : Tuple , _snake_case : Union[torch.Tensor, PIL.Image.Image] = None , _snake_case : Optional[int] = 1 , _snake_case : Optional[int] = 100 , _snake_case : Optional[float] = 0.0 , _snake_case : Optional[Union[torch.Generator, List[torch.Generator]]] = None , _snake_case : Optional[str] = "pil" , _snake_case : bool = True , ) -> Union[Tuple, ImagePipelineOutput]: if isinstance(_snake_case , PIL.Image.Image ): SCREAMING_SNAKE_CASE__ = 1 elif isinstance(_snake_case , torch.Tensor ): SCREAMING_SNAKE_CASE__ = image.shape[0] else: raise ValueError(F"""`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(_snake_case )}""" ) if isinstance(_snake_case , PIL.Image.Image ): SCREAMING_SNAKE_CASE__ = preprocess(_snake_case ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = image.shape[-2:] # in_channels should be 6: 3 for latents, 3 for low resolution image SCREAMING_SNAKE_CASE__ = (batch_size, self.unet.config.in_channels // 2, height, width) SCREAMING_SNAKE_CASE__ = next(self.unet.parameters() ).dtype SCREAMING_SNAKE_CASE__ = randn_tensor(_snake_case , generator=_snake_case , device=self.device , dtype=_snake_case ) SCREAMING_SNAKE_CASE__ = image.to(device=self.device , dtype=_snake_case ) # set timesteps and move to the correct device self.scheduler.set_timesteps(_snake_case , device=self.device ) SCREAMING_SNAKE_CASE__ = self.scheduler.timesteps # scale the initial noise by the standard deviation required by the scheduler SCREAMING_SNAKE_CASE__ = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature. # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] SCREAMING_SNAKE_CASE__ = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() ) SCREAMING_SNAKE_CASE__ = {} if accepts_eta: SCREAMING_SNAKE_CASE__ = eta for t in self.progress_bar(_snake_case ): # concat latents and low resolution image in the channel dimension. SCREAMING_SNAKE_CASE__ = torch.cat([latents, image] , dim=1 ) SCREAMING_SNAKE_CASE__ = self.scheduler.scale_model_input(_snake_case , _snake_case ) # predict the noise residual SCREAMING_SNAKE_CASE__ = self.unet(_snake_case , _snake_case ).sample # compute the previous noisy sample x_t -> x_t-1 SCREAMING_SNAKE_CASE__ = self.scheduler.step(_snake_case , _snake_case , _snake_case , **_snake_case ).prev_sample # decode the image latents with the VQVAE SCREAMING_SNAKE_CASE__ = self.vqvae.decode(_snake_case ).sample SCREAMING_SNAKE_CASE__ = torch.clamp(_snake_case , -1.0 , 1.0 ) SCREAMING_SNAKE_CASE__ = image / 2 + 0.5 SCREAMING_SNAKE_CASE__ = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": SCREAMING_SNAKE_CASE__ = self.numpy_to_pil(_snake_case ) if not return_dict: return (image,) return ImagePipelineOutput(images=_snake_case )
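A minimal usage sketch for the pipeline above, assuming the public `CompVis/ldm-super-resolution-4x-openimages` checkpoint; the input path is illustrative:

import PIL.Image
import torch
from diffusers import LDMSuperResolutionPipeline

pipe = LDMSuperResolutionPipeline.from_pretrained("CompVis/ldm-super-resolution-4x-openimages")
pipe = pipe.to("cuda" if torch.cuda.is_available() else "cpu")

low_res = PIL.Image.open("low_res.png").convert("RGB")  # illustrative input file
upscaled = pipe(low_res, num_inference_steps=100, eta=1.0).images[0]
upscaled.save("upscaled.png")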
159
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_mvp": ["MVP_PRETRAINED_CONFIG_ARCHIVE_MAP", "MvpConfig", "MvpOnnxConfig"],
    "tokenization_mvp": ["MvpTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mvp_fast"] = ["MvpTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mvp"] = [
        "MVP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MvpForCausalLM",
        "MvpForConditionalGeneration",
        "MvpForQuestionAnswering",
        "MvpForSequenceClassification",
        "MvpModel",
        "MvpPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
    from .tokenization_mvp import MvpTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mvp_fast import MvpTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mvp import (
            MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
            MvpForCausalLM,
            MvpForConditionalGeneration,
            MvpForQuestionAnswering,
            MvpForSequenceClassification,
            MvpModel,
            MvpPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
108
0
import os


def solution():
    """Return the first ten digits of the sum of the one-hundred 50-digit
    numbers stored in num.txt."""
    file_path = os.path.join(os.path.dirname(__file__), "num.txt")
    with open(file_path) as file_hand:
        return str(sum(int(line) for line in file_hand))[:10]


if __name__ == "__main__":
    print(solution())
700
__all__ = [
    "Audio",
    "Array2D",
    "Array3D",
    "Array4D",
    "Array5D",
    "ClassLabel",
    "Features",
    "Sequence",
    "Value",
    "Image",
    "Translation",
    "TranslationVariableLanguages",
]
from .audio import Audio
from .features import Array2D, Array3D, Array4D, Array5D, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages
499
0
from math import sqrt


def sum_of_divisors(n: int) -> int:
    """Return d(n), the sum of the proper divisors of ``n``."""
    total = 0
    for i in range(1, int(sqrt(n) + 1)):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i
    return total - n


def solution(limit: int = 10_000) -> int:
    """Return the sum of all amicable numbers below ``limit``."""
    total = sum(
        i
        for i in range(1, limit)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i
    )
    return total


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
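A worked check of the helper above: the classic amicable pair is 220 and 284, since d(220) = 284 and d(284) = 220; the `sum_of_divisors(i) != i` guard excludes perfect numbers such as 28. Quick asserts:

assert sum_of_divisors(220) == 284
assert sum_of_divisors(284) == 220
assert solution(10_000) == 31626  # Project Euler 21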
38
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path

from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError

from transformers import AutoFeatureExtractor, Wav2Vec2FeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test


sys.path.append(str(Path(__file__).parent.parent / "utils"))

from test_module.custom_feature_extraction import CustomFeatureExtractor  # noqa E402


SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR = get_tests_dir("fixtures")


class FeatureExtractorUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = Wav2Vec2FeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2")

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = Wav2Vec2FeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url(self):
        _ = Wav2Vec2FeatureExtractor.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json"
        )


@is_staging_test
class FeatureExtractorPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-feature-extractor")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-feature-extractor-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-feature-extractor")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        feature_extractor.push_to_hub("test-feature-extractor", use_auth_token=self._token)

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-feature-extractor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir, repo_id="test-feature-extractor", push_to_hub=True, use_auth_token=self._token
            )

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

    def test_push_to_hub_in_organization(self):
        feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        feature_extractor.push_to_hub("valid_org/test-feature-extractor", use_auth_token=self._token)

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("valid_org/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-feature-extractor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir, repo_id="valid_org/test-feature-extractor-org", push_to_hub=True, use_auth_token=self._token
            )

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("valid_org/test-feature-extractor-org")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

    def test_push_to_hub_dynamic_feature_extractor(self):
        CustomFeatureExtractor.register_for_auto_class()
        feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)

        feature_extractor.push_to_hub("test-dynamic-feature-extractor", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            feature_extractor.auto_map,
            {"AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor"},
        )

        new_feature_extractor = AutoFeatureExtractor.from_pretrained(
            f"{USER}/test-dynamic-feature-extractor", trust_remote_code=True
        )
        # Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
        self.assertEqual(new_feature_extractor.__class__.__name__, "CustomFeatureExtractor")
618
0
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from tokenizers import processors

from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_mbart import MBartTokenizer
else:
    MBartTokenizer = None


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/mbart-large-en-ro": (
            "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
        ),
        "facebook/mbart-large-cc25": (
            "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
        ),
    },
    "tokenizer_file": {
        "facebook/mbart-large-en-ro": "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json",
        "facebook/mbart-large-cc25": "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/mbart-large-en-ro": 1024,
    "facebook/mbart-large-cc25": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
# fmt: on


class MBartTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = MBartTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        src_lang=None,
        tgt_lang=None,
        additional_special_tokens=None,
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()

        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )

        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }

        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(
        self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
    ):
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "en_XX",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "ro_RO",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source lang setting: no prefix, suffix=[eos, src_lang_code]."""
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset the special tokens to the target lang setting: no prefix, suffix=[eos, tgt_lang_code]."""
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
706
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union

from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings


logger = logging.getLogger(__name__)


@dataclass
@add_start_docstrings(TrainingArguments.__doc__)
class Seq2SeqTrainingArguments(TrainingArguments):
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    generation_max_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `max_length` value of the model configuration."
            )
        },
    )
    generation_num_beams: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `num_beams` value of the model configuration."
            )
        },
    )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None,
        metadata={
            "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
        },
    )

    def to_dict(self):
        """Serializes this instance, replacing any `GenerationConfig` entry by its dict representation."""
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig):
                d[k] = v.to_dict()
        return d
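A small illustrative instantiation of the dataclass above (values are arbitrary; `output_dir` is the only required `TrainingArguments` field):

from transformers import Seq2SeqTrainingArguments

args = Seq2SeqTrainingArguments(
    output_dir="out",
    predict_with_generate=True,  # evaluate with model.generate()
    generation_max_length=64,
    generation_num_beams=4,
)
print(args.predict_with_generate)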
461
0
UNIT_SYMBOL = {
    "meter": "m",
    "kilometer": "km",
    "megametre": "Mm",
    "gigametre": "Gm",
    "terametre": "Tm",
    "petametre": "Pm",
    "exametre": "Em",
    "zettametre": "Zm",
    "yottametre": "Ym",
}

# Exponent of the factor (meter)
METRIC_CONVERSION = {
    "m": 0,
    "km": 3,
    "Mm": 6,
    "Gm": 9,
    "Tm": 12,
    "Pm": 15,
    "Em": 18,
    "Zm": 21,
    "Ym": 24,
}


def length_conversion(value: float, from_type: str, to_type: str) -> float:
    """Convert ``value`` between metric length units via powers of ten."""
    from_sanitized = from_type.lower().strip("s")
    to_sanitized = to_type.lower().strip("s")

    from_sanitized = UNIT_SYMBOL.get(from_sanitized, from_sanitized)
    to_sanitized = UNIT_SYMBOL.get(to_sanitized, to_sanitized)

    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'from_type' value: {from_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'to_type' value: {to_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)

    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]
    exponent = 1

    if from_exponent > to_exponent:
        exponent = from_exponent - to_exponent
    else:
        exponent = -(to_exponent - from_exponent)

    return value * pow(10, exponent)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
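A worked example of the exponent arithmetic above: kilometers carry exponent 3 and meters 0, so converting 4 km to m multiplies by 10**(3 - 0):

assert length_conversion(4, "kilometer", "meter") == 4000
assert length_conversion(1, "meter", "kilometer") == 0.001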
422
from __future__ import annotations

import unittest

from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow


if is_tf_available():
    import numpy as np
    import tensorflow as tf

    from transformers import TFXLMRobertaModel


@require_tf
@require_sentencepiece
@require_tokenizers
class TFXLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFXLMRobertaModel.from_pretrained("jplu/tf-xlm-roberta-base")

        features = {
            "input_ids": tf.convert_to_tensor([[0, 2646, 10269, 83, 99942, 2]], dtype=tf.int32),  # "My dog is cute"
            "attention_mask": tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]], dtype=tf.int32),
        }

        output = model(features)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [0.0681762, 0.10894451, 0.06772504],
                    [-0.06423668, 0.02366615, 0.04329344],
                    [-0.06057295, 0.09974135, -0.00070584],
                ]
            ],
            dtype=tf.float32,
        )
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
422
1
"""simple docstring""" from __future__ import annotations def __lowercase ( lowerCamelCase_ : list[int] ): return len(set(lowerCamelCase_ ) ) == len(lowerCamelCase_ ) if __name__ == "__main__": import doctest doctest.testmod()
711
"""simple docstring""" import argparse import shlex import runhouse as rh if __name__ == "__main__": # Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access # setup instructions, if using on-demand hardware # If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster # If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster # Throw an error if user passes both BYO and on-demand cluster args # Otherwise, use default values _lowerCamelCase = argparse.ArgumentParser() parser.add_argument('--user', type=str, default='ubuntu') parser.add_argument('--host', type=str, default='localhost') parser.add_argument('--key_path', type=str, default=None) parser.add_argument('--instance', type=str, default='V100:1') parser.add_argument('--provider', type=str, default='cheapest') parser.add_argument('--use_spot', type=bool, default=False) parser.add_argument('--example', type=str, default='pytorch/text-generation/run_generation.py') _lowerCamelCase , _lowerCamelCase = parser.parse_known_args() if args.host != "localhost": if args.instance != "V100:1" or args.provider != "cheapest": raise ValueError('Cannot specify both BYO and on-demand cluster args') _lowerCamelCase = rh.cluster( name='rh-cluster', ips=[args.host], ssh_creds={'ssh_user': args.user, 'ssh_private_key': args.key_path} ) else: _lowerCamelCase = rh.cluster( name='rh-cluster', instance_type=args.instance, provider=args.provider, use_spot=args.use_spot ) _lowerCamelCase = args.example.rsplit('/', 1)[0] # Set up remote environment cluster.install_packages(['pip:./']) # Installs transformers from local source # Note transformers is copied into the home directory on the remote machine, so we can install from there cluster.run([f"""pip install -r transformers/examples/{example_dir}/requirements.txt"""]) cluster.run(['pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117']) # Run example. You can bypass the CLI wrapper and paste your own code here. cluster.run([f"""python transformers/examples/{args.example} {' '.join(shlex.quote(arg) for arg in unknown)}"""]) # Alternatively, we can just import and run a training function (especially if there's no wrapper CLI): # from my_script... import train # reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard'] # launch_train_gpu = rh.function(fn=train, # system=gpu, # reqs=reqs, # name='train_bert_glue') # # We can pass in arguments just like we would to a function: # launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16 # stream_logs=True)
112
0
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "vinvino02/glpn-kitti": "https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json",
    # See all GLPN models at https://huggingface.co/models?filter=glpn
}


class GLPNConfig(PretrainedConfig):
    model_type = "glpn"

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=64,
        max_depth=10,
        head_in_index=-1,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.max_depth = max_depth
        self.head_in_index = head_in_index
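For reference, a minimal hedged usage of the config class above (the randomly initialized model is illustrative only, not a trained checkpoint):

from transformers import GLPNConfig, GLPNForDepthEstimation

config = GLPNConfig(decoder_hidden_size=64, max_depth=10)
model = GLPNForDepthEstimation(config)  # random weights, for illustration
print(config.model_type)  # "glpn"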
550
from functools import reduce

N = (
    "73167176531330624919225119674426574742355349194934"
    "96983520312774506326239578318016984801869478851843"
    "85861560789112949495459501737958331952853208805511"
    "12540698747158523863050715693290963295227443043557"
    "66896648950445244523161731856403098711121722383113"
    "62229893423380308135336276614282806444486645238749"
    "30358907296290491560440772390713810515859307960866"
    "70172427121883998797908792274921901699720888093776"
    "65727333001053367881220235421809751254540594752243"
    "52584907711670556013604839586446706324415722155397"
    "53697817977846174064955149290862569321978468622482"
    "83972241375657056057490261407972968652414535100474"
    "82166370484403199890008895243450658541227588666881"
    "16427171479924442928230863465674813919123162824586"
    "17866458359124566529476545682848912883142607690042"
    "24219022671055626321111109370544217506941658960408"
    "07198403850962455444362981230987879927244284909188"
    "84580156166097919133875499200524063689912560717606"
    "05886116467109405077541002256983155200055935729725"
    "71636269561882670428252483600823257530420752963450"
)


def solution(n: str = N) -> int:
    """Return the greatest product of thirteen adjacent digits in ``n``."""
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x, y: str(int(x) * int(y)), n[i : i + 13]))
        for i in range(len(n) - 12)
    )


if __name__ == "__main__":
    print(f"{solution() = }")
30
0
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available


_import_structure = {"tokenization_herbert": ["HerbertTokenizer"]}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_herbert_fast"] = ["HerbertTokenizerFast"]


if TYPE_CHECKING:
    from .tokenization_herbert import HerbertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_herbert_fast import HerbertTokenizerFast

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
471
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import torch
import torch.nn as nn

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_1d_blocks import get_down_block, get_mid_block, get_out_block, get_up_block


@dataclass
class UNet1DOutput(BaseOutput):
    """Output of [`UNet1DModel`]: the `(batch_size, num_channels, sample_size)` sample tensor."""

    sample: torch.FloatTensor


class UNet1DModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        sample_size: int = 65536,
        sample_rate: Optional[int] = None,
        in_channels: int = 2,
        out_channels: int = 2,
        extra_in_channels: int = 0,
        time_embedding_type: str = "fourier",
        flip_sin_to_cos: bool = True,
        use_timestep_embedding: bool = False,
        freq_shift: float = 0.0,
        down_block_types: Tuple[str] = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"),
        up_block_types: Tuple[str] = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"),
        mid_block_type: str = "UNetMidBlock1D",
        out_block_type: str = None,
        block_out_channels: Tuple[int] = (32, 32, 64),
        act_fn: str = None,
        norm_num_groups: int = 8,
        layers_per_block: int = 1,
        downsample_each_block: bool = False,
    ):
        super().__init__()
        self.sample_size = sample_size

        # time
        if time_embedding_type == "fourier":
            self.time_proj = GaussianFourierProjection(
                embedding_size=8, set_W_to_weight=False, log=False, flip_sin_to_cos=flip_sin_to_cos
            )
            timestep_input_dim = 2 * block_out_channels[0]
        elif time_embedding_type == "positional":
            self.time_proj = Timesteps(
                block_out_channels[0], flip_sin_to_cos=flip_sin_to_cos, downscale_freq_shift=freq_shift
            )
            timestep_input_dim = block_out_channels[0]

        if use_timestep_embedding:
            time_embed_dim = block_out_channels[0] * 4
            self.time_mlp = TimestepEmbedding(
                in_channels=timestep_input_dim,
                time_embed_dim=time_embed_dim,
                act_fn=act_fn,
                out_dim=block_out_channels[0],
            )

        self.down_blocks = nn.ModuleList([])
        self.mid_block = None
        self.up_blocks = nn.ModuleList([])
        self.out_block = None

        # down
        output_channel = in_channels
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]

            if i == 0:
                input_channel += extra_in_channels

            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(
                down_block_type,
                num_layers=layers_per_block,
                in_channels=input_channel,
                out_channels=output_channel,
                temb_channels=block_out_channels[0],
                add_downsample=not is_final_block or downsample_each_block,
            )
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = get_mid_block(
            mid_block_type,
            in_channels=block_out_channels[-1],
            mid_channels=block_out_channels[-1],
            out_channels=block_out_channels[-1],
            embed_dim=block_out_channels[0],
            num_layers=layers_per_block,
            add_downsample=downsample_each_block,
        )

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        if out_block_type is None:
            final_upsample_channels = out_channels
        else:
            final_upsample_channels = block_out_channels[0]

        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = (
                reversed_block_out_channels[i + 1] if i < len(up_block_types) - 1 else final_upsample_channels
            )

            is_final_block = i == len(block_out_channels) - 1

            up_block = get_up_block(
                up_block_type,
                num_layers=layers_per_block,
                in_channels=prev_output_channel,
                out_channels=output_channel,
                temb_channels=block_out_channels[0],
                add_upsample=not is_final_block,
            )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        num_groups_out = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4, 32)
        self.out_block = get_out_block(
            out_block_type=out_block_type,
            num_groups_out=num_groups_out,
            embed_dim=block_out_channels[0],
            out_channels=out_channels,
            act_fn=act_fn,
            fc_dim=block_out_channels[-1] // 4,
        )

    def forward(
        self,
        sample: torch.FloatTensor,
        timestep: Union[torch.Tensor, float, int],
        return_dict: bool = True,
    ) -> Union[UNet1DOutput, Tuple]:
        # 1. time
        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=sample.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(sample.device)

        timestep_embed = self.time_proj(timesteps)
        if self.config.use_timestep_embedding:
            timestep_embed = self.time_mlp(timestep_embed)
        else:
            timestep_embed = timestep_embed[..., None]
            timestep_embed = timestep_embed.repeat([1, 1, sample.shape[2]]).to(sample.dtype)
            timestep_embed = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]))

        # 2. down
        down_block_res_samples = ()
        for downsample_block in self.down_blocks:
            sample, res_samples = downsample_block(hidden_states=sample, temb=timestep_embed)
            down_block_res_samples += res_samples

        # 3. mid
        if self.mid_block:
            sample = self.mid_block(sample, timestep_embed)

        # 4. up
        for i, upsample_block in enumerate(self.up_blocks):
            res_samples = down_block_res_samples[-1:]
            down_block_res_samples = down_block_res_samples[:-1]
            sample = upsample_block(sample, res_hidden_states_tuple=res_samples, temb=timestep_embed)

        # 5. post-process
        if self.out_block:
            sample = self.out_block(sample, timestep_embed)

        if not return_dict:
            return (sample,)

        return UNet1DOutput(sample=sample)
471
1
import collections
import os
import re
from pathlib import Path


PATH_TO_TRANSFORMERS = "src/transformers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r'^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")


def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)


def parse_init(init_file):
    """
    Read an init file and parse (per backend) the `_import_structure` objects defined and the `TYPE_CHECKING` objects
    defined.
    """
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects


def analyze_results(import_dict_objects, type_hint_objects):
    """
    Analyze the differences between _import_structure objects and TYPE_CHECKING objects found in an init.
    """

    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors


def check_all_inits():
    """Check all inits in the repo and collect a failure per mismatched init."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))


def get_transformers_submodules():
    """Returns the list of Transformers submodules."""
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules


IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
    "models.esm.openfold_utils",
]


def check_submodules():
    # This is to make sure the transformers module imported is the one in the repo.
    from transformers.utils import direct_transformers_import

    transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

    import_structure_keys = set(transformers._import_structure.keys())
    # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
    # (potentially re-) add them.
    with open(os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"), "r") as f:
        init_content = f.read()
        import_structure_keys.update(set(re.findall(r"import_structure\[\"([^\"]*)\"\]", init_content)))

    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in import_structure_keys
    ]

    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )


if __name__ == "__main__":
    check_all_inits()
    check_submodules()
585
from __future__ import annotations

from bisect import bisect_left
from functools import total_ordering
from heapq import merge


@total_ordering
class Stack(list):
    """A pile of cards, ordered by its top (last) element."""

    def __lt__(self, other):
        return self[-1] < other[-1]

    def __eq__(self, other):
        return self[-1] == other[-1]


def patience_sort(collection: list) -> list:
    """Sort ``collection`` with the patience-sorting technique."""
    stacks: list[Stack] = []
    # sort into stacks
    for element in collection:
        new_stack = Stack([element])
        i = bisect_left(stacks, new_stack)
        if i != len(stacks):
            stacks[i].append(element)
        else:
            stacks.append(new_stack)

    # use a heap-based merge to merge stack efficiently
    collection[:] = merge(*(reversed(stack) for stack in stacks))
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(patience_sort(unsorted))
585
1
from __future__ import annotations

from math import pow, sqrt


def electrical_impedance(resistance: float, reactance: float, impedance: float) -> dict[str, float]:
    """Given any two of resistance, reactance and impedance (pass the unknown
    quantity as 0), solve Z**2 = R**2 + X**2 for the missing one."""
    if (resistance, reactance, impedance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance, 2) - pow(reactance, 2))}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance, 2) - pow(resistance, 2))}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance, 2) + pow(reactance, 2))}
    else:
        raise ValueError("Exactly one argument must be 0")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
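A worked check using the 3-4-5 right triangle: with R = 3 ohm and X = 4 ohm the impedance is sqrt(9 + 16) = 5 ohm, and the same identity recovers either missing side:

assert electrical_impedance(3, 4, 0) == {"impedance": 5.0}
assert electrical_impedance(0, 4, 5) == {"resistance": 3.0}
assert electrical_impedance(3, 0, 5) == {"reactance": 4.0}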
703
import shutil import tempfile import unittest import numpy as np import pytest from transformers.testing_utils import require_vision from transformers.utils import is_vision_available if is_vision_available(): from PIL import Image from transformers import AutoProcessor, BlipaProcessor, BlipImageProcessor, GPTaTokenizer, PreTrainedTokenizerFast @require_vision class lowercase_ ( unittest.TestCase ): def UpperCamelCase ( self ): _snake_case : Optional[Any] = tempfile.mkdtemp() _snake_case : List[str] = BlipImageProcessor() _snake_case : Optional[int] = GPTaTokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model" ) _snake_case : Optional[int] = BlipaProcessor(lowercase_ , lowercase_ ) processor.save_pretrained(self.tmpdirname ) def UpperCamelCase ( self , **lowercase_ ): return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase_ ).tokenizer def UpperCamelCase ( self , **lowercase_ ): return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase_ ).image_processor def UpperCamelCase ( self ): shutil.rmtree(self.tmpdirname ) def UpperCamelCase ( self ): _snake_case : Optional[int] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] _snake_case : int = [Image.fromarray(np.moveaxis(lowercase_ , 0 , -1 ) ) for x in image_inputs] return image_inputs def UpperCamelCase ( self ): _snake_case : Any = BlipaProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) _snake_case : List[Any] = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" ) _snake_case : Dict = self.get_image_processor(do_normalize=lowercase_ , padding_value=1.0 ) _snake_case : Dict = BlipaProcessor.from_pretrained( self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=lowercase_ , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , lowercase_ ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , lowercase_ ) def UpperCamelCase ( self ): _snake_case : Optional[int] = self.get_image_processor() _snake_case : Optional[int] = self.get_tokenizer() _snake_case : str = BlipaProcessor(tokenizer=lowercase_ , image_processor=lowercase_ ) _snake_case : Tuple = self.prepare_image_inputs() _snake_case : Optional[int] = image_processor(lowercase_ , return_tensors="np" ) _snake_case : Any = processor(images=lowercase_ , return_tensors="np" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) def UpperCamelCase ( self ): _snake_case : Optional[int] = self.get_image_processor() _snake_case : str = self.get_tokenizer() _snake_case : Optional[Any] = BlipaProcessor(tokenizer=lowercase_ , image_processor=lowercase_ ) _snake_case : List[Any] = "lower newer" _snake_case : Tuple = processor(text=lowercase_ ) _snake_case : Union[str, Any] = tokenizer(lowercase_ , return_token_type_ids=lowercase_ ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def UpperCamelCase ( self ): _snake_case : str = self.get_image_processor() _snake_case : List[Any] = self.get_tokenizer() _snake_case : Optional[int] = BlipaProcessor(tokenizer=lowercase_ , image_processor=lowercase_ ) _snake_case : Any = "lower newer" _snake_case : str = self.prepare_image_inputs() _snake_case : List[str] = processor(text=lowercase_ , 
images=lowercase_ ) self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] ) # test if it raises when no input is passed with pytest.raises(lowercase_ ): processor() def UpperCamelCase ( self ): _snake_case : List[str] = self.get_image_processor() _snake_case : Union[str, Any] = self.get_tokenizer() _snake_case : Tuple = BlipaProcessor(tokenizer=lowercase_ , image_processor=lowercase_ ) _snake_case : Tuple = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] _snake_case : Tuple = processor.batch_decode(lowercase_ ) _snake_case : Union[str, Any] = tokenizer.batch_decode(lowercase_ ) self.assertListEqual(lowercase_ , lowercase_ ) def UpperCamelCase ( self ): _snake_case : Optional[int] = self.get_image_processor() _snake_case : Dict = self.get_tokenizer() _snake_case : Dict = BlipaProcessor(tokenizer=lowercase_ , image_processor=lowercase_ ) _snake_case : int = "lower newer" _snake_case : Any = self.prepare_image_inputs() _snake_case : str = processor(text=lowercase_ , images=lowercase_ ) # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask'] self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
580
0
import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DetaImageProcessor class A_ ( unittest.TestCase ): def __init__( self : List[Any] ,SCREAMING_SNAKE_CASE__ : Any ,SCREAMING_SNAKE_CASE__ : str=7 ,SCREAMING_SNAKE_CASE__ : Any=3 ,SCREAMING_SNAKE_CASE__ : Optional[Any]=3_0 ,SCREAMING_SNAKE_CASE__ : int=4_0_0 ,SCREAMING_SNAKE_CASE__ : Optional[Any]=True ,SCREAMING_SNAKE_CASE__ : Optional[int]=None ,SCREAMING_SNAKE_CASE__ : Any=True ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=[0.5, 0.5, 0.5] ,SCREAMING_SNAKE_CASE__ : Dict=[0.5, 0.5, 0.5] ,SCREAMING_SNAKE_CASE__ : List[str]=True ,SCREAMING_SNAKE_CASE__ : List[str]=1 / 2_5_5 ,SCREAMING_SNAKE_CASE__ : Tuple=True ,): # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p __lowerCamelCase : List[Any] = size if size is not None else {'shortest_edge': 1_8, 'longest_edge': 1_3_3_3} __lowerCamelCase : str = parent __lowerCamelCase : Union[str, Any] = batch_size __lowerCamelCase : int = num_channels __lowerCamelCase : Dict = min_resolution __lowerCamelCase : Tuple = max_resolution __lowerCamelCase : Dict = do_resize __lowerCamelCase : List[Any] = size __lowerCamelCase : Tuple = do_normalize __lowerCamelCase : Any = image_mean __lowerCamelCase : List[str] = image_std __lowerCamelCase : List[Any] = do_rescale __lowerCamelCase : str = rescale_factor __lowerCamelCase : Tuple = do_pad def lowerCAmelCase ( self : Dict): return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def lowerCAmelCase ( self : Any ,SCREAMING_SNAKE_CASE__ : str ,SCREAMING_SNAKE_CASE__ : List[str]=False): if not batched: __lowerCamelCase : Optional[Any] = image_inputs[0] if isinstance(SCREAMING_SNAKE_CASE__ ,Image.Image): __lowerCamelCase , __lowerCamelCase : Any = image.size else: __lowerCamelCase , __lowerCamelCase : Any = image.shape[1], image.shape[2] if w < h: __lowerCamelCase : Optional[int] = int(self.size['shortest_edge'] * h / w) __lowerCamelCase : Tuple = self.size['shortest_edge'] elif w > h: __lowerCamelCase : Union[str, Any] = self.size['shortest_edge'] __lowerCamelCase : Union[str, Any] = int(self.size['shortest_edge'] * w / h) else: __lowerCamelCase : List[Any] = self.size['shortest_edge'] __lowerCamelCase : Optional[int] = self.size['shortest_edge'] else: __lowerCamelCase : List[str] = [] for image in image_inputs: __lowerCamelCase , __lowerCamelCase : List[Any] = self.get_expected_values([image]) expected_values.append((expected_height, expected_width)) __lowerCamelCase : Tuple = max(SCREAMING_SNAKE_CASE__ ,key=lambda SCREAMING_SNAKE_CASE__: item[0])[0] __lowerCamelCase : Dict = max(SCREAMING_SNAKE_CASE__ ,key=lambda SCREAMING_SNAKE_CASE__: item[1])[1] return expected_height, expected_width @require_torch @require_vision class A_ ( SCREAMING_SNAKE_CASE , unittest.TestCase ): _UpperCAmelCase : Optional[int] = DetaImageProcessor if is_vision_available() else None def lowerCAmelCase ( self : Optional[Any]): __lowerCamelCase : List[str] = DetaImageProcessingTester(self) @property def 
lowerCAmelCase ( self : Any): return self.image_processor_tester.prepare_image_processor_dict() def lowerCAmelCase ( self : Dict): __lowerCamelCase : Any = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ ,'image_mean')) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ ,'image_std')) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ ,'do_normalize')) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ ,'do_resize')) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ ,'do_rescale')) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ ,'do_pad')) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ ,'size')) def lowerCAmelCase ( self : str): __lowerCamelCase : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size ,{'shortest_edge': 1_8, 'longest_edge': 1_3_3_3}) self.assertEqual(image_processor.do_pad ,SCREAMING_SNAKE_CASE__) def lowerCAmelCase ( self : Any): pass def lowerCAmelCase ( self : List[str]): # Initialize image_processing __lowerCamelCase : Dict = self.image_processing_class(**self.image_processor_dict) # create random PIL images __lowerCamelCase : List[Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=SCREAMING_SNAKE_CASE__) for image in image_inputs: self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,Image.Image) # Test not batched input __lowerCamelCase : List[Any] = image_processing(image_inputs[0] ,return_tensors='pt').pixel_values __lowerCamelCase , __lowerCamelCase : Tuple = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__) self.assertEqual( encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,) # Test batched __lowerCamelCase , __lowerCamelCase : Optional[Any] = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__ ,batched=SCREAMING_SNAKE_CASE__) __lowerCamelCase : Optional[int] = image_processing(SCREAMING_SNAKE_CASE__ ,return_tensors='pt').pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) ,) def lowerCAmelCase ( self : str): # Initialize image_processing __lowerCamelCase : Dict = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors __lowerCamelCase : str = prepare_image_inputs(self.image_processor_tester ,equal_resolution=SCREAMING_SNAKE_CASE__ ,numpify=SCREAMING_SNAKE_CASE__) for image in image_inputs: self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,np.ndarray) # Test not batched input __lowerCamelCase : Tuple = image_processing(image_inputs[0] ,return_tensors='pt').pixel_values __lowerCamelCase , __lowerCamelCase : int = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__) self.assertEqual( encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,) # Test batched __lowerCamelCase : str = image_processing(SCREAMING_SNAKE_CASE__ ,return_tensors='pt').pixel_values __lowerCamelCase , __lowerCamelCase : Optional[int] = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__ ,batched=SCREAMING_SNAKE_CASE__) self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) ,) def lowerCAmelCase ( self : int): # Initialize image_processing __lowerCamelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict) # create random PyTorch 
tensors __lowerCamelCase : List[Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=SCREAMING_SNAKE_CASE__ ,torchify=SCREAMING_SNAKE_CASE__) for image in image_inputs: self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,torch.Tensor) # Test not batched input __lowerCamelCase : Union[str, Any] = image_processing(image_inputs[0] ,return_tensors='pt').pixel_values __lowerCamelCase , __lowerCamelCase : int = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__) self.assertEqual( encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,) # Test batched __lowerCamelCase : List[Any] = image_processing(SCREAMING_SNAKE_CASE__ ,return_tensors='pt').pixel_values __lowerCamelCase , __lowerCamelCase : Optional[int] = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__ ,batched=SCREAMING_SNAKE_CASE__) self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) ,) @slow def lowerCAmelCase ( self : Optional[Any]): # prepare image and target __lowerCamelCase : List[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png') with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' ,'r') as f: __lowerCamelCase : List[str] = json.loads(f.read()) __lowerCamelCase : Union[str, Any] = {'image_id': 3_9_7_6_9, 'annotations': target} # encode them __lowerCamelCase : Optional[int] = DetaImageProcessor() __lowerCamelCase : int = image_processing(images=SCREAMING_SNAKE_CASE__ ,annotations=SCREAMING_SNAKE_CASE__ ,return_tensors='pt') # verify pixel values __lowerCamelCase : List[str] = torch.Size([1, 3, 8_0_0, 1_0_6_6]) self.assertEqual(encoding['pixel_values'].shape ,SCREAMING_SNAKE_CASE__) __lowerCamelCase : List[Any] = torch.tensor([0.2796, 0.3138, 0.3481]) self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] ,SCREAMING_SNAKE_CASE__ ,atol=1E-4)) # verify area __lowerCamelCase : Dict = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438]) self.assertTrue(torch.allclose(encoding['labels'][0]['area'] ,SCREAMING_SNAKE_CASE__)) # verify boxes __lowerCamelCase : int = torch.Size([6, 4]) self.assertEqual(encoding['labels'][0]['boxes'].shape ,SCREAMING_SNAKE_CASE__) __lowerCamelCase : Optional[int] = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215]) self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] ,SCREAMING_SNAKE_CASE__ ,atol=1E-3)) # verify image_id __lowerCamelCase : Tuple = torch.tensor([3_9_7_6_9]) self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] ,SCREAMING_SNAKE_CASE__)) # verify is_crowd __lowerCamelCase : Optional[Any] = torch.tensor([0, 0, 0, 0, 0, 0]) self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] ,SCREAMING_SNAKE_CASE__)) # verify class_labels __lowerCamelCase : List[Any] = torch.tensor([7_5, 7_5, 6_3, 6_5, 1_7, 1_7]) self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] ,SCREAMING_SNAKE_CASE__)) # verify orig_size __lowerCamelCase : str = torch.tensor([4_8_0, 6_4_0]) self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] ,SCREAMING_SNAKE_CASE__)) # verify size __lowerCamelCase : int = torch.tensor([8_0_0, 1_0_6_6]) self.assertTrue(torch.allclose(encoding['labels'][0]['size'] ,SCREAMING_SNAKE_CASE__)) @slow def lowerCAmelCase ( self : str): # prepare image, target and masks_path __lowerCamelCase : Optional[int] = 
Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png') with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' ,'r') as f: __lowerCamelCase : Tuple = json.loads(f.read()) __lowerCamelCase : List[Any] = {'file_name': '000000039769.png', 'image_id': 3_9_7_6_9, 'segments_info': target} __lowerCamelCase : Optional[int] = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic') # encode them __lowerCamelCase : List[str] = DetaImageProcessor(format='coco_panoptic') __lowerCamelCase : Optional[Any] = image_processing(images=SCREAMING_SNAKE_CASE__ ,annotations=SCREAMING_SNAKE_CASE__ ,masks_path=SCREAMING_SNAKE_CASE__ ,return_tensors='pt') # verify pixel values __lowerCamelCase : List[str] = torch.Size([1, 3, 8_0_0, 1_0_6_6]) self.assertEqual(encoding['pixel_values'].shape ,SCREAMING_SNAKE_CASE__) __lowerCamelCase : Any = torch.tensor([0.2796, 0.3138, 0.3481]) self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] ,SCREAMING_SNAKE_CASE__ ,atol=1E-4)) # verify area __lowerCamelCase : Optional[Any] = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147]) self.assertTrue(torch.allclose(encoding['labels'][0]['area'] ,SCREAMING_SNAKE_CASE__)) # verify boxes __lowerCamelCase : Tuple = torch.Size([6, 4]) self.assertEqual(encoding['labels'][0]['boxes'].shape ,SCREAMING_SNAKE_CASE__) __lowerCamelCase : Any = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625]) self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] ,SCREAMING_SNAKE_CASE__ ,atol=1E-3)) # verify image_id __lowerCamelCase : int = torch.tensor([3_9_7_6_9]) self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] ,SCREAMING_SNAKE_CASE__)) # verify is_crowd __lowerCamelCase : Union[str, Any] = torch.tensor([0, 0, 0, 0, 0, 0]) self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] ,SCREAMING_SNAKE_CASE__)) # verify class_labels __lowerCamelCase : int = torch.tensor([1_7, 1_7, 6_3, 7_5, 7_5, 9_3]) self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] ,SCREAMING_SNAKE_CASE__)) # verify masks __lowerCamelCase : Optional[Any] = 8_2_2_8_7_3 self.assertEqual(encoding['labels'][0]['masks'].sum().item() ,SCREAMING_SNAKE_CASE__) # verify orig_size __lowerCamelCase : Any = torch.tensor([4_8_0, 6_4_0]) self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] ,SCREAMING_SNAKE_CASE__)) # verify size __lowerCamelCase : Any = torch.tensor([8_0_0, 1_0_6_6]) self.assertTrue(torch.allclose(encoding['labels'][0]['size'] ,SCREAMING_SNAKE_CASE__))
652
import itertools import json import linecache import os import pickle import re import socket import string from collections import Counter from logging import getLogger from pathlib import Path from typing import Callable, Dict, Iterable, List import git import torch from torch.utils.data import Dataset from transformers import BartTokenizer, RagTokenizer, TaTokenizer def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=True , lowerCamelCase__="pt" ) -> Dict: __lowerCamelCase : Any = {'add_prefix_space': True} if isinstance(lowerCamelCase__ , lowerCamelCase__ ) and not line.startswith(' ' ) else {} __lowerCamelCase : int = padding_side return tokenizer( [line] , max_length=lowerCamelCase__ , padding='max_length' if pad_to_max_length else None , truncation=lowerCamelCase__ , return_tensors=lowerCamelCase__ , add_special_tokens=lowerCamelCase__ , **lowerCamelCase__ , ) def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=None , ) -> List[str]: __lowerCamelCase : List[str] = input_ids.ne(lowerCamelCase__ ).any(dim=0 ) if attention_mask is None: return input_ids[:, keep_column_mask] else: return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask]) class A_ ( SCREAMING_SNAKE_CASE ): def __init__( self : Optional[Any] ,SCREAMING_SNAKE_CASE__ : List[str] ,SCREAMING_SNAKE_CASE__ : Dict ,SCREAMING_SNAKE_CASE__ : Optional[Any] ,SCREAMING_SNAKE_CASE__ : Optional[Any] ,SCREAMING_SNAKE_CASE__ : Optional[int]="train" ,SCREAMING_SNAKE_CASE__ : Tuple=None ,SCREAMING_SNAKE_CASE__ : Dict=None ,SCREAMING_SNAKE_CASE__ : int=None ,SCREAMING_SNAKE_CASE__ : List[Any]="" ,): super().__init__() __lowerCamelCase : Optional[Any] = Path(SCREAMING_SNAKE_CASE__).joinpath(type_path + '.source') __lowerCamelCase : Any = Path(SCREAMING_SNAKE_CASE__).joinpath(type_path + '.target') __lowerCamelCase : List[Any] = self.get_char_lens(self.src_file) __lowerCamelCase : List[Any] = max_source_length __lowerCamelCase : List[str] = max_target_length assert min(self.src_lens) > 0, F"found empty line in {self.src_file}" __lowerCamelCase : Any = tokenizer __lowerCamelCase : Optional[int] = prefix if n_obs is not None: __lowerCamelCase : Dict = self.src_lens[:n_obs] __lowerCamelCase : str = src_lang __lowerCamelCase : Any = tgt_lang def __len__( self : Tuple): return len(self.src_lens) def __getitem__( self : Optional[Any] ,SCREAMING_SNAKE_CASE__ : str): __lowerCamelCase : Dict = index + 1 # linecache starts at 1 __lowerCamelCase : Any = self.prefix + linecache.getline(str(self.src_file) ,SCREAMING_SNAKE_CASE__).rstrip('\n') __lowerCamelCase : int = linecache.getline(str(self.tgt_file) ,SCREAMING_SNAKE_CASE__).rstrip('\n') assert source_line, F"empty source line for index {index}" assert tgt_line, F"empty tgt line for index {index}" # Need to add eos token manually for T5 if isinstance(self.tokenizer ,SCREAMING_SNAKE_CASE__): source_line += self.tokenizer.eos_token tgt_line += self.tokenizer.eos_token # Pad source and target to the right __lowerCamelCase : Dict = ( self.tokenizer.question_encoder if isinstance(self.tokenizer ,SCREAMING_SNAKE_CASE__) else self.tokenizer ) __lowerCamelCase : Optional[int] = self.tokenizer.generator if isinstance(self.tokenizer ,SCREAMING_SNAKE_CASE__) else self.tokenizer __lowerCamelCase : List[str] = encode_line(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,self.max_source_length ,'right') __lowerCamelCase : Any = encode_line(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ 
,self.max_target_length ,'right') __lowerCamelCase : List[Any] = source_inputs['input_ids'].squeeze() __lowerCamelCase : Tuple = target_inputs['input_ids'].squeeze() __lowerCamelCase : Tuple = source_inputs['attention_mask'].squeeze() return { "input_ids": source_ids, "attention_mask": src_mask, "decoder_input_ids": target_ids, } @staticmethod def lowerCAmelCase ( SCREAMING_SNAKE_CASE__ : int): return [len(SCREAMING_SNAKE_CASE__) for x in Path(SCREAMING_SNAKE_CASE__).open().readlines()] def lowerCAmelCase ( self : Dict ,SCREAMING_SNAKE_CASE__ : List[Any]): __lowerCamelCase : Optional[Any] = torch.stack([x['input_ids'] for x in batch]) __lowerCamelCase : Any = torch.stack([x['attention_mask'] for x in batch]) __lowerCamelCase : Union[str, Any] = torch.stack([x['decoder_input_ids'] for x in batch]) __lowerCamelCase : Optional[int] = ( self.tokenizer.generator.pad_token_id if isinstance(self.tokenizer ,SCREAMING_SNAKE_CASE__) else self.tokenizer.pad_token_id ) __lowerCamelCase : int = ( self.tokenizer.question_encoder.pad_token_id if isinstance(self.tokenizer ,SCREAMING_SNAKE_CASE__) else self.tokenizer.pad_token_id ) __lowerCamelCase : int = trim_batch(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__) __lowerCamelCase , __lowerCamelCase : int = trim_batch(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,attention_mask=SCREAMING_SNAKE_CASE__) __lowerCamelCase : Union[str, Any] = { 'input_ids': source_ids, 'attention_mask': source_mask, 'decoder_input_ids': y, } return batch a =getLogger(__name__) def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Any: return list(itertools.chain.from_iterable(lowerCamelCase__ ) ) def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> None: __lowerCamelCase : str = get_git_info() save_json(lowerCamelCase__ , os.path.join(lowerCamelCase__ , 'git_log.json' ) ) def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=4 , **lowerCamelCase__ ) -> List[str]: with open(lowerCamelCase__ , 'w' ) as f: json.dump(lowerCamelCase__ , lowerCamelCase__ , indent=lowerCamelCase__ , **lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Tuple: with open(lowerCamelCase__ ) as f: return json.load(lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( ) -> List[str]: __lowerCamelCase : str = git.Repo(search_parent_directories=lowerCamelCase__ ) __lowerCamelCase : Any = { 'repo_id': str(lowerCamelCase__ ), 'repo_sha': str(repo.head.object.hexsha ), 'repo_branch': str(repo.active_branch ), 'hostname': str(socket.gethostname() ), } return repo_infos def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> List: return list(map(lowerCamelCase__ , lowerCamelCase__ ) ) def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> Optional[int]: with open(lowerCamelCase__ , 'wb' ) as f: return pickle.dump(lowerCamelCase__ , lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> str: def remove_articles(lowerCamelCase__ ): return re.sub(R'\b(a|an|the)\b' , ' ' , lowerCamelCase__ ) def white_space_fix(lowerCamelCase__ ): return " ".join(text.split() ) def remove_punc(lowerCamelCase__ ): __lowerCamelCase : Dict = set(string.punctuation ) return "".join(ch for ch in text if ch not in exclude ) def lower(lowerCamelCase__ ): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(lowerCamelCase__ ) ) ) ) def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> int: __lowerCamelCase : str = normalize_answer(lowerCamelCase__ ).split() __lowerCamelCase : Optional[int] = 
normalize_answer(lowerCamelCase__ ).split() __lowerCamelCase : Union[str, Any] = Counter(lowerCamelCase__ ) & Counter(lowerCamelCase__ ) __lowerCamelCase : Any = sum(common.values() ) if num_same == 0: return 0 __lowerCamelCase : List[Any] = 1.0 * num_same / len(lowerCamelCase__ ) __lowerCamelCase : int = 1.0 * num_same / len(lowerCamelCase__ ) __lowerCamelCase : Optional[Any] = (2 * precision * recall) / (precision + recall) return fa def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> Dict: return normalize_answer(lowerCamelCase__ ) == normalize_answer(lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> Dict: assert len(lowerCamelCase__ ) == len(lowerCamelCase__ ) __lowerCamelCase : Dict = 0 for hypo, pred in zip(lowerCamelCase__ , lowerCamelCase__ ): em += exact_match_score(lowerCamelCase__ , lowerCamelCase__ ) if len(lowerCamelCase__ ) > 0: em /= len(lowerCamelCase__ ) return {"em": em} def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Tuple: return model_prefix.startswith('rag' ) def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Optional[Any]: __lowerCamelCase : Optional[int] = {p: p for p in extra_params} # T5 models don't have `dropout` param, they have `dropout_rate` instead __lowerCamelCase : List[str] = 'dropout_rate' for p in extra_params: if getattr(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): if not hasattr(lowerCamelCase__ , lowerCamelCase__ ) and not hasattr(lowerCamelCase__ , equivalent_param[p] ): logger.info('config doesn\'t have a `{}` attribute'.format(lowerCamelCase__ ) ) delattr(lowerCamelCase__ , lowerCamelCase__ ) continue __lowerCamelCase : List[Any] = p if hasattr(lowerCamelCase__ , lowerCamelCase__ ) else equivalent_param[p] setattr(lowerCamelCase__ , lowerCamelCase__ , getattr(lowerCamelCase__ , lowerCamelCase__ ) ) delattr(lowerCamelCase__ , lowerCamelCase__ ) return hparams, config
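To illustrate the SQuAD-style metrics defined above (in the upstream `utils_rag.py` the mangled helpers are `normalize_answer`, `f1_score`, `exact_match_score`, and `calculate_exact_match`), a small example:

```python
pred = "The quick brown fox."
gold = "A quick brown fox"
# normalize_answer lower-cases, strips punctuation and articles ("the", "a"),
# so both strings reduce to "quick brown fox" and overlap perfectly.
print(exact_match_score(pred, gold))          # True
print(f1_score(pred, gold))                   # 1.0
print(calculate_exact_match([pred], [gold]))  # {'em': 1.0}
```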
652
1
import torch

from diffusers import DiffusionPipeline


class UnetSchedulerOneForwardPipeline(DiffusionPipeline):
    """simple docstring"""

    def __init__(self, unet, scheduler):
        """simple docstring"""
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    def __call__(self):
        """simple docstring"""
        image = torch.randn(
            (1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
        )
        timestep = 1
        model_output = self.unet(image, timestep).sample
        scheduler_output = self.scheduler.step(model_output, timestep, image).prev_sample
        # x - x + 1: the output is deterministically a tensor of ones
        result = scheduler_output - scheduler_output + torch.ones_like(scheduler_output)
        return result
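A hedged usage sketch for this one-forward-pass pipeline; the tiny UNet configuration here is illustrative, not taken from the original:

```python
from diffusers import DDPMScheduler, UNet2DModel

unet = UNet2DModel(sample_size=32, in_channels=3, out_channels=3)
scheduler = DDPMScheduler()
pipe = UnetSchedulerOneForwardPipeline(unet=unet, scheduler=scheduler)
output = pipe()  # tensor of ones, shape (1, 3, 32, 32)
```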
180
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim from dataclasses import dataclass from typing import Optional, Tuple, Union import flax import jax import jax.numpy as jnp from ..configuration_utils import ConfigMixin, register_to_config from .scheduling_utils_flax import ( CommonSchedulerState, FlaxKarrasDiffusionSchedulers, FlaxSchedulerMixin, FlaxSchedulerOutput, add_noise_common, get_velocity_common, ) @flax.struct.dataclass class lowerCamelCase_ : a__ = 42 # setable values a__ = 42 a__ = 42 a__ = None @classmethod def A ( cls , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): """simple docstring""" return cls(common=__lowerCAmelCase , init_noise_sigma=__lowerCAmelCase , timesteps=__lowerCAmelCase ) @dataclass class lowerCamelCase_ ( lowerCamelCase ): a__ = 42 class lowerCamelCase_ ( lowerCamelCase , lowerCamelCase ): a__ = [e.name for e in FlaxKarrasDiffusionSchedulers] a__ = 42 @property def A ( self ): """simple docstring""" return True @register_to_config def __init__( self , __lowerCAmelCase = 1_0_0_0 , __lowerCAmelCase = 0.0001 , __lowerCAmelCase = 0.02 , __lowerCAmelCase = "linear" , __lowerCAmelCase = None , __lowerCAmelCase = "fixed_small" , __lowerCAmelCase = True , __lowerCAmelCase = "epsilon" , __lowerCAmelCase = jnp.floataa , ): """simple docstring""" __magic_name__ :Optional[int] = dtype def A ( self , __lowerCAmelCase = None ): """simple docstring""" if common is None: __magic_name__ :Dict = CommonSchedulerState.create(self ) # standard deviation of the initial noise distribution __magic_name__ :Optional[Any] = jnp.array(1.0 , dtype=self.dtype ) __magic_name__ :str = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1] return DDPMSchedulerState.create( common=__lowerCAmelCase , init_noise_sigma=__lowerCAmelCase , timesteps=__lowerCAmelCase , ) def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = None ): """simple docstring""" return sample def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = () ): """simple docstring""" __magic_name__ :int = self.config.num_train_timesteps // num_inference_steps # creates integer timesteps by multiplying by ratio # rounding to avoid issues when num_inference_step is power of 3 __magic_name__ :List[Any] = (jnp.arange(0 , __lowerCAmelCase ) * step_ratio).round()[::-1] return state.replace( num_inference_steps=__lowerCAmelCase , timesteps=__lowerCAmelCase , ) def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=None ): """simple docstring""" __magic_name__ :Optional[Any] = state.common.alphas_cumprod[t] __magic_name__ :Optional[Any] = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) ) # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf) # and sample from it to get previous sample # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample __magic_name__ :Tuple = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t] if variance_type is None: __magic_name__ :Optional[Any] = self.config.variance_type # hacks - were probably added for training stability if variance_type == "fixed_small": __magic_name__ :Optional[Any] = jnp.clip(__lowerCAmelCase , a_min=1E-20 ) # for rl-diffuser https://arxiv.org/abs/2205.09991 elif variance_type == "fixed_small_log": __magic_name__ :Dict = jnp.log(jnp.clip(__lowerCAmelCase , a_min=1E-20 ) ) elif variance_type == "fixed_large": __magic_name__ :Tuple = 
state.common.betas[t] elif variance_type == "fixed_large_log": # Glide max_log __magic_name__ :Optional[Any] = jnp.log(state.common.betas[t] ) elif variance_type == "learned": return predicted_variance elif variance_type == "learned_range": __magic_name__ :Union[str, Any] = variance __magic_name__ :List[str] = state.common.betas[t] __magic_name__ :Any = (predicted_variance + 1) / 2 __magic_name__ :str = frac * max_log + (1 - frac) * min_log return variance def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = True , ): """simple docstring""" __magic_name__ :List[str] = timestep if key is None: __magic_name__ :Union[str, Any] = jax.random.PRNGKey(0 ) if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]: __magic_name__ , __magic_name__ :Dict = jnp.split(__lowerCAmelCase , sample.shape[1] , axis=1 ) else: __magic_name__ :Optional[int] = None # 1. compute alphas, betas __magic_name__ :Any = state.common.alphas_cumprod[t] __magic_name__ :int = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) ) __magic_name__ :Optional[int] = 1 - alpha_prod_t __magic_name__ :Union[str, Any] = 1 - alpha_prod_t_prev # 2. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf if self.config.prediction_type == "epsilon": __magic_name__ :List[Any] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 elif self.config.prediction_type == "sample": __magic_name__ :Tuple = model_output elif self.config.prediction_type == "v_prediction": __magic_name__ :Dict = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output else: raise ValueError( F'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` ''' ''' for the FlaxDDPMScheduler.''' ) # 3. Clip "predicted x_0" if self.config.clip_sample: __magic_name__ :Union[str, Any] = jnp.clip(__lowerCAmelCase , -1 , 1 ) # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf __magic_name__ :Union[str, Any] = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t __magic_name__ :Union[str, Any] = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t # 5. Compute predicted previous sample µ_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf __magic_name__ :Union[str, Any] = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample # 6. 
Add noise def random_variance(): __magic_name__ :Tuple = jax.random.split(__lowerCAmelCase , num=1 ) __magic_name__ :Dict = jax.random.normal(__lowerCAmelCase , shape=model_output.shape , dtype=self.dtype ) return (self._get_variance(__lowerCAmelCase , __lowerCAmelCase , predicted_variance=__lowerCAmelCase ) ** 0.5) * noise __magic_name__ :List[str] = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) ) __magic_name__ :int = pred_prev_sample + variance if not return_dict: return (pred_prev_sample, state) return FlaxDDPMSchedulerOutput(prev_sample=__lowerCAmelCase , state=__lowerCAmelCase ) def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , ): """simple docstring""" return add_noise_common(state.common , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , ): """simple docstring""" return get_velocity_common(state.common , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) def __len__( self ): """simple docstring""" return self.config.num_train_timesteps
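For context, a minimal JAX denoising-loop sketch using this scheduler (in diffusers the mangled class above is FlaxDDPMScheduler; the zero model output stands in for a real Flax UNet prediction):

```python
import jax
import jax.numpy as jnp

scheduler = FlaxDDPMScheduler(num_train_timesteps=1000)
state = scheduler.create_state()
state = scheduler.set_timesteps(state, num_inference_steps=50, shape=(1, 3, 32, 32))

key = jax.random.PRNGKey(0)
sample = jax.random.normal(key, (1, 3, 32, 32)) * state.init_noise_sigma
for t in state.timesteps:
    model_output = jnp.zeros_like(sample)  # stand-in for unet.apply(...)
    sample, state = scheduler.step(state, model_output, t, sample, key=key, return_dict=False)
```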
180
1
from __future__ import annotations

from math import pi
from typing import Protocol

import matplotlib.pyplot as plt
import numpy as np


class FilterType(Protocol):
    '''simple docstring'''

    def process(self, sample: float) -> float:
        """simple docstring"""
        return 0.0


def get_bounds(fft_results: np.ndarray, samplerate: int) -> tuple[int | float, int | float]:
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest


def show_frequency_response(filter_type: FilterType, samplerate: int) -> None:
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel('Frequency (Hz)')
    plt.xscale('log')
    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel('Gain (dB)')
    plt.plot(fft_db)
    plt.show()


def show_phase_response(filter_type: FilterType, samplerate: int) -> None:
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.angle(np.fft.fft(outputs))
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel('Frequency (Hz)')
    plt.xscale('log')
    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel('Phase shift (Radians)')
    plt.plot(np.unwrap(fft_out, -2 * pi))
    plt.show()
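Any object with a `process(sample) -> float` method satisfies the protocol. A trivial example that plots the flat response of an identity (all-pass) filter, assuming a typical 48 kHz sample rate:

```python
class IdentityFilter:
    def process(self, sample: float) -> float:
        return sample  # y[n] = x[n]


show_frequency_response(IdentityFilter(), 48000)  # flat 0 dB line
show_phase_response(IdentityFilter(), 48000)      # zero phase shift
```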
117
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel from transformers.utils import logging logging.set_verbosity_info() UpperCAmelCase__ = logging.get_logger(__name__) def _a ( a :List[str] , a :Tuple=False ) -> List[Any]: a = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((F"""blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") ) rename_keys.append((F"""blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") ) rename_keys.append((F"""blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") ) rename_keys.append((F"""blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") ) rename_keys.append((F"""blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") ) rename_keys.append((F"""blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") ) rename_keys.append((F"""blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") ) rename_keys.append((F"""blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") ) rename_keys.append((F"""blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") ) rename_keys.append((F"""blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") ) # projection layer + position embeddings rename_keys.extend( [ ('''cls_token''', '''vit.embeddings.cls_token'''), ('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''), ('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''), ('''pos_embed''', '''vit.embeddings.position_embeddings'''), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ('''norm.weight''', '''layernorm.weight'''), ('''norm.bias''', '''layernorm.bias'''), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" a = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ('''norm.weight''', '''vit.layernorm.weight'''), ('''norm.bias''', '''vit.layernorm.bias'''), ('''head.weight''', '''classifier.weight'''), ('''head.bias''', '''classifier.bias'''), ] ) return rename_keys def _a ( a :List[str] , a :int , a :Tuple=False ) -> Any: for i in range(config.num_hidden_layers ): if base_model: a = '''''' else: a = '''vit.''' # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) a = state_dict.pop(F"""blocks.{i}.attn.qkv.weight""" ) a = state_dict.pop(F"""blocks.{i}.attn.qkv.bias""" ) # next, add query, keys and values (in that order) to the state dict a = in_proj_weight[ : config.hidden_size, : ] a = in_proj_bias[: config.hidden_size] a = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] a = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] a = in_proj_weight[ -config.hidden_size :, : ] a = in_proj_bias[-config.hidden_size :] def _a ( a :List[Any] ) -> Dict: a = ['''head.weight''', '''head.bias'''] for k in ignore_keys: state_dict.pop(a , a ) def _a ( a :Union[str, Any] , a :str , a :List[Any] ) -> str: a = dct.pop(a ) a = val def _a ( ) -> Optional[Any]: a = 
'''http://images.cocodataset.org/val2017/000000039769.jpg''' a = Image.open(requests.get(a , stream=a ).raw ) return im @torch.no_grad() def _a ( a :Tuple , a :str , a :Optional[int]=True ) -> Dict: a = ViTConfig() # patch_size if model_name[-1] == "8": a = 8 # set labels if required if not base_model: a = 1_000 a = '''huggingface/label-files''' a = '''imagenet-1k-id2label.json''' a = json.load(open(hf_hub_download(a , a , repo_type='''dataset''' ) , '''r''' ) ) a = {int(a ): v for k, v in idalabel.items()} a = idalabel a = {v: k for k, v in idalabel.items()} # size of the architecture if model_name in ["dino_vits8", "dino_vits16"]: a = 384 a = 1_536 a = 12 a = 6 # load original model from torch hub a = torch.hub.load('''facebookresearch/dino:main''' , a ) original_model.eval() # load state_dict of original model, remove and rename some keys a = original_model.state_dict() if base_model: remove_classification_head_(a ) a = create_rename_keys(a , base_model=a ) for src, dest in rename_keys: rename_key(a , a , a ) read_in_q_k_v(a , a , a ) # load HuggingFace model if base_model: a = ViTModel(a , add_pooling_layer=a ).eval() else: a = ViTForImageClassification(a ).eval() model.load_state_dict(a ) # Check outputs on an image, prepared by ViTImageProcessor a = ViTImageProcessor() a = image_processor(images=prepare_img() , return_tensors='''pt''' ) a = encoding['''pixel_values'''] a = model(a ) if base_model: a = original_model(a ) assert torch.allclose(a , outputs.last_hidden_state[:, 0, :] , atol=1e-1 ) else: a = original_model(a ) assert logits.shape == outputs.logits.shape assert torch.allclose(a , outputs.logits , atol=1e-3 ) Path(a ).mkdir(exist_ok=a ) print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(a ) print(F"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(a ) if __name__ == "__main__": UpperCAmelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( "--model_name", default="dino_vitb16", type=str, help="Name of the model trained with DINO you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) parser.add_argument( "--base_model", action="store_true", help="Whether to only convert the base model (no projection head weights).", ) parser.set_defaults(base_model=True) UpperCAmelCase__ = parser.parse_args() convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
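The `__main__` block wires these into a CLI; calling the conversion function directly is equivalent. The output path below is illustrative, and the torch.hub DINO weights are downloaded on first use:

```python
# dino_vitb16 with base_model=True (the argparse default) keeps only the backbone.
convert_vit_checkpoint("dino_vitb16", "./dino_vitb16", True)
```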
117
1
'''simple docstring'''
import re

from filelock import FileLock

try:
    import nltk

    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False

if NLTK_AVAILABLE:
    with FileLock(".lock") as lock:
        nltk.download("punkt", quiet=True)


def add_newline_to_end_of_each_sentence(x: str) -> str:
    """simple docstring"""
    re.sub("<n>", "", x)  # remove pegasus newline char (note: the result is discarded here, as upstream)
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
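Given multiple sentences on one line, the splitter re-joins them with newlines (after the punkt model has been downloaded):

```python
text = "Pegasus is mythical. It is pure white."
print(add_newline_to_end_of_each_sentence(text))
# Pegasus is mythical.
# It is pure white.
```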
654
'''simple docstring'''


def catalan_number(number: int) -> int:
    """simple docstring"""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        msg = f"Input value of [number={number}] must be > 0"
        raise ValueError(msg)
    current_number = 1
    for i in range(1, number):
        current_number *= 4 * i - 2
        current_number //= i + 1
    return current_number


if __name__ == "__main__":
    import doctest

    doctest.testmod()
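The loop applies the Catalan recurrence C(i) = C(i-1) · (4i - 2) / (i + 1), so `catalan_number(n)` returns the (n-1)-th Catalan number for a 1-indexed input:

```python
print([catalan_number(n) for n in range(1, 7)])  # [1, 1, 2, 5, 14, 42]
```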
654
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available A_ : Union[str, Any] = { 'configuration_mobilenet_v2': [ 'MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MobileNetV2Config', 'MobileNetV2OnnxConfig', ], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A_ : Optional[Any] = ['MobileNetV2FeatureExtractor'] A_ : List[str] = ['MobileNetV2ImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A_ : Union[str, Any] = [ 'MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST', 'MobileNetV2ForImageClassification', 'MobileNetV2ForSemanticSegmentation', 'MobileNetV2Model', 'MobileNetV2PreTrainedModel', 'load_tf_weights_in_mobilenet_v2', ] if TYPE_CHECKING: from .configuration_mobilenet_va import ( MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileNetVaConfig, MobileNetVaOnnxConfig, ) try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_mobilenet_va import MobileNetVaFeatureExtractor from .image_processing_mobilenet_va import MobileNetVaImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mobilenet_va import ( MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel, MobileNetVaPreTrainedModel, load_tf_weights_in_mobilenet_va, ) else: import sys A_ : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
38
'''simple docstring'''
from __future__ import annotations

from typing import Any


class CircularQueueLinkedList:
    '''simple docstring'''

    def __init__(self, initial_capacity: int = 6) -> None:
        self.front: Node | None = None
        self.rear: Node | None = None
        self.create_linked_list(initial_capacity)

    def create_linked_list(self, initial_capacity: int) -> None:
        current_node = Node()
        self.front = current_node
        self.rear = current_node
        previous_node = current_node
        for _ in range(1, initial_capacity):
            current_node = Node()
            previous_node.next = current_node
            current_node.prev = previous_node
            previous_node = current_node
        previous_node.next = self.front
        self.front.prev = previous_node

    def is_empty(self) -> bool:
        return (
            self.front == self.rear
            and self.front is not None
            and self.front.data is None
        )

    def first(self) -> Any | None:
        self.check_can_perform_operation()
        return self.front.data if self.front else None

    def enqueue(self, data: Any) -> None:
        if self.rear is None:
            return
        self.check_is_full()
        if not self.is_empty():
            self.rear = self.rear.next
        if self.rear:
            self.rear.data = data

    def dequeue(self) -> Any:
        self.check_can_perform_operation()
        if self.rear is None or self.front is None:
            return None
        if self.front == self.rear:
            data = self.front.data
            self.front.data = None
            return data
        old_front = self.front
        self.front = old_front.next
        data = old_front.data
        old_front.data = None
        return data

    def check_can_perform_operation(self) -> None:
        if self.is_empty():
            raise Exception('''Empty Queue''')

    def check_is_full(self) -> None:
        if self.rear and self.rear.next == self.front:
            raise Exception('''Full Queue''')


class Node:
    '''simple docstring'''

    def __init__(self) -> None:
        self.data: Any | None = None
        self.next: Node | None = None
        self.prev: Node | None = None


if __name__ == "__main__":
    import doctest

    doctest.testmod()
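A short usage sketch of the fixed-capacity circular queue above (capacity is set at construction; `enqueue` on a full ring raises `Full Queue`):

```python
queue = CircularQueueLinkedList(2)
queue.enqueue("a")
queue.enqueue("b")
print(queue.first())    # 'a'
print(queue.dequeue())  # 'a'
print(queue.dequeue())  # 'b'
```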
460
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_speech_available, is_torch_available, ) SCREAMING_SNAKE_CASE_ : List[Any] = { '''configuration_trocr''': ['''TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TrOCRConfig'''], '''processing_trocr''': ['''TrOCRProcessor'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE_ : Optional[int] = [ '''TROCR_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TrOCRForCausalLM''', '''TrOCRPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig from .processing_trocr import TrOCRProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel else: import sys SCREAMING_SNAKE_CASE_ : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
718
"""simple docstring""" import os import unittest from transformers.models.phobert.tokenization_phobert import VOCAB_FILES_NAMES, PhobertTokenizer from ...test_tokenization_common import TokenizerTesterMixin class _A ( __a , unittest.TestCase ): __a = PhobertTokenizer __a = False def _lowerCamelCase ( self ) -> Any: super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt lowerCamelCase__ = ["T@@", "i", "I", "R@@", "r", "e@@"] lowerCamelCase__ = dict(zip(SCREAMING_SNAKE_CASE__ , range(len(SCREAMING_SNAKE_CASE__ ) ) ) ) lowerCamelCase__ = ["#version: 0.2", "l à</w>"] lowerCamelCase__ = {"unk_token": "<unk>"} lowerCamelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) lowerCamelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: for token in vocab_tokens: fp.write(f'{token} {vocab_tokens[token]}\n' ) with open(self.merges_file , "w" , encoding="utf-8" ) as fp: fp.write("\n".join(SCREAMING_SNAKE_CASE__ ) ) def _lowerCamelCase ( self , **SCREAMING_SNAKE_CASE__ ) -> Tuple: kwargs.update(self.special_tokens_map ) return PhobertTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ ) def _lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ) -> Tuple: lowerCamelCase__ = "Tôi là VinAI Research" lowerCamelCase__ = "T<unk> i <unk> <unk> <unk> <unk> <unk> <unk> I Re<unk> e<unk> <unk> <unk> <unk>" return input_text, output_text def _lowerCamelCase ( self ) -> Tuple: lowerCamelCase__ = PhobertTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) lowerCamelCase__ = "Tôi là VinAI Research" lowerCamelCase__ = "T@@ ô@@ i l@@ à V@@ i@@ n@@ A@@ I R@@ e@@ s@@ e@@ a@@ r@@ c@@ h".split() lowerCamelCase__ = tokenizer.tokenize(SCREAMING_SNAKE_CASE__ ) print(SCREAMING_SNAKE_CASE__ ) self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) lowerCamelCase__ = tokens + [tokenizer.unk_token] lowerCamelCase__ = [4, 3, 5, 3, 3, 3, 3, 3, 3, 6, 7, 9, 3, 9, 3, 3, 3, 3, 3] self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
274
0
import unittest import numpy as np import torch from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class SCREAMING_SNAKE_CASE ( unittest.TestCase ): """simple docstring""" @property def __A ( self: Optional[int] ) -> Union[str, Any]: torch.manual_seed(0 ) _A = UNetaDModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , ) return model def __A ( self: List[Any] ) -> List[str]: _A = self.dummy_uncond_unet _A = KarrasVeScheduler() _A = KarrasVePipeline(unet=__A , scheduler=__A ) pipe.to(__A ) pipe.set_progress_bar_config(disable=__A ) _A = torch.manual_seed(0 ) _A = pipe(num_inference_steps=2 , generator=__A , output_type='''numpy''' ).images _A = torch.manual_seed(0 ) _A = pipe(num_inference_steps=2 , generator=__A , output_type='''numpy''' , return_dict=__A )[0] _A = image[0, -3:, -3:, -1] _A = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) _A = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 @slow @require_torch class SCREAMING_SNAKE_CASE ( unittest.TestCase ): """simple docstring""" def __A ( self: Any ) -> str: _A = '''google/ncsnpp-celebahq-256''' _A = UNetaDModel.from_pretrained(__A ) _A = KarrasVeScheduler() _A = KarrasVePipeline(unet=__A , scheduler=__A ) pipe.to(__A ) pipe.set_progress_bar_config(disable=__A ) _A = torch.manual_seed(0 ) _A = pipe(num_inference_steps=20 , generator=__A , output_type='''numpy''' ).images _A = image[0, -3:, -3:, -1] assert image.shape == (1, 2_56, 2_56, 3) _A = np.array([0.578, 0.5_811, 0.5_924, 0.5_809, 0.587, 0.5_886, 0.5_861, 0.5_802, 0.586] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
484
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "MIT/ast-finetuned-audioset-10-10-0.4593": (
        "https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"
    ),
}


class ASTConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "audio-spectrogram-transformer"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        patch_size=16,
        qkv_bias=True,
        frequency_stride=10,
        time_stride=10,
        max_length=1024,
        num_mel_bins=128,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
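Instantiating the config with its defaults mirrors the `MIT/ast-finetuned-audioset-10-10-0.4593` architecture:

```python
config = ASTConfig()
print(config.hidden_size, config.num_mel_bins, config.max_length)  # 768 128 1024
```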
484
1
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {
    "configuration_vivit": ["VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "VivitConfig"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_vivit"] = ["VivitImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vivit"] = [
        "VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "VivitModel",
        "VivitPreTrainedModel",
        "VivitForVideoClassification",
    ]

if TYPE_CHECKING:
    from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_vivit import VivitImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vivit import (
            VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            VivitForVideoClassification,
            VivitModel,
            VivitPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
515
import numpy as np
from cv2 import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image

from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs

img = imread(r"digital_image_processing/image_data/lena_small.jpg")
gray = cvtColor(img, COLOR_BGR2GRAY)


def test_convert_to_negative():
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()


def test_change_contrast():
    with Image.open("digital_image_processing/image_data/lena_small.jpg") as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110)).startswith(
            "<PIL.Image.Image image mode=RGB size=100x100 at"
        )


def test_gen_gaussian_kernel():
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()


def test_canny():
    canny_img = imread("digital_image_processing/image_data/lena_small.jpg", 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()


def test_gen_gaussian_kernel_filter():
    assert gg.gaussian_filter(gray, 5, sigma=0.9).all()


def test_convolve_filter():
    # laplace diagonals
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    res = conv.img_convolve(gray, laplace).astype(uint8)
    assert res.any()


def test_median_filter():
    assert med.median_filter(gray, 3).any()


def test_sobel_filter():
    grad, theta = sob.sobel_filter(gray)
    assert grad.any() and theta.any()


def test_sepia():
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()


def test_burkes(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()


def test_nearest_neighbour(
    file_path: str = "digital_image_processing/image_data/lena_small.jpg",
):
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()


def test_local_binary_pattern():
    file_path = "digital_image_processing/image_data/lena.jpg"

    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0)

    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]

    neighbors_pixels = lbp.get_neighbors_pixel(
        image, x_coordinate, y_coordinate, center
    )

    assert neighbors_pixels is not None

    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))

    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)

    assert lbp_image.any()
515
1
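# Illustrative sketch (not part of the dataset row above): a minimal stand-in
# for the lazy-import pattern the vivit `__init__` relies on. `_LazyModule`
# keeps startup cheap by importing a submodule only when one of its attributes
# is first accessed. Names here are hypothetical, not transformers' real class.
import importlib
import types


class LazyModule(types.ModuleType):
    """Minimal sketch of a lazily-loading module."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each public attribute to the submodule that defines it.
        self._attr_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        # Only called when normal lookup fails, i.e. on first access.
        module_name = self._attr_to_module[attr]
        module = importlib.import_module(f"{self.__name__}.{module_name}")
        return getattr(module, attr)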
from typing import Any, Dict, List, Union

from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_vision_available():
    from ..image_utils import load_image

if is_torch_available():
    import torch

    from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING

logger = logging.get_logger(__name__)


Prediction = Dict[str, Any]
Predictions = List[Prediction]


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ObjectDetectionPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        requires_backends(self, "vision")
        self.check_model_type(
            dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items())
        )

    def _sanitize_parameters(self, **kwargs):
        postprocess_kwargs = {}
        if "threshold" in kwargs:
            postprocess_kwargs["threshold"] = kwargs["threshold"]
        return {}, {}, postprocess_kwargs

    def __call__(self, *args, **kwargs) -> Union[Predictions, List[Prediction]]:
        return super().__call__(*args, **kwargs)

    def preprocess(self, image):
        image = load_image(image)
        target_size = torch.IntTensor([[image.height, image.width]])
        inputs = self.image_processor(images=[image], return_tensors="pt")
        if self.tokenizer is not None:
            inputs = self.tokenizer(text=inputs["words"], boxes=inputs["boxes"], return_tensors="pt")
        inputs["target_size"] = target_size
        return inputs

    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        outputs = self.model(**model_inputs)
        model_outputs = outputs.__class__({"target_size": target_size, **outputs})
        if self.tokenizer is not None:
            model_outputs["bbox"] = model_inputs["bbox"]
        return model_outputs

    def postprocess(self, model_outputs, threshold=0.9):
        target_size = model_outputs["target_size"]
        if self.tokenizer is not None:
            # This is a LayoutLMForTokenClassification variant.
            # The OCR got the boxes and the model classified the words.
            height, width = target_size[0].tolist()

            def unnormalize(bbox):
                return self._get_bounding_box(
                    torch.Tensor(
                        [
                            (width * bbox[0] / 1000),
                            (height * bbox[1] / 1000),
                            (width * bbox[2] / 1000),
                            (height * bbox[3] / 1000),
                        ]
                    )
                )

            scores, classes = model_outputs["logits"].squeeze(0).softmax(dim=-1).max(dim=-1)
            labels = [self.model.config.id2label[prediction] for prediction in classes.tolist()]
            boxes = [unnormalize(bbox) for bbox in model_outputs["bbox"].squeeze(0)]
            keys = ["score", "label", "box"]
            annotation = [dict(zip(keys, vals)) for vals in zip(scores.tolist(), labels, boxes) if vals[0] > threshold]
        else:
            # This is a regular ForObjectDetectionModel
            raw_annotations = self.image_processor.post_process_object_detection(model_outputs, threshold, target_size)
            raw_annotation = raw_annotations[0]
            scores = raw_annotation["scores"]
            labels = raw_annotation["labels"]
            boxes = raw_annotation["boxes"]

            raw_annotation["scores"] = scores.tolist()
            raw_annotation["labels"] = [self.model.config.id2label[label.item()] for label in labels]
            raw_annotation["boxes"] = [self._get_bounding_box(box) for box in boxes]

            # {"scores": [...], ...} --> [{"score":x, ...}, ...]
            keys = ["score", "label", "box"]
            annotation = [
                dict(zip(keys, vals))
                for vals in zip(raw_annotation["scores"], raw_annotation["labels"], raw_annotation["boxes"])
            ]

        return annotation

    def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
        if self.framework != "pt":
            raise ValueError("The ObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
70
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/swinv2-tiny-patch4-window8-256": (
        "https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"
    ),
}


class Swinv2Config(PretrainedConfig):
    model_type = "swinv2"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.pretrained_window_sizes = (0, 0, 0, 0)
70
1
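# Illustrative sketch of the box arithmetic the token-classification branch of
# the pipeline above performs: LayoutLM-style models report boxes on a 0-1000
# grid, so mapping back to pixels scales by width/1000 and height/1000. This
# helper is a standalone assumption for demonstration, not the pipeline's API.
def unnormalize_box(bbox, width, height):
    """Map an (x0, y0, x1, y1) box on a 0-1000 grid to pixel coordinates."""
    return {
        "xmin": int(width * bbox[0] / 1000),
        "ymin": int(height * bbox[1] / 1000),
        "xmax": int(width * bbox[2] / 1000),
        "ymax": int(height * bbox[3] / 1000),
    }


# e.g. unnormalize_box((250, 100, 500, 200), width=800, height=600)
# -> {"xmin": 200, "ymin": 60, "xmax": 400, "ymax": 120}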
import math

import tensorflow as tf
from packaging import version


def _gelu(x):
    x = tf.convert_to_tensor(x)
    cdf = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0), x.dtype)))

    return x * cdf


def _gelu_new(x):
    x = tf.convert_to_tensor(x)
    pi = tf.cast(math.pi, x.dtype)
    coeff = tf.cast(0.044715, x.dtype)
    cdf = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi) * (x + coeff * tf.pow(x, 3))))

    return x * cdf


def mish(x):
    x = tf.convert_to_tensor(x)

    return x * tf.tanh(tf.math.softplus(x))


def gelu_fast(x):
    x = tf.convert_to_tensor(x)
    coeff1 = tf.cast(0.044715, x.dtype)
    coeff2 = tf.cast(0.7978845608, x.dtype)

    return 0.5 * x * (1.0 + tf.tanh(x * coeff2 * (1.0 + coeff1 * x * x)))


def quick_gelu(x):
    x = tf.convert_to_tensor(x)
    coeff = tf.cast(1.702, x.dtype)

    return x * tf.math.sigmoid(coeff * x)


def gelu_10(x):
    return tf.clip_by_value(_gelu(x), -10, 10)


def glu(x, axis=-1):
    a, b = tf.split(x, 2, axis=axis)

    return a * tf.math.sigmoid(b)


if version.parse(tf.version.VERSION) >= version.parse("2.4"):

    def approximate_gelu_wrap(x):
        return tf.keras.activations.gelu(x, approximate=True)

    gelu = tf.keras.activations.gelu
    gelu_new = approximate_gelu_wrap
else:
    gelu = _gelu
    gelu_new = _gelu_new


ACT2FN = {
    "gelu": gelu,
    "gelu_10": gelu_10,
    "gelu_fast": gelu_fast,
    "gelu_new": gelu_new,
    "glu": glu,
    "mish": mish,
    "quick_gelu": quick_gelu,
    "relu": tf.keras.activations.relu,
    "sigmoid": tf.keras.activations.sigmoid,
    "silu": tf.keras.activations.swish,
    "swish": tf.keras.activations.swish,
    "tanh": tf.keras.activations.tanh,
}


def get_tf_activation(activation_string):
    if activation_string in ACT2FN:
        return ACT2FN[activation_string]
    else:
        raise KeyError(f"function {activation_string} not found in ACT2FN mapping {list(ACT2FN.keys())}")
510
import argparse
import importlib
from pathlib import Path

# Test all the extensions added in the setup
FILES_TO_FIND = [
    "kernels/rwkv/wkv_cuda.cu",
    "kernels/rwkv/wkv_op.cpp",
    "kernels/deformable_detr/ms_deform_attn.h",
    "kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh",
    "models/graphormer/algos_graphormer.pyx",
]


def test_custom_files_are_present(transformers_path):
    for file in FILES_TO_FIND:
        if not (transformers_path / file).exists():
            return False
    return True


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_lib", action="store_true", help="Whether to check the build or the actual package.")
    args = parser.parse_args()
    if args.check_lib:
        transformers_module = importlib.import_module("transformers")
        transformers_path = Path(transformers_module.__file__).parent
    else:
        transformers_path = Path.cwd() / "build/lib/transformers"
    if not test_custom_files_are_present(transformers_path):
        raise ValueError("The built release does not contain the custom files. Fix this before going further!")
510
1
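# Illustrative usage sketch for the activation registry above; the import path
# follows transformers' `activations_tf` module, and the output values in the
# comment are approximate.
import tensorflow as tf
from transformers.activations_tf import get_tf_activation

act = get_tf_activation("quick_gelu")
x = tf.constant([-1.0, 0.0, 1.0])
y = act(x)  # x * sigmoid(1.702 * x): roughly [-0.154, 0.0, 0.846]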
import os from bleurt import score # From: git+https://github.com/google-research/bleurt.git import datasets lowerCAmelCase = datasets.logging.get_logger(__name__) lowerCAmelCase = """\ @inproceedings{bleurt, title={BLEURT: Learning Robust Metrics for Text Generation}, author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh}, booktitle={ACL}, year={2020}, url={https://arxiv.org/abs/2004.04696} } """ lowerCAmelCase = """\ BLEURT a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018) and then employing another pre-training phrase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune it for your specific application (the latter is expected to perform better). See the project's README at https://github.com/google-research/bleurt#readme for more information. """ lowerCAmelCase = """ BLEURT score. Args: `predictions` (list of str): prediction/candidate sentences `references` (list of str): reference sentences `checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None. Returns: 'scores': List of scores. Examples: >>> predictions = [\"hello there\", \"general kenobi\"] >>> references = [\"hello there\", \"general kenobi\"] >>> bleurt = datasets.load_metric(\"bleurt\") >>> results = bleurt.compute(predictions=predictions, references=references) >>> print([round(v, 2) for v in results[\"scores\"]]) [1.03, 1.04] """ lowerCAmelCase = { """bleurt-tiny-128""": """https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip""", """bleurt-tiny-512""": """https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip""", """bleurt-base-128""": """https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip""", """bleurt-base-512""": """https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip""", """bleurt-large-128""": """https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip""", """bleurt-large-512""": """https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip""", """BLEURT-20-D3""": """https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip""", """BLEURT-20-D6""": """https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip""", """BLEURT-20-D12""": """https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip""", """BLEURT-20""": """https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip""", } @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCamelCase ( datasets.Metric ): def A( self): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , homepage='''https://github.com/google-research/bleurt''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Value('''string''' , id='''sequence'''), '''references''': datasets.Value('''string''' , id='''sequence'''), }) , codebase_urls=['''https://github.com/google-research/bleurt'''] , reference_urls=['''https://github.com/google-research/bleurt''', '''https://arxiv.org/abs/2004.04696'''] , ) def A( self , lowercase__): # check that config name specifies a valid BLEURT model if self.config_name == "default": logger.warning( '''Using default BLEURT-Base checkpoint for sequence maximum length 128. 
''' '''You can use a bigger model for better results with e.g.: datasets.load_metric('bleurt', 'bleurt-large-512').''')
            checkpoint_name = "bleurt-base-128"
        elif self.config_name.lower() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.lower()
        elif self.config_name.upper() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.upper()
        else:
            raise KeyError(
                f"{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}"
            )
        # download the model checkpoint specified by self.config_name and set up the scorer
        model_path = lowercase__.download_and_extract(CHECKPOINT_URLS[checkpoint_name])
        self.scorer = score.BleurtScorer(os.path.join(model_path, checkpoint_name))

    def _compute(self, predictions, references):
        scores = self.scorer.score(references=references, candidates=predictions)
        return {"scores": scores}
462
import unittest from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin lowerCAmelCase = get_tests_dir("""fixtures/test_sentencepiece.model""") @require_sentencepiece @require_tokenizers class lowerCamelCase ( _UpperCamelCase , unittest.TestCase ): _lowerCAmelCase : Union[str, Any] = XLNetTokenizer _lowerCAmelCase : int = XLNetTokenizerFast _lowerCAmelCase : Optional[Any] = True _lowerCAmelCase : Optional[Any] = True def A( self): super().setUp() # We have a SentencePiece fixture for testing __UpperCAmelCase : Optional[Any] = XLNetTokenizer(lowercase__ , keep_accents=lowercase__) tokenizer.sanitize_special_tokens() tokenizer.save_pretrained(self.tmpdirname) def A( self): __UpperCAmelCase : Tuple = '''<s>''' __UpperCAmelCase : List[str] = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase__) , lowercase__) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase__) , lowercase__) def A( self): __UpperCAmelCase : Union[str, Any] = list(self.get_tokenizer().get_vocab().keys()) self.assertEqual(vocab_keys[0] , '''<unk>''') self.assertEqual(vocab_keys[1] , '''<s>''') self.assertEqual(vocab_keys[-1] , '''<eod>''') self.assertEqual(len(lowercase__) , 1_0_0_6) def A( self): self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_0) def A( self): __UpperCAmelCase : List[str] = XLNetTokenizer(lowercase__ , keep_accents=lowercase__) __UpperCAmelCase : Optional[Any] = tokenizer.tokenize('''This is a test''') self.assertListEqual(lowercase__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est''']) self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase__) , [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]) __UpperCAmelCase : Tuple = tokenizer.tokenize('''I was born in 92000, and this is falsé.''') self.assertListEqual( lowercase__ , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.''', ] , ) __UpperCAmelCase : Union[str, Any] = tokenizer.convert_tokens_to_ids(lowercase__) self.assertListEqual(lowercase__ , [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 0, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 0, 4]) __UpperCAmelCase : int = tokenizer.convert_ids_to_tokens(lowercase__) self.assertListEqual( lowercase__ , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.''', ] , ) def A( self): __UpperCAmelCase : str = XLNetTokenizer(lowercase__ , do_lower_case=lowercase__) __UpperCAmelCase : int = tokenizer.tokenize('''I was born in 92000, and this is falsé.''') self.assertListEqual( lowercase__ , [ SPIECE_UNDERLINE + '''''', '''i''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + 
'''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''se''', '''.''', ] , ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''') , ['''▁he''', '''ll''', '''o''']) def A( self): __UpperCAmelCase : Optional[Any] = XLNetTokenizer(lowercase__ , do_lower_case=lowercase__) __UpperCAmelCase : Union[str, Any] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''') self.assertListEqual( lowercase__ , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''se''', '''.''', ] , ) @slow def A( self): __UpperCAmelCase : Tuple = XLNetTokenizer.from_pretrained('''xlnet-base-cased''') __UpperCAmelCase : Optional[int] = tokenizer.encode('''sequence builders''' , add_special_tokens=lowercase__) __UpperCAmelCase : Optional[int] = tokenizer.encode('''multi-sequence build''' , add_special_tokens=lowercase__) __UpperCAmelCase : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(lowercase__) __UpperCAmelCase : Optional[Any] = tokenizer.build_inputs_with_special_tokens(lowercase__ , lowercase__) assert encoded_sentence == text + [4, 3] assert encoded_pair == text + [4] + text_a + [4, 3] @slow def A( self): # fmt: off __UpperCAmelCase : Optional[Any] = {'''input_ids''': [[1_7, 2_1_4_4_2, 2_7_0, 1_7, 1_0, 1_4_6_4_5, 3_1_8, 3_4, 1_7, 4_5_4_6, 3_1_4_5, 7_8_7, 1_3, 7_7_5_2, 2_2_0_1_8, 2_3, 2_1, 1_7, 4_5_4_6, 3_1_4_5, 7_8_7, 1_3, 3_3_5_2, 1_4_4_3_1, 1_3, 5_5_0_0, 1_1, 1_1_7_6, 5_8_0, 1_3, 1_6_8_1_9, 4_7_9_7, 2_3, 1_7, 1_0, 1_7_1_3_5, 6_5_8, 1_9, 4_5_7, 7_9_3_2, 1_3, 1_8_4, 1_9, 3_1_5_4, 1_7_1_3_5, 6_4_6_8, 1_9, 1_4_0_4, 1_2_2_6_9, 1_9, 4_2_2_9, 5_3_5_6, 1_6_2_6_4, 4_6, 1_9, 1_7, 2_0_5_4_5, 1_0_3_9_5, 9, 9, 9, 1_1, 2_8, 6_4_2_1, 9_5_3_1, 2_0_7_2_9, 1_7, 1_0, 3_5_3, 1_7_0_2_2, 1_1, 2_1, 6_4_2_1, 9_5_3_1, 1_6_9_4_9, 1_7, 1_0, 1_1_5_0_9, 7_5_3, 1_1, 3_3, 9_5, 2_4_2_1, 7_3_8_5, 9_5_6, 1_4_4_3_1, 2_6_2_6, 2_5, 8_4_2, 7_3_8_5, 4_8_3_6, 2_1, 1_4_2_9, 2_2_7_2, 9_8_5_5, 3_1_2_0, 1_6_1, 2_4_7_3_8, 1_9, 1_3_2_0_3, 6_5_8, 2_1_8, 7_8_7, 2_1, 4_3_0, 1_8_4_8_2, 8_4_7, 2_6_3_7, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3_2_2, 2_2_1_7_8, 2_7, 1_0_6_4, 2_2, 9_5_6, 1_3, 1_1_1_0_1, 1_4_2_9, 5_8_5_4, 2_4_3_1_3, 1_8_9_5_3, 4_0, 4_2_2, 2_4_3_6_6, 6_8, 1_7_5_8, 3_7, 1_0_4_8_3, 1_4_2_5_7, 3_1, 2_0_7, 2_6_3, 2_1, 2_0_3, 3_7_7_3, 2_5, 7_1, 9_7_3_5, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3_2, 2_0_4_9, 3_4_4_2, 1_7, 1_3_8_9_4, 3_3_8_0, 2_3, 9_5, 1_8, 1_7_6_3_4, 2_2_8_8, 9, 4, 3]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=lowercase__ , model_name='''xlnet-base-cased''' , revision='''c841166438c31ec7ca9a106dee7bb312b73ae511''' , )
462
1
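# Illustrative sketch of the SentencePiece convention exercised by the
# tokenizer tests above: pieces carry a leading U+2581 ("▁") word-boundary
# marker, so detokenization is concatenation plus a marker-to-space swap.
# This helper is a demonstration assumption, not the library's API.
SPIECE_UNDERLINE = "\u2581"


def detokenize(pieces):
    """Join SentencePiece pieces back into plain text."""
    return "".join(pieces).replace(SPIECE_UNDERLINE, " ").strip()


# detokenize(["\u2581This", "\u2581is", "\u2581a", "\u2581t", "est"]) -> "This is a test"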
__version__ = "0.21.0"

from .accelerator import Accelerator
from .big_modeling import (
    cpu_offload,
    cpu_offload_with_hook,
    disk_offload,
    dispatch_model,
    init_empty_weights,
    init_on_device,
    load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
    DeepSpeedPlugin,
    DistributedDataParallelKwargs,
    DistributedType,
    FullyShardedDataParallelPlugin,
    GradScalerKwargs,
    InitProcessGroupKwargs,
    find_executable_batch_size,
    infer_auto_device_map,
    is_rich_available,
    load_checkpoint_in_model,
    synchronize_rng_states,
)

if is_rich_available():
    from .utils import rich
701
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available

# `_LazyModule` below resolves public attributes through this mapping.
_import_structure = {
    "configuration_biogpt": ["BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BioGptConfig"],
    "tokenization_biogpt": ["BioGptTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_biogpt"] = [
        "BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BioGptForCausalLM",
        "BioGptForTokenClassification",
        "BioGptForSequenceClassification",
        "BioGptModel",
        "BioGptPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
    from .tokenization_biogpt import BioGptTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_biogpt import (
            BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BioGptForCausalLM,
            BioGptForSequenceClassification,
            BioGptForTokenClassification,
            BioGptModel,
            BioGptPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
633
0
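# Illustrative usage sketch for the `accelerate` entry points re-exported
# above; the model, optimizer, and data here are hypothetical placeholders.
import torch
from accelerate import Accelerator

accelerator = Accelerator()
model = torch.nn.Linear(8, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=1e-2)
data = [(torch.randn(4, 8), torch.randint(0, 2, (4,))) for _ in range(3)]

# prepare() moves objects to the right device(s) and wraps them for
# distributed execution when applicable.
model, optimizer = accelerator.prepare(model, optimizer)
for x, y in data:
    x, y = x.to(accelerator.device), y.to(accelerator.device)
    loss = torch.nn.functional.cross_entropy(model(x), y)
    accelerator.backward(loss)  # replaces loss.backward()
    optimizer.step()
    optimizer.zero_grad()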
import math
import random


def sigmoid_function(value: float, deriv: bool = False) -> float:
    """Return sigmoid(value), or its derivative if `deriv` is True.

    When `deriv` is True, `value` is assumed to already be a sigmoid output,
    so the derivative simplifies to value * (1 - value).
    """
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value))


# Initial Value
INITIAL_VALUE = 0.02


def forward_propagation(expected: int, number_propagations: int) -> float:
    """Return the value found after the forward propagation training."""

    # Random weight
    weight = float(2 * (random.randint(1, 100)) - 1)

    for _ in range(number_propagations):
        # Forward propagation
        layer_1 = sigmoid_function(INITIAL_VALUE * weight)
        # How much did we miss?
        layer_1_error = (expected / 100) - layer_1
        # Error delta
        layer_1_delta = layer_1_error * sigmoid_function(layer_1, True)
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta

    return layer_1 * 100


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    expected = int(input("Expected value: "))
    number_propagations = int(input("Number of propagations: "))
    print(forward_propagation(expected, number_propagations))
442
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# `_LazyModule` below resolves public attributes through this mapping.
_import_structure = {
    "configuration_graphormer": ["GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "GraphormerConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_graphormer"] = [
        "GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GraphormerForGraphClassification",
        "GraphormerModel",
        "GraphormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_graphormer import (
            GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            GraphormerForGraphClassification,
            GraphormerModel,
            GraphormerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
442
1
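# Worked check of the sigmoid-derivative shortcut used in the training loop
# above: when s is the *output* of the sigmoid, s * (1 - s) equals the true
# derivative d/dx sigmoid(x). The numbers below are exact for x = 0.
import math

x = 0.0
s = 1 / (1 + math.exp(-x))  # 0.5
deriv = s * (1 - s)         # 0.25, which matches sigmoid'(0) = 1/4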
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/nllb-moe-54B": "https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json",
}


class NllbMoeConfig(PretrainedConfig):
    model_type = "nllb-moe"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=128112,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.05,
        decoder_layerdrop=0.05,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        router_bias=False,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        num_experts=128,
        expert_capacity=64,
        encoder_sparse_step=4,
        decoder_sparse_step=4,
        router_z_loss_coef=0.001,
        router_aux_loss_coef=0.001,
        second_expert_policy="all",
        normalize_router_prob_before_dropping=False,
        batch_prioritized_routing=False,
        moe_eval_capacity_token_fraction=1.0,
        moe_token_dropout=0.2,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        output_router_logits=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        self.decoder_sparse_step = decoder_sparse_step
        self.encoder_sparse_step = encoder_sparse_step
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.batch_prioritized_routing = batch_prioritized_routing
        self.second_expert_policy = second_expert_policy
        self.normalize_router_prob_before_dropping = normalize_router_prob_before_dropping
        self.moe_eval_capacity_token_fraction = moe_eval_capacity_token_fraction
        self.moe_token_dropout = moe_token_dropout
        self.output_router_logits = output_router_logits
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
704
from dataclasses import dataclass from typing import Dict, Optional, Union import torch import torch.nn.functional as F from torch import nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .attention import BasicTransformerBlock from .attention_processor import AttentionProcessor, AttnProcessor from .embeddings import TimestepEmbedding, Timesteps from .modeling_utils import ModelMixin @dataclass class __a ( __lowerCamelCase ): """simple docstring""" _A : torch.FloatTensor class __a ( __lowerCamelCase , __lowerCamelCase ): """simple docstring""" @register_to_config def __init__( self : str ,_UpperCamelCase : int = 3_2 ,_UpperCamelCase : int = 6_4 ,_UpperCamelCase : int = 2_0 ,_UpperCamelCase : int = 7_6_8 ,_UpperCamelCase : Tuple=7_7 ,_UpperCamelCase : List[str]=4 ,_UpperCamelCase : float = 0.0 ,_UpperCamelCase : str = "silu" ,_UpperCamelCase : Optional[str] = None ,_UpperCamelCase : Optional[str] = None ,_UpperCamelCase : Optional[str] = "linear" ,_UpperCamelCase : Optional[str] = "prd" ,_UpperCamelCase : Optional[int] = None ,_UpperCamelCase : Optional[int] = None ,_UpperCamelCase : Optional[int] = None ,) -> int: '''simple docstring''' super().__init__() SCREAMING_SNAKE_CASE__ =num_attention_heads SCREAMING_SNAKE_CASE__ =attention_head_dim SCREAMING_SNAKE_CASE__ =num_attention_heads * attention_head_dim SCREAMING_SNAKE_CASE__ =additional_embeddings SCREAMING_SNAKE_CASE__ =time_embed_dim or inner_dim SCREAMING_SNAKE_CASE__ =embedding_proj_dim or embedding_dim SCREAMING_SNAKE_CASE__ =clip_embed_dim or embedding_dim SCREAMING_SNAKE_CASE__ =Timesteps(_UpperCamelCase ,_UpperCamelCase ,0 ) SCREAMING_SNAKE_CASE__ =TimestepEmbedding(_UpperCamelCase ,_UpperCamelCase ,out_dim=_UpperCamelCase ,act_fn=_UpperCamelCase ) SCREAMING_SNAKE_CASE__ =nn.Linear(_UpperCamelCase ,_UpperCamelCase ) if embedding_proj_norm_type is None: SCREAMING_SNAKE_CASE__ =None elif embedding_proj_norm_type == "layer": SCREAMING_SNAKE_CASE__ =nn.LayerNorm(_UpperCamelCase ) else: raise ValueError(f"""unsupported embedding_proj_norm_type: {embedding_proj_norm_type}""" ) SCREAMING_SNAKE_CASE__ =nn.Linear(_UpperCamelCase ,_UpperCamelCase ) if encoder_hid_proj_type is None: SCREAMING_SNAKE_CASE__ =None elif encoder_hid_proj_type == "linear": SCREAMING_SNAKE_CASE__ =nn.Linear(_UpperCamelCase ,_UpperCamelCase ) else: raise ValueError(f"""unsupported encoder_hid_proj_type: {encoder_hid_proj_type}""" ) SCREAMING_SNAKE_CASE__ =nn.Parameter(torch.zeros(1 ,num_embeddings + additional_embeddings ,_UpperCamelCase ) ) if added_emb_type == "prd": SCREAMING_SNAKE_CASE__ =nn.Parameter(torch.zeros(1 ,1 ,_UpperCamelCase ) ) elif added_emb_type is None: SCREAMING_SNAKE_CASE__ =None else: raise ValueError( f"""`added_emb_type`: {added_emb_type} is not supported. 
Make sure to choose one of `'prd'` or `None`.""" ) SCREAMING_SNAKE_CASE__ =nn.ModuleList( [ BasicTransformerBlock( _UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,dropout=_UpperCamelCase ,activation_fn="""gelu""" ,attention_bias=_UpperCamelCase ,) for d in range(_UpperCamelCase ) ] ) if norm_in_type == "layer": SCREAMING_SNAKE_CASE__ =nn.LayerNorm(_UpperCamelCase ) elif norm_in_type is None: SCREAMING_SNAKE_CASE__ =None else: raise ValueError(f"""Unsupported norm_in_type: {norm_in_type}.""" ) SCREAMING_SNAKE_CASE__ =nn.LayerNorm(_UpperCamelCase ) SCREAMING_SNAKE_CASE__ =nn.Linear(_UpperCamelCase ,_UpperCamelCase ) SCREAMING_SNAKE_CASE__ =torch.full( [num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] ,-1_0000.0 ) causal_attention_mask.triu_(1 ) SCREAMING_SNAKE_CASE__ =causal_attention_mask[None, ...] self.register_buffer("""causal_attention_mask""" ,_UpperCamelCase ,persistent=_UpperCamelCase ) SCREAMING_SNAKE_CASE__ =nn.Parameter(torch.zeros(1 ,_UpperCamelCase ) ) SCREAMING_SNAKE_CASE__ =nn.Parameter(torch.zeros(1 ,_UpperCamelCase ) ) @property # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors def __A ( self : Dict ) -> Dict[str, AttentionProcessor]: '''simple docstring''' SCREAMING_SNAKE_CASE__ ={} def fn_recursive_add_processors(_UpperCamelCase : str ,_UpperCamelCase : torch.nn.Module ,_UpperCamelCase : Dict[str, AttentionProcessor] ): if hasattr(_UpperCamelCase ,"""set_processor""" ): SCREAMING_SNAKE_CASE__ =module.processor for sub_name, child in module.named_children(): fn_recursive_add_processors(f"""{name}.{sub_name}""" ,_UpperCamelCase ,_UpperCamelCase ) return processors for name, module in self.named_children(): fn_recursive_add_processors(_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ) return processors def __A ( self : Dict ,_UpperCamelCase : Union[AttentionProcessor, Dict[str, AttentionProcessor]] ) -> Optional[int]: '''simple docstring''' SCREAMING_SNAKE_CASE__ =len(self.attn_processors.keys() ) if isinstance(_UpperCamelCase ,_UpperCamelCase ) and len(_UpperCamelCase ) != count: raise ValueError( f"""A dict of processors was passed, but the number of processors {len(_UpperCamelCase )} does not match the""" f""" number of attention layers: {count}. 
Please make sure to pass {count} processor classes.""" ) def fn_recursive_attn_processor(_UpperCamelCase : str ,_UpperCamelCase : torch.nn.Module ,_UpperCamelCase : Optional[Any] ): if hasattr(_UpperCamelCase ,"""set_processor""" ): if not isinstance(_UpperCamelCase ,_UpperCamelCase ): module.set_processor(_UpperCamelCase ) else: module.set_processor(processor.pop(f"""{name}.processor""" ) ) for sub_name, child in module.named_children(): fn_recursive_attn_processor(f"""{name}.{sub_name}""" ,_UpperCamelCase ,_UpperCamelCase ) for name, module in self.named_children(): fn_recursive_attn_processor(_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ) def __A ( self : Any ) -> str: '''simple docstring''' self.set_attn_processor(AttnProcessor() ) def __A ( self : Optional[Any] ,_UpperCamelCase : List[str] ,_UpperCamelCase : Union[torch.Tensor, float, int] ,_UpperCamelCase : torch.FloatTensor ,_UpperCamelCase : Optional[torch.FloatTensor] = None ,_UpperCamelCase : Optional[torch.BoolTensor] = None ,_UpperCamelCase : bool = True ,) -> Any: '''simple docstring''' SCREAMING_SNAKE_CASE__ =hidden_states.shape[0] SCREAMING_SNAKE_CASE__ =timestep if not torch.is_tensor(_UpperCamelCase ): SCREAMING_SNAKE_CASE__ =torch.tensor([timesteps] ,dtype=torch.long ,device=hidden_states.device ) elif torch.is_tensor(_UpperCamelCase ) and len(timesteps.shape ) == 0: SCREAMING_SNAKE_CASE__ =timesteps[None].to(hidden_states.device ) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML SCREAMING_SNAKE_CASE__ =timesteps * torch.ones(_UpperCamelCase ,dtype=timesteps.dtype ,device=timesteps.device ) SCREAMING_SNAKE_CASE__ =self.time_proj(_UpperCamelCase ) # timesteps does not contain any weights and will always return f32 tensors # but time_embedding might be fp16, so we need to cast here. 
SCREAMING_SNAKE_CASE__ =timesteps_projected.to(dtype=self.dtype ) SCREAMING_SNAKE_CASE__ =self.time_embedding(_UpperCamelCase ) if self.embedding_proj_norm is not None: SCREAMING_SNAKE_CASE__ =self.embedding_proj_norm(_UpperCamelCase ) SCREAMING_SNAKE_CASE__ =self.embedding_proj(_UpperCamelCase ) if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None: SCREAMING_SNAKE_CASE__ =self.encoder_hidden_states_proj(_UpperCamelCase ) elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None: raise ValueError("""`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set""" ) SCREAMING_SNAKE_CASE__ =self.proj_in(_UpperCamelCase ) SCREAMING_SNAKE_CASE__ =self.positional_embedding.to(hidden_states.dtype ) SCREAMING_SNAKE_CASE__ =[] SCREAMING_SNAKE_CASE__ =0 if encoder_hidden_states is not None: additional_embeds.append(_UpperCamelCase ) additional_embeddings_len += encoder_hidden_states.shape[1] if len(proj_embeddings.shape ) == 2: SCREAMING_SNAKE_CASE__ =proj_embeddings[:, None, :] if len(hidden_states.shape ) == 2: SCREAMING_SNAKE_CASE__ =hidden_states[:, None, :] SCREAMING_SNAKE_CASE__ =additional_embeds + [ proj_embeddings, time_embeddings[:, None, :], hidden_states, ] if self.prd_embedding is not None: SCREAMING_SNAKE_CASE__ =self.prd_embedding.to(hidden_states.dtype ).expand(_UpperCamelCase ,-1 ,-1 ) additional_embeds.append(_UpperCamelCase ) SCREAMING_SNAKE_CASE__ =torch.cat( _UpperCamelCase ,dim=1 ,) # Allow positional_embedding to not include the `addtional_embeddings` and instead pad it with zeros for these additional tokens SCREAMING_SNAKE_CASE__ =additional_embeddings_len + proj_embeddings.shape[1] + 1 if positional_embeddings.shape[1] < hidden_states.shape[1]: SCREAMING_SNAKE_CASE__ =F.pad( _UpperCamelCase ,( 0, 0, additional_embeddings_len, self.prd_embedding.shape[1] if self.prd_embedding is not None else 0, ) ,value=0.0 ,) SCREAMING_SNAKE_CASE__ =hidden_states + positional_embeddings if attention_mask is not None: SCREAMING_SNAKE_CASE__ =(1 - attention_mask.to(hidden_states.dtype )) * -1_0000.0 SCREAMING_SNAKE_CASE__ =F.pad(_UpperCamelCase ,(0, self.additional_embeddings) ,value=0.0 ) SCREAMING_SNAKE_CASE__ =(attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype ) SCREAMING_SNAKE_CASE__ =attention_mask.repeat_interleave(self.config.num_attention_heads ,dim=0 ) if self.norm_in is not None: SCREAMING_SNAKE_CASE__ =self.norm_in(_UpperCamelCase ) for block in self.transformer_blocks: SCREAMING_SNAKE_CASE__ =block(_UpperCamelCase ,attention_mask=_UpperCamelCase ) SCREAMING_SNAKE_CASE__ =self.norm_out(_UpperCamelCase ) if self.prd_embedding is not None: SCREAMING_SNAKE_CASE__ =hidden_states[:, -1] else: SCREAMING_SNAKE_CASE__ =hidden_states[:, additional_embeddings_len:] SCREAMING_SNAKE_CASE__ =self.proj_to_clip_embeddings(_UpperCamelCase ) if not return_dict: return (predicted_image_embedding,) return PriorTransformerOutput(predicted_image_embedding=_UpperCamelCase ) def __A ( self : Union[str, Any] ,_UpperCamelCase : Tuple ) -> List[Any]: '''simple docstring''' SCREAMING_SNAKE_CASE__ =(prior_latents * self.clip_std) + self.clip_mean return prior_latents
588
0
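# Illustrative sketch: instantiating the MoE configuration above with a
# smaller, hypothetical expert layout; unspecified fields keep their defaults.
from transformers import NllbMoeConfig

config = NllbMoeConfig(num_experts=8, expert_capacity=16, encoder_sparse_step=2)
# With sparse_step=2, every second encoder layer becomes a sparse MoE layer
# with 8 experts, each accepting at most 16 tokens per routing step.
print(config.num_experts, config.expert_capacity)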
import json import os from typing import Optional import numpy as np from ...feature_extraction_utils import BatchFeature from ...processing_utils import ProcessorMixin from ...utils import logging from ...utils.hub import get_file_from_repo from ..auto import AutoTokenizer lowercase_ : int = logging.get_logger(__name__) class _lowerCamelCase ( UpperCamelCase_ ): __a = "AutoTokenizer" __a = ["tokenizer"] __a = { "semantic_prompt": 1, "coarse_prompt": 2, "fine_prompt": 2, } def __init__( self , lowerCAmelCase , lowerCAmelCase=None ) -> str: super().__init__(lowerCAmelCase ) SCREAMING_SNAKE_CASE__: Tuple= speaker_embeddings @classmethod def UpperCamelCase_ ( cls , lowerCAmelCase , lowerCAmelCase="speaker_embeddings_path.json" , **lowerCAmelCase ) -> List[Any]: if speaker_embeddings_dict_path is not None: SCREAMING_SNAKE_CASE__: Dict= get_file_from_repo( lowerCAmelCase , lowerCAmelCase , subfolder=kwargs.pop('''subfolder''' , lowerCAmelCase ) , cache_dir=kwargs.pop('''cache_dir''' , lowerCAmelCase ) , force_download=kwargs.pop('''force_download''' , lowerCAmelCase ) , proxies=kwargs.pop('''proxies''' , lowerCAmelCase ) , resume_download=kwargs.pop('''resume_download''' , lowerCAmelCase ) , local_files_only=kwargs.pop('''local_files_only''' , lowerCAmelCase ) , use_auth_token=kwargs.pop('''use_auth_token''' , lowerCAmelCase ) , revision=kwargs.pop('''revision''' , lowerCAmelCase ) , ) if speaker_embeddings_path is None: logger.warning( f'`{os.path.join(lowerCAmelCase , lowerCAmelCase )}` does not exists\n , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json\n dictionnary if wanted, otherwise set `speaker_embeddings_dict_path=None`.' ) SCREAMING_SNAKE_CASE__: Dict= None else: with open(lowerCAmelCase ) as speaker_embeddings_json: SCREAMING_SNAKE_CASE__: List[Any]= json.load(lowerCAmelCase ) else: SCREAMING_SNAKE_CASE__: Optional[Any]= None SCREAMING_SNAKE_CASE__: Union[str, Any]= AutoTokenizer.from_pretrained(lowerCAmelCase , **lowerCAmelCase ) return cls(tokenizer=lowerCAmelCase , speaker_embeddings=lowerCAmelCase ) def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase="speaker_embeddings_path.json" , lowerCAmelCase="speaker_embeddings" , lowerCAmelCase = False , **lowerCAmelCase , ) -> List[Any]: if self.speaker_embeddings is not None: os.makedirs(os.path.join(lowerCAmelCase , lowerCAmelCase , '''v2''' ) , exist_ok=lowerCAmelCase ) SCREAMING_SNAKE_CASE__: Tuple= {} SCREAMING_SNAKE_CASE__: Optional[int]= save_directory for prompt_key in self.speaker_embeddings: if prompt_key != "repo_or_path": SCREAMING_SNAKE_CASE__: Tuple= self._load_voice_preset(lowerCAmelCase ) SCREAMING_SNAKE_CASE__: Any= {} for key in self.speaker_embeddings[prompt_key]: np.save( os.path.join( embeddings_dict['''repo_or_path'''] , lowerCAmelCase , f'{prompt_key}_{key}' ) , voice_preset[key] , allow_pickle=lowerCAmelCase , ) SCREAMING_SNAKE_CASE__: List[Any]= os.path.join(lowerCAmelCase , f'{prompt_key}_{key}.npy' ) SCREAMING_SNAKE_CASE__: Dict= tmp_dict with open(os.path.join(lowerCAmelCase , lowerCAmelCase ) , '''w''' ) as fp: json.dump(lowerCAmelCase , lowerCAmelCase ) super().save_pretrained(lowerCAmelCase , lowerCAmelCase , **lowerCAmelCase ) def UpperCamelCase_ ( self , lowerCAmelCase = None , **lowerCAmelCase ) -> Tuple: SCREAMING_SNAKE_CASE__: Union[str, Any]= self.speaker_embeddings[voice_preset] SCREAMING_SNAKE_CASE__: Any= {} for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]: if key not in voice_preset_paths: raise ValueError( f'Voice preset 
unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].' ) SCREAMING_SNAKE_CASE__: List[Any]= get_file_from_repo( self.speaker_embeddings.get('''repo_or_path''' , '''/''' ) , voice_preset_paths[key] , subfolder=kwargs.pop('''subfolder''' , lowerCAmelCase ) , cache_dir=kwargs.pop('''cache_dir''' , lowerCAmelCase ) , force_download=kwargs.pop('''force_download''' , lowerCAmelCase ) , proxies=kwargs.pop('''proxies''' , lowerCAmelCase ) , resume_download=kwargs.pop('''resume_download''' , lowerCAmelCase ) , local_files_only=kwargs.pop('''local_files_only''' , lowerCAmelCase ) , use_auth_token=kwargs.pop('''use_auth_token''' , lowerCAmelCase ) , revision=kwargs.pop('''revision''' , lowerCAmelCase ) , ) if path is None: raise ValueError( f'`{os.path.join(self.speaker_embeddings.get("repo_or_path" , "/" ) , voice_preset_paths[key] )}` does not exists\n , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}\n embeddings.' ) SCREAMING_SNAKE_CASE__: int= np.load(lowerCAmelCase ) return voice_preset_dict def UpperCamelCase_ ( self , lowerCAmelCase = None ) -> Any: for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]: if key not in voice_preset: raise ValueError(f'Voice preset unrecognized, missing {key} as a key.' ) if not isinstance(voice_preset[key] , np.ndarray ): raise ValueError(f'{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.' ) if len(voice_preset[key].shape ) != self.preset_shape[key]: raise ValueError(f'{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.' ) def __call__( self , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase="pt" , lowerCAmelCase=256 , lowerCAmelCase=False , lowerCAmelCase=True , lowerCAmelCase=False , **lowerCAmelCase , ) -> List[Any]: if voice_preset is not None and not isinstance(lowerCAmelCase , lowerCAmelCase ): if ( isinstance(lowerCAmelCase , lowerCAmelCase ) and self.speaker_embeddings is not None and voice_preset in self.speaker_embeddings ): SCREAMING_SNAKE_CASE__: List[Any]= self._load_voice_preset(lowerCAmelCase ) else: if isinstance(lowerCAmelCase , lowerCAmelCase ) and not voice_preset.endswith('''.npz''' ): SCREAMING_SNAKE_CASE__: Tuple= voice_preset + '''.npz''' SCREAMING_SNAKE_CASE__: Union[str, Any]= np.load(lowerCAmelCase ) if voice_preset is not None: self._validate_voice_preset_dict(lowerCAmelCase , **lowerCAmelCase ) SCREAMING_SNAKE_CASE__: Union[str, Any]= BatchFeature(data=lowerCAmelCase , tensor_type=lowerCAmelCase ) SCREAMING_SNAKE_CASE__: Any= self.tokenizer( lowerCAmelCase , return_tensors=lowerCAmelCase , padding='''max_length''' , max_length=lowerCAmelCase , return_attention_mask=lowerCAmelCase , return_token_type_ids=lowerCAmelCase , add_special_tokens=lowerCAmelCase , **lowerCAmelCase , ) if voice_preset is not None: SCREAMING_SNAKE_CASE__: Dict= voice_preset return encoded_text
64
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import ClassLabel, Features, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class TextClassification(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="text-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    text_column: str = "text"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.text_column: "text",
            self.label_column: "labels",
        }
64
1
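# Illustrative sketch of applying the text-classification task template above.
# The import path is available in older `datasets` releases that ship task
# templates; the feature layout here is a hypothetical two-class example.
from datasets import ClassLabel, Features, Value
from datasets.tasks import TextClassification

features = Features({"text": Value("string"), "labels": ClassLabel(names=["neg", "pos"])})
template = TextClassification(text_column="text", label_column="labels")
aligned = template.align_with_features(features)
# The aligned template now carries the concrete 2-class ClassLabel.
print(aligned.label_schema["labels"].names)  # ['neg', 'pos']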
import argparse import fairseq import torch from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging logging.set_verbosity_info() lowerCAmelCase__ : int = logging.get_logger(__name__) lowerCAmelCase__ : Any = { '''post_extract_proj''': '''feature_projection.projection''', '''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''', '''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''', '''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''', '''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''', '''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''', '''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''', '''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''', '''fc2''': '''encoder.layers.*.feed_forward.output_dense''', '''final_layer_norm''': '''encoder.layers.*.final_layer_norm''', '''encoder.layer_norm''': '''encoder.layer_norm''', '''encoder.layer_norm_for_extract''': '''layer_norm_for_extract''', '''w2v_model.layer_norm''': '''feature_projection.layer_norm''', '''quantizer.weight_proj''': '''quantizer.weight_proj''', '''quantizer.vars''': '''quantizer.codevectors''', '''project_q''': '''project_q''', '''final_proj''': '''project_hid''', '''w2v_encoder.proj''': '''lm_head''', '''label_embs_concat''': '''label_embeddings_concat''', '''mask_emb''': '''masked_spec_embed''', '''spk_proj''': '''speaker_proj''', } lowerCAmelCase__ : int = [ '''lm_head''', '''quantizer.weight_proj''', '''quantizer.codevectors''', '''project_q''', '''project_hid''', '''label_embeddings_concat''', '''speaker_proj''', '''layer_norm_for_extract''', ] def UpperCamelCase__ ( A__ , A__ , A__ , A__ , A__ ) -> Optional[Any]: for attribute in key.split('.' ): snake_case__ : Tuple = getattr(A__ , A__ ) if weight_type is not None: snake_case__ : Any = getattr(A__ , A__ ).shape else: snake_case__ : Any = hf_pointer.shape if hf_shape != value.shape: raise ValueError( F"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be""" F""" {value.shape} for {full_name}""" ) if weight_type == "weight": snake_case__ : int = value elif weight_type == "weight_g": snake_case__ : Optional[Any] = value elif weight_type == "weight_v": snake_case__ : Any = value elif weight_type == "bias": snake_case__ : List[str] = value else: snake_case__ : Optional[int] = value logger.info(F"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" ) def UpperCamelCase__ ( A__ , A__ ) -> Dict: snake_case__ : str = [] snake_case__ : str = fairseq_model.state_dict() snake_case__ : Dict = hf_model.unispeech_sat.feature_extractor for name, value in fairseq_dict.items(): snake_case__ : Union[str, Any] = False if "conv_layers" in name: load_conv_layer( A__ , A__ , A__ , A__ , hf_model.config.feat_extract_norm == 'group' , ) snake_case__ : Optional[int] = True else: for key, mapped_key in MAPPING.items(): snake_case__ : int = 'unispeech_sat.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]: if "layer_norm_for_extract" in name and (".".join(name.split('.' )[:-1] ) != key): # special case since naming is very similar continue snake_case__ : Tuple = True if "*" in mapped_key: snake_case__ : Any = name.split(A__ )[0].split('.' 
)[-2] snake_case__ : List[Any] = mapped_key.replace('*' , A__ ) if "weight_g" in name: snake_case__ : Optional[int] = 'weight_g' elif "weight_v" in name: snake_case__ : Optional[int] = 'weight_v' elif "bias" in name: snake_case__ : int = 'bias' elif "weight" in name: # TODO: don't match quantizer.weight_proj snake_case__ : Union[str, Any] = 'weight' else: snake_case__ : List[Any] = None set_recursively(A__ , A__ , A__ , A__ , A__ ) continue if not is_used: unused_weights.append(A__ ) logger.warning(F"""Unused weights: {unused_weights}""" ) def UpperCamelCase__ ( A__ , A__ , A__ , A__ , A__ ) -> int: snake_case__ : List[str] = full_name.split('conv_layers.' )[-1] snake_case__ : Any = name.split('.' ) snake_case__ : List[str] = int(items[0] ) snake_case__ : int = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) snake_case__ : int = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) snake_case__ : Optional[Any] = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.""" ) snake_case__ : List[Any] = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" ) snake_case__ : List[str] = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(A__ ) @torch.no_grad() def UpperCamelCase__ ( A__ , A__ , A__=None , A__=None , A__=True ) -> Any: if config_path is not None: snake_case__ : int = UniSpeechSatConfig.from_pretrained(A__ ) else: snake_case__ : Any = UniSpeechSatConfig() snake_case__ : Tuple = '' if is_finetuned: snake_case__ : Tuple = UniSpeechSatForCTC(A__ ) else: snake_case__ : Optional[int] = UniSpeechSatForPreTraining(A__ ) snake_case__ : int = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} ) snake_case__ : List[str] = model[0].eval() recursively_load_weights(A__ , A__ ) hf_wavavec.save_pretrained(A__ ) if __name__ == "__main__": lowerCAmelCase__ : Optional[Any] = argparse.ArgumentParser() parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''') parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned 
model''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') parser.add_argument( '''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not''' ) lowerCAmelCase__ : Dict = parser.parse_args() convert_unispeech_sat_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
716
import unittest from transformers import BertGenerationTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin lowerCAmelCase__ : List[Any] = '''▁''' lowerCAmelCase__ : int = get_tests_dir('''fixtures/test_sentencepiece.model''') @require_sentencepiece class __snake_case ( _lowerCamelCase ,unittest.TestCase ): __lowerCamelCase = BertGenerationTokenizer __lowerCamelCase = False __lowerCamelCase = True def __a ( self ) -> Optional[int]: '''simple docstring''' super().setUp() snake_case__ : str = BertGenerationTokenizer(__UpperCamelCase , keep_accents=__UpperCamelCase ) tokenizer.save_pretrained(self.tmpdirname ) def __a ( self ) -> Optional[int]: '''simple docstring''' snake_case__ : List[str] = '<s>' snake_case__ : Dict = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__UpperCamelCase ) , __UpperCamelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__UpperCamelCase ) , __UpperCamelCase ) def __a ( self ) -> List[str]: '''simple docstring''' snake_case__ : Tuple = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '<unk>' ) self.assertEqual(vocab_keys[1] , '<s>' ) self.assertEqual(vocab_keys[-1] , '<pad>' ) self.assertEqual(len(__UpperCamelCase ) , 1002 ) def __a ( self ) -> int: '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 1000 ) def __a ( self ) -> Tuple: '''simple docstring''' snake_case__ : Optional[Any] = BertGenerationTokenizer(__UpperCamelCase , keep_accents=__UpperCamelCase ) snake_case__ : int = tokenizer.tokenize('This is a test' ) self.assertListEqual(__UpperCamelCase , ['▁This', '▁is', '▁a', '▁t', 'est'] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(__UpperCamelCase ) , [285, 46, 10, 170, 382] , ) snake_case__ : Any = tokenizer.tokenize('I was born in 92000, and this is falsé.' ) self.assertListEqual( __UpperCamelCase , [ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.', ] , ) snake_case__ : Optional[Any] = tokenizer.convert_tokens_to_ids(__UpperCamelCase ) self.assertListEqual( __UpperCamelCase , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , ) snake_case__ : int = tokenizer.convert_ids_to_tokens(__UpperCamelCase ) self.assertListEqual( __UpperCamelCase , [ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.', ] , ) @cached_property def __a ( self ) -> Dict: '''simple docstring''' return BertGenerationTokenizer.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' ) @slow def __a ( self ) -> Any: '''simple docstring''' snake_case__ : int = 'Hello World!' snake_case__ : Union[str, Any] = [18536, 2260, 101] self.assertListEqual(__UpperCamelCase , self.big_tokenizer.encode(__UpperCamelCase ) ) @slow def __a ( self ) -> Optional[int]: '''simple docstring''' snake_case__ : str = ( 'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . 
Also we will' ' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth' ) snake_case__ : List[Any] = [ 871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405, 34324, 497, 391, 408, 11342, 1244, 385, 100, 938, 985, 456, 574, 362, 12597, 3200, 3129, 1172, ] self.assertListEqual(__UpperCamelCase , self.big_tokenizer.encode(__UpperCamelCase ) ) @require_torch @slow def __a ( self ) -> List[str]: '''simple docstring''' import torch from transformers import BertGenerationConfig, BertGenerationEncoder # Build sequence snake_case__ : Optional[int] = list(self.big_tokenizer.get_vocab().keys() )[:10] snake_case__ : Optional[int] = ' '.join(__UpperCamelCase ) snake_case__ : int = self.big_tokenizer.encode_plus(__UpperCamelCase , return_tensors='pt' , return_token_type_ids=__UpperCamelCase ) snake_case__ : Tuple = self.big_tokenizer.batch_encode_plus( [sequence + ' ' + sequence] , return_tensors='pt' , return_token_type_ids=__UpperCamelCase ) snake_case__ : Dict = BertGenerationConfig() snake_case__ : List[str] = BertGenerationEncoder(__UpperCamelCase ) assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size with torch.no_grad(): model(**__UpperCamelCase ) model(**__UpperCamelCase ) @slow def __a ( self ) -> Dict: '''simple docstring''' snake_case__ : Optional[int] = {'input_ids': [[39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114], [448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=__UpperCamelCase , model_name='google/bert_for_seq_generation_L-24_bbc_encoder' , revision='c817d1fd1be2ffa69431227a1fe320544943d4db' , )
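A minimal usage sketch for the tokenizer exercised above; the checkpoint name and the expected ids are taken from the slow test, and Hub access is assumed.

from transformers import BertGenerationTokenizer

tokenizer = BertGenerationTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
ids = tokenizer.encode("Hello World!")
print(ids)  # [18536, 2260, 101], matching the slow test above
print(tokenizer.decode(ids))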
699
0
from string import ascii_uppercase

ALPHABET_VALUES = {str(ord(c) - 55): c for c in ascii_uppercase}


def decimal_to_any(num: int, base: int) -> str:
    """Convert a positive integer to its representation in any base from 2 to 36."""
    if isinstance(num, float):
        raise TypeError("int() can't convert non-string with explicit base")
    if num < 0:
        raise ValueError("parameter must be positive int")
    if isinstance(base, str):
        raise TypeError("'str' object cannot be interpreted as an integer")
    if isinstance(base, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if base in (0, 1):
        raise ValueError("base must be >= 2")
    if base > 36:
        raise ValueError("base must be <= 36")

    new_value = ""
    mod = 0
    div = 0
    while div != 1:
        div, mod = divmod(num, base)
        # Digits above 9 are written with the letters A..Z.
        if base >= 11 and 9 < mod < 36:
            actual_value = ALPHABET_VALUES[str(mod)]
        else:
            actual_value = str(mod)
        new_value += actual_value
        div = num // base
        num = div
        if div == 0:
            return str(new_value[::-1])
        elif div == 1:
            new_value += str(div)
            return str(new_value[::-1])

    return new_value[::-1]


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    for base in range(2, 37):
        for num in range(1_000):
            assert int(decimal_to_any(num, base), base) == num, (
                num,
                base,
                decimal_to_any(num, base),
                int(decimal_to_any(num, base), base),
            )
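A couple of spot checks for the converter above; each result round-trips through Python's built-in int().

print(decimal_to_any(255, 16))  # 'FF'
print(decimal_to_any(35, 36))   # 'Z'
assert int(decimal_to_any(255, 16), 16) == 255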
454
import unittest from transformers import SqueezeBertConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST, SqueezeBertForMaskedLM, SqueezeBertForMultipleChoice, SqueezeBertForQuestionAnswering, SqueezeBertForSequenceClassification, SqueezeBertForTokenClassification, SqueezeBertModel, ) class __lowercase ( lowercase_ ): '''simple docstring''' def __init__( self : Optional[int] , UpperCamelCase_ : Tuple , UpperCamelCase_ : List[str]=13 , UpperCamelCase_ : int=7 , UpperCamelCase_ : Dict=True , UpperCamelCase_ : int=True , UpperCamelCase_ : str=False , UpperCamelCase_ : Dict=True , UpperCamelCase_ : Union[str, Any]=99 , UpperCamelCase_ : Any=32 , UpperCamelCase_ : Union[str, Any]=5 , UpperCamelCase_ : List[str]=4 , UpperCamelCase_ : Union[str, Any]=64 , UpperCamelCase_ : Any="gelu" , UpperCamelCase_ : str=0.1 , UpperCamelCase_ : List[Any]=0.1 , UpperCamelCase_ : int=512 , UpperCamelCase_ : Dict=16 , UpperCamelCase_ : List[str]=2 , UpperCamelCase_ : int=0.02 , UpperCamelCase_ : Dict=3 , UpperCamelCase_ : List[Any]=4 , UpperCamelCase_ : int=None , UpperCamelCase_ : Any=2 , UpperCamelCase_ : Any=2 , UpperCamelCase_ : int=2 , UpperCamelCase_ : Optional[Any]=2 , UpperCamelCase_ : str=4 , UpperCamelCase_ : List[str]=1 , ): """simple docstring""" __A = parent __A = batch_size __A = seq_length __A = is_training __A = use_input_mask __A = use_token_type_ids __A = use_labels __A = vocab_size __A = hidden_size __A = num_hidden_layers __A = num_attention_heads __A = intermediate_size __A = hidden_act __A = hidden_dropout_prob __A = attention_probs_dropout_prob __A = max_position_embeddings __A = type_vocab_size __A = type_sequence_label_size __A = initializer_range __A = num_labels __A = num_choices __A = scope __A = q_groups __A = k_groups __A = v_groups __A = post_attention_groups __A = intermediate_groups __A = output_groups def lowerCAmelCase_ ( self : Dict ): """simple docstring""" __A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __A = None if self.use_input_mask: __A = random_attention_mask([self.batch_size, self.seq_length] ) __A = None __A = None __A = None if self.use_labels: __A = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __A = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __A = ids_tensor([self.batch_size] , self.num_choices ) __A = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCAmelCase_ ( self : Any ): """simple docstring""" return SqueezeBertConfig( embedding_size=self.hidden_size , vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , attention_probs_dropout_prob=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , q_groups=self.q_groups , k_groups=self.k_groups , v_groups=self.v_groups , post_attention_groups=self.post_attention_groups , intermediate_groups=self.intermediate_groups , 
output_groups=self.output_groups , ) def lowerCAmelCase_ ( self : Tuple , UpperCamelCase_ : List[Any] , UpperCamelCase_ : int , UpperCamelCase_ : int , UpperCamelCase_ : List[Any] , UpperCamelCase_ : int , UpperCamelCase_ : Dict ): """simple docstring""" __A = SqueezeBertModel(config=UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() __A = model(UpperCamelCase_ , UpperCamelCase_ ) __A = model(UpperCamelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCAmelCase_ ( self : List[str] , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : str , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : List[str] , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Optional[int] ): """simple docstring""" __A = SqueezeBertForMaskedLM(config=UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() __A = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , labels=UpperCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCAmelCase_ ( self : Optional[int] , UpperCamelCase_ : int , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Optional[Any] ): """simple docstring""" __A = SqueezeBertForQuestionAnswering(config=UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() __A = model( UpperCamelCase_ , attention_mask=UpperCamelCase_ , start_positions=UpperCamelCase_ , end_positions=UpperCamelCase_ ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCAmelCase_ ( self : str , UpperCamelCase_ : Tuple , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Any , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Any ): """simple docstring""" __A = self.num_labels __A = SqueezeBertForSequenceClassification(UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() __A = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , labels=UpperCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCAmelCase_ ( self : Any , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : str , UpperCamelCase_ : Dict , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Dict , UpperCamelCase_ : int ): """simple docstring""" __A = self.num_labels __A = SqueezeBertForTokenClassification(config=UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() __A = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , labels=UpperCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCAmelCase_ ( self : Tuple , UpperCamelCase_ : List[str] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Tuple , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Dict ): """simple docstring""" __A = self.num_choices __A = SqueezeBertForMultipleChoice(config=UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() __A = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __A = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __A = model( UpperCamelCase_ , attention_mask=UpperCamelCase_ , labels=UpperCamelCase_ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowerCAmelCase_ ( self : int ): """simple docstring""" 
__A = self.prepare_config_and_inputs() ((__A) , (__A) , (__A) , (__A) , (__A) , (__A)) = config_and_inputs __A = {"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class __lowercase ( lowercase_ , lowercase_ , unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE = ( ( SqueezeBertModel, SqueezeBertForMaskedLM, SqueezeBertForMultipleChoice, SqueezeBertForQuestionAnswering, SqueezeBertForSequenceClassification, SqueezeBertForTokenClassification, ) if is_torch_available() else None ) SCREAMING_SNAKE_CASE = ( { "feature-extraction": SqueezeBertModel, "fill-mask": SqueezeBertForMaskedLM, "question-answering": SqueezeBertForQuestionAnswering, "text-classification": SqueezeBertForSequenceClassification, "token-classification": SqueezeBertForTokenClassification, "zero-shot": SqueezeBertForSequenceClassification, } if is_torch_available() else {} ) SCREAMING_SNAKE_CASE = False SCREAMING_SNAKE_CASE = True SCREAMING_SNAKE_CASE = False def lowerCAmelCase_ ( self : Tuple ): """simple docstring""" __A = SqueezeBertModelTester(self ) __A = ConfigTester(self , config_class=UpperCamelCase_ , dim=37 ) def lowerCAmelCase_ ( self : Optional[Any] ): """simple docstring""" self.config_tester.run_common_tests() def lowerCAmelCase_ ( self : str ): """simple docstring""" __A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_model(*UpperCamelCase_ ) def lowerCAmelCase_ ( self : Union[str, Any] ): """simple docstring""" __A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_masked_lm(*UpperCamelCase_ ) def lowerCAmelCase_ ( self : Union[str, Any] ): """simple docstring""" __A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_question_answering(*UpperCamelCase_ ) def lowerCAmelCase_ ( self : Any ): """simple docstring""" __A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_sequence_classification(*UpperCamelCase_ ) def lowerCAmelCase_ ( self : Union[str, Any] ): """simple docstring""" __A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_token_classification(*UpperCamelCase_ ) def lowerCAmelCase_ ( self : Dict ): """simple docstring""" __A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_multiple_choice(*UpperCamelCase_ ) @slow def lowerCAmelCase_ ( self : int ): """simple docstring""" for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __A = SqueezeBertModel.from_pretrained(UpperCamelCase_ ) self.assertIsNotNone(UpperCamelCase_ ) @require_sentencepiece @require_tokenizers @require_torch class __lowercase ( unittest.TestCase ): '''simple docstring''' @slow def lowerCAmelCase_ ( self : Optional[int] ): """simple docstring""" __A = SqueezeBertForSequenceClassification.from_pretrained("""squeezebert/squeezebert-mnli""" ) __A = torch.tensor([[1, 29_414, 232, 328, 740, 1_140, 12_695, 69, 13, 1_588, 2]] ) __A = model(UpperCamelCase_ )[0] __A = torch.Size((1, 3) ) self.assertEqual(output.shape , UpperCamelCase_ ) __A = torch.tensor([[0.6401, -0.0349, -0.6041]] ) self.assertTrue(torch.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=1e-4 ) )
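A standalone version of the integration check above, runnable outside the test harness; it downloads squeezebert/squeezebert-mnli from the Hub, and the input ids are copied from the slow test.

import torch
from transformers import SqueezeBertForSequenceClassification

model = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli")
input_ids = torch.tensor([[1, 29_414, 232, 328, 740, 1_140, 12_695, 69, 13, 1_588, 2]])
with torch.no_grad():
    logits = model(input_ids)[0]
print(logits.shape)  # torch.Size([1, 3]), one score per MNLI class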
637
0
def join(separator: str, separated: list[str]) -> str:
    """Concatenate the strings in ``separated``, placing ``separator`` between them."""
    joined = ""
    for word_or_phrase in separated:
        if not isinstance(word_or_phrase, str):
            raise Exception("join() accepts only strings to be joined")
        joined += word_or_phrase + separator
    # Remove the trailing separator added by the loop.
    return joined.strip(separator)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
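Two quick calls to the function above.

print(join("-", ["a", "b", "c"]))             # a-b-c
print(join(" ", ["You", "are", "amazing!"]))  # You are amazing!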
700
from typing import Dict import numpy as np from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException if is_tf_available(): import tensorflow as tf from ..tf_utils import stable_softmax if is_torch_available(): import torch SCREAMING_SNAKE_CASE = logging.get_logger(__name__) @add_end_docstrings( __A , R""" top_k (`int`, defaults to 5): The number of predictions to return. targets (`str` or `List[str]`, *optional*): When passed, the model will limit the scores to the passed targets instead of looking up in the whole vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting token will be used (with a warning, and that might be slower). """ , ) class __UpperCAmelCase ( __A ): """simple docstring""" def snake_case_ ( self , __A ): if self.framework == "tf": __a = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy() elif self.framework == "pt": __a = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=__A ) else: raise ValueError("""Unsupported framework""" ) return masked_index def snake_case_ ( self , __A ): __a = self.get_masked_index(__A ) __a = np.prod(masked_index.shape ) if numel < 1: raise PipelineException( """fill-mask""" , self.model.base_model_prefix , f'''No mask_token ({self.tokenizer.mask_token}) found on the input''' , ) def snake_case_ ( self , __A ): if isinstance(__A , __A ): for model_input in model_inputs: self._ensure_exactly_one_mask_token(model_input["""input_ids"""][0] ) else: for input_ids in model_inputs["input_ids"]: self._ensure_exactly_one_mask_token(__A ) def snake_case_ ( self , __A , __A=None , **__A ): if return_tensors is None: __a = self.framework __a = self.tokenizer(__A , return_tensors=__A ) self.ensure_exactly_one_mask_token(__A ) return model_inputs def snake_case_ ( self , __A ): __a = self.model(**__A ) __a = model_inputs["""input_ids"""] return model_outputs def snake_case_ ( self , __A , __A=5 , __A=None ): # Cap top_k if there are targets if target_ids is not None and target_ids.shape[0] < top_k: __a = target_ids.shape[0] __a = model_outputs["""input_ids"""][0] __a = model_outputs["""logits"""] if self.framework == "tf": __a = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0] __a = outputs.numpy() __a = outputs[0, masked_index, :] __a = stable_softmax(__A , axis=-1 ) if target_ids is not None: __a = tf.gather_nd(tf.squeeze(__A , 0 ) , target_ids.reshape(-1 , 1 ) ) __a = tf.expand_dims(__A , 0 ) __a = tf.math.top_k(__A , k=__A ) __a , __a = topk.values.numpy(), topk.indices.numpy() else: __a = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=__A ).squeeze(-1 ) # Fill mask pipeline supports only one ${mask_token} per sample __a = outputs[0, masked_index, :] __a = logits.softmax(dim=-1 ) if target_ids is not None: __a = probs[..., target_ids] __a , __a = probs.topk(__A ) __a = [] __a = values.shape[0] == 1 for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist() ) ): __a = [] for v, p in zip(_values , _predictions ): # Copy is important since we're going to modify this array in place __a = input_ids.numpy().copy() if target_ids is not None: __a = target_ids[p].tolist() __a = p # Filter padding out: __a = tokens[np.where(tokens != self.tokenizer.pad_token_id )] # Originally we skip special tokens to give readable output. 
# For multi masks though, the other [MASK] would be removed otherwise # making the output look odd, so we add them back __a = self.tokenizer.decode(__A , skip_special_tokens=__A ) __a = {"""score""": v, """token""": p, """token_str""": self.tokenizer.decode([p] ), """sequence""": sequence} row.append(__A ) result.append(__A ) if single_mask: return result[0] return result def snake_case_ ( self , __A , __A=None ): if isinstance(__A , __A ): __a = [targets] try: __a = self.tokenizer.get_vocab() except Exception: __a = {} __a = [] for target in targets: __a = vocab.get(__A , __A ) if id_ is None: __a = self.tokenizer( __A , add_special_tokens=__A , return_attention_mask=__A , return_token_type_ids=__A , max_length=1 , truncation=__A , )["""input_ids"""] if len(__A ) == 0: logger.warning( f'''The specified target token `{target}` does not exist in the model vocabulary. ''' """We cannot replace it with anything meaningful, ignoring it""" ) continue __a = input_ids[0] # XXX: If users encounter this pass # it becomes pretty slow, so let's make sure # The warning enables them to fix the input to # get faster performance. logger.warning( f'''The specified target token `{target}` does not exist in the model vocabulary. ''' f'''Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`.''' ) target_ids.append(id_ ) __a = list(set(__A ) ) if len(__A ) == 0: raise ValueError("""At least one target must be provided when passed.""" ) __a = np.array(__A ) return target_ids def snake_case_ ( self , __A=None , __A=None ): __a = {} if targets is not None: __a = self.get_target_ids(__A , __A ) __a = target_ids if top_k is not None: __a = top_k if self.tokenizer.mask_token_id is None: raise PipelineException( """fill-mask""" , self.model.base_model_prefix , """The tokenizer does not define a `mask_token`.""" ) return {}, {}, postprocess_params def __call__( self , __A , *__A , **__A ): __a = super().__call__(__A , **__A ) if isinstance(__A , __A ) and len(__A ) == 1: return outputs[0] return outputs
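A hedged usage sketch for the pipeline class above, driven through the public `pipeline` factory; the checkpoint name is an assumption, any fill-mask model from the Hub works.

from transformers import pipeline

unmasker = pipeline("fill-mask", model="distilroberta-base")
for prediction in unmasker("Paris is the <mask> of France.", top_k=2):
    print(prediction["token_str"], round(prediction["score"], 3))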
209
0
def binary_or(a: int, b: int) -> str:
    """Return the bitwise OR of two non-negative integers as a binary string."""
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int("1" in (char_a, char_b)))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
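A spot check against Python's built-in bitwise OR.

print(binary_or(25, 32))                     # 0b111001
assert int(binary_or(25, 32), 2) == 25 | 32  # 57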
18
import importlib import json import os from collections import OrderedDict from typing import Dict, Optional, Union # Build the list of all feature extractors from ...configuration_utils import PretrainedConfig from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code from ...feature_extraction_utils import FeatureExtractionMixin from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging from .auto_factory import _LazyAutoMapping from .configuration_auto import ( CONFIG_MAPPING_NAMES, AutoConfig, model_type_to_module_name, replace_list_option_in_docstrings, ) __lowerCAmelCase : List[str] = logging.get_logger(__name__) __lowerCAmelCase : List[Any] = OrderedDict( [ ('audio-spectrogram-transformer', 'ASTFeatureExtractor'), ('beit', 'BeitFeatureExtractor'), ('chinese_clip', 'ChineseCLIPFeatureExtractor'), ('clap', 'ClapFeatureExtractor'), ('clip', 'CLIPFeatureExtractor'), ('clipseg', 'ViTFeatureExtractor'), ('conditional_detr', 'ConditionalDetrFeatureExtractor'), ('convnext', 'ConvNextFeatureExtractor'), ('cvt', 'ConvNextFeatureExtractor'), ('data2vec-audio', 'Wav2Vec2FeatureExtractor'), ('data2vec-vision', 'BeitFeatureExtractor'), ('deformable_detr', 'DeformableDetrFeatureExtractor'), ('deit', 'DeiTFeatureExtractor'), ('detr', 'DetrFeatureExtractor'), ('dinat', 'ViTFeatureExtractor'), ('donut-swin', 'DonutFeatureExtractor'), ('dpt', 'DPTFeatureExtractor'), ('encodec', 'EncodecFeatureExtractor'), ('flava', 'FlavaFeatureExtractor'), ('glpn', 'GLPNFeatureExtractor'), ('groupvit', 'CLIPFeatureExtractor'), ('hubert', 'Wav2Vec2FeatureExtractor'), ('imagegpt', 'ImageGPTFeatureExtractor'), ('layoutlmv2', 'LayoutLMv2FeatureExtractor'), ('layoutlmv3', 'LayoutLMv3FeatureExtractor'), ('levit', 'LevitFeatureExtractor'), ('maskformer', 'MaskFormerFeatureExtractor'), ('mctct', 'MCTCTFeatureExtractor'), ('mobilenet_v1', 'MobileNetV1FeatureExtractor'), ('mobilenet_v2', 'MobileNetV2FeatureExtractor'), ('mobilevit', 'MobileViTFeatureExtractor'), ('nat', 'ViTFeatureExtractor'), ('owlvit', 'OwlViTFeatureExtractor'), ('perceiver', 'PerceiverFeatureExtractor'), ('poolformer', 'PoolFormerFeatureExtractor'), ('regnet', 'ConvNextFeatureExtractor'), ('resnet', 'ConvNextFeatureExtractor'), ('segformer', 'SegformerFeatureExtractor'), ('sew', 'Wav2Vec2FeatureExtractor'), ('sew-d', 'Wav2Vec2FeatureExtractor'), ('speech_to_text', 'Speech2TextFeatureExtractor'), ('speecht5', 'SpeechT5FeatureExtractor'), ('swiftformer', 'ViTFeatureExtractor'), ('swin', 'ViTFeatureExtractor'), ('swinv2', 'ViTFeatureExtractor'), ('table-transformer', 'DetrFeatureExtractor'), ('timesformer', 'VideoMAEFeatureExtractor'), ('tvlt', 'TvltFeatureExtractor'), ('unispeech', 'Wav2Vec2FeatureExtractor'), ('unispeech-sat', 'Wav2Vec2FeatureExtractor'), ('van', 'ConvNextFeatureExtractor'), ('videomae', 'VideoMAEFeatureExtractor'), ('vilt', 'ViltFeatureExtractor'), ('vit', 'ViTFeatureExtractor'), ('vit_mae', 'ViTFeatureExtractor'), ('vit_msn', 'ViTFeatureExtractor'), ('wav2vec2', 'Wav2Vec2FeatureExtractor'), ('wav2vec2-conformer', 'Wav2Vec2FeatureExtractor'), ('wavlm', 'Wav2Vec2FeatureExtractor'), ('whisper', 'WhisperFeatureExtractor'), ('xclip', 'CLIPFeatureExtractor'), ('yolos', 'YolosFeatureExtractor'), ] ) __lowerCAmelCase : Optional[Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES) def a__ ( A_ ): '''simple docstring''' for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items(): if class_name in extractors: __magic_name__ = model_type_to_module_name(A_ 
) __magic_name__ = importlib.import_module(f'''.{module_name}''', """transformers.models""" ) try: return getattr(A_, A_ ) except AttributeError: continue for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items(): if getattr(A_, """__name__""", A_ ) == class_name: return extractor # We did not fine the class, but maybe it's because a dep is missing. In that case, the class will be in the main # init and we return the proper dummy to get an appropriate error message. __magic_name__ = importlib.import_module("""transformers""" ) if hasattr(A_, A_ ): return getattr(A_, A_ ) return None def a__ ( A_, A_ = None, A_ = False, A_ = False, A_ = None, A_ = None, A_ = None, A_ = False, **A_, ): '''simple docstring''' __magic_name__ = get_file_from_repo( A_, A_, cache_dir=A_, force_download=A_, resume_download=A_, proxies=A_, use_auth_token=A_, revision=A_, local_files_only=A_, ) if resolved_config_file is None: logger.info( """Could not locate the feature extractor configuration file, will try to use the model config instead.""" ) return {} with open(A_, encoding="""utf-8""" ) as reader: return json.load(A_ ) class UpperCAmelCase_ : '''simple docstring''' def __init__( self : Dict ) -> Optional[int]: """simple docstring""" raise EnvironmentError( """AutoFeatureExtractor is designed to be instantiated """ """using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method.""" ) @classmethod @replace_list_option_in_docstrings(UpperCamelCase__ ) def _lowercase ( cls : Optional[int] , UpperCamelCase__ : Any , **UpperCamelCase__ : Optional[int] ) -> str: """simple docstring""" __magic_name__ = kwargs.pop("""config""" , UpperCamelCase__ ) __magic_name__ = kwargs.pop("""trust_remote_code""" , UpperCamelCase__ ) __magic_name__ = True __magic_name__ , __magic_name__ = FeatureExtractionMixin.get_feature_extractor_dict(UpperCamelCase__ , **UpperCamelCase__ ) __magic_name__ = config_dict.get("""feature_extractor_type""" , UpperCamelCase__ ) __magic_name__ = None if "AutoFeatureExtractor" in config_dict.get("""auto_map""" , {} ): __magic_name__ = config_dict["""auto_map"""]["""AutoFeatureExtractor"""] # If we don't find the feature extractor class in the feature extractor config, let's try the model config. 
if feature_extractor_class is None and feature_extractor_auto_map is None: if not isinstance(UpperCamelCase__ , UpperCamelCase__ ): __magic_name__ = AutoConfig.from_pretrained(UpperCamelCase__ , **UpperCamelCase__ ) # It could be in `config.feature_extractor_type`` __magic_name__ = getattr(UpperCamelCase__ , """feature_extractor_type""" , UpperCamelCase__ ) if hasattr(UpperCamelCase__ , """auto_map""" ) and "AutoFeatureExtractor" in config.auto_map: __magic_name__ = config.auto_map["""AutoFeatureExtractor"""] if feature_extractor_class is not None: __magic_name__ = feature_extractor_class_from_name(UpperCamelCase__ ) __magic_name__ = feature_extractor_auto_map is not None __magic_name__ = feature_extractor_class is not None or type(UpperCamelCase__ ) in FEATURE_EXTRACTOR_MAPPING __magic_name__ = resolve_trust_remote_code( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) if has_remote_code and trust_remote_code: __magic_name__ = get_class_from_dynamic_module( UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ) __magic_name__ = kwargs.pop("""code_revision""" , UpperCamelCase__ ) if os.path.isdir(UpperCamelCase__ ): feature_extractor_class.register_for_auto_class() return feature_extractor_class.from_dict(UpperCamelCase__ , **UpperCamelCase__ ) elif feature_extractor_class is not None: return feature_extractor_class.from_dict(UpperCamelCase__ , **UpperCamelCase__ ) # Last try: we use the FEATURE_EXTRACTOR_MAPPING. elif type(UpperCamelCase__ ) in FEATURE_EXTRACTOR_MAPPING: __magic_name__ = FEATURE_EXTRACTOR_MAPPING[type(UpperCamelCase__ )] return feature_extractor_class.from_dict(UpperCamelCase__ , **UpperCamelCase__ ) raise ValueError( F'''Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a ''' F'''`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following ''' F'''`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys() )}''' ) @staticmethod def _lowercase ( UpperCamelCase__ : Any , UpperCamelCase__ : Optional[Any] ) -> Dict: """simple docstring""" FEATURE_EXTRACTOR_MAPPING.register(UpperCamelCase__ , UpperCamelCase__ )
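An end-to-end sketch of the auto class defined above; the checkpoint name appears in the companion test file, and its preprocessor config resolves through the mapping to Wav2Vec2FeatureExtractor.

from transformers import AutoFeatureExtractor

extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
print(type(extractor).__name__)  # Wav2Vec2FeatureExtractor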
529
0
"""simple docstring""" import json import sys import tempfile import unittest from pathlib import Path import transformers from transformers import ( CONFIG_MAPPING, FEATURE_EXTRACTOR_MAPPING, AutoConfig, AutoFeatureExtractor, WavaVecaConfig, WavaVecaFeatureExtractor, ) from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils')) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402 __lowerCamelCase :int = get_tests_dir('fixtures') __lowerCamelCase :List[Any] = get_tests_dir('fixtures/dummy_feature_extractor_config.json') __lowerCamelCase :str = get_tests_dir('fixtures/dummy-config.json') class A__ ( unittest.TestCase): """simple docstring""" def a__ ( self: int )-> Dict: lowerCamelCase : str = 0 def a__ ( self: Union[str, Any] )-> str: lowerCamelCase : List[str] = AutoFeatureExtractor.from_pretrained("""facebook/wav2vec2-base-960h""" ) self.assertIsInstance(__a , __a ) def a__ ( self: Tuple )-> List[str]: lowerCamelCase : Any = AutoFeatureExtractor.from_pretrained(__a ) self.assertIsInstance(__a , __a ) def a__ ( self: List[Any] )-> Union[str, Any]: with tempfile.TemporaryDirectory() as tmpdirname: lowerCamelCase : Any = WavaVecaConfig() # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally lowerCamelCase : Union[str, Any] = AutoFeatureExtractor.from_pretrained(__a ).to_dict() config_dict.pop("""feature_extractor_type""" ) lowerCamelCase : str = WavaVecaFeatureExtractor(**__a ) # save in new folder model_config.save_pretrained(__a ) config.save_pretrained(__a ) lowerCamelCase : Optional[Any] = AutoFeatureExtractor.from_pretrained(__a ) # make sure private variable is not incorrectly saved lowerCamelCase : Any = json.loads(config.to_json_string() ) self.assertTrue("""_processor_class""" not in dict_as_saved ) self.assertIsInstance(__a , __a ) def a__ ( self: List[str] )-> str: lowerCamelCase : Union[str, Any] = AutoFeatureExtractor.from_pretrained(__a ) self.assertIsInstance(__a , __a ) def a__ ( self: Any )-> Tuple: with self.assertRaisesRegex( __a , """bert-base is not a local folder and is not a valid model identifier""" ): lowerCamelCase : Union[str, Any] = AutoFeatureExtractor.from_pretrained("""bert-base""" ) def a__ ( self: Optional[Any] )-> int: with self.assertRaisesRegex( __a , r"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ): lowerCamelCase : str = AutoFeatureExtractor.from_pretrained(__a , revision="""aaaaaa""" ) def a__ ( self: List[str] )-> Tuple: with self.assertRaisesRegex( __a , """hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.""" , ): lowerCamelCase : int = AutoFeatureExtractor.from_pretrained("""hf-internal-testing/config-no-model""" ) def a__ ( self: Union[str, Any] )-> List[Any]: # If remote code is not set, we will time out when asking whether to load the model. with self.assertRaises(__a ): lowerCamelCase : int = AutoFeatureExtractor.from_pretrained( """hf-internal-testing/test_dynamic_feature_extractor""" ) # If remote code is disabled, we can't load this config. 
with self.assertRaises(__a ): lowerCamelCase : Optional[int] = AutoFeatureExtractor.from_pretrained( """hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=__a ) lowerCamelCase : Any = AutoFeatureExtractor.from_pretrained( """hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=__a ) self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" ) # Test feature extractor can be reloaded. with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained(__a ) lowerCamelCase : Tuple = AutoFeatureExtractor.from_pretrained(__a , trust_remote_code=__a ) self.assertEqual(reloaded_feature_extractor.__class__.__name__ , """NewFeatureExtractor""" ) def a__ ( self: int )-> Optional[Any]: try: AutoConfig.register("""custom""" , __a ) AutoFeatureExtractor.register(__a , __a ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(__a ): AutoFeatureExtractor.register(__a , __a ) # Now that the config is registered, it can be used as any other config with the auto-API lowerCamelCase : Tuple = CustomFeatureExtractor.from_pretrained(__a ) with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained(__a ) lowerCamelCase : List[str] = AutoFeatureExtractor.from_pretrained(__a ) self.assertIsInstance(__a , __a ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig] def a__ ( self: int )-> int: class A__ ( __lowercase): """simple docstring""" snake_case__ : Optional[Any] =True try: AutoConfig.register("""custom""" , __a ) AutoFeatureExtractor.register(__a , __a ) # If remote code is not set, the default is to use local lowerCamelCase : int = AutoFeatureExtractor.from_pretrained( """hf-internal-testing/test_dynamic_feature_extractor""" ) self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" ) self.assertTrue(feature_extractor.is_local ) # If remote code is disabled, we load the local one. lowerCamelCase : Tuple = AutoFeatureExtractor.from_pretrained( """hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=__a ) self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" ) self.assertTrue(feature_extractor.is_local ) # If remote is enabled, we load from the Hub lowerCamelCase : List[str] = AutoFeatureExtractor.from_pretrained( """hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=__a ) self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" ) self.assertTrue(not hasattr(__a , """is_local""" ) ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
42
"""simple docstring""" import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, StableDiffusionAttendAndExcitePipeline, UNetaDConditionModel, ) from diffusers.utils import load_numpy, skip_mps, slow from diffusers.utils.testing_utils import require_torch_gpu from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin __lowerCamelCase :Any = False @skip_mps class A__ ( __lowercase , __lowercase , __lowercase , unittest.TestCase): """simple docstring""" snake_case__ : Optional[Any] =StableDiffusionAttendAndExcitePipeline snake_case__ : Any =False snake_case__ : Dict =TEXT_TO_IMAGE_PARAMS snake_case__ : Any =TEXT_TO_IMAGE_BATCH_PARAMS.union({'''token_indices'''}) snake_case__ : Dict =TEXT_TO_IMAGE_IMAGE_PARAMS snake_case__ : str =TEXT_TO_IMAGE_IMAGE_PARAMS @classmethod def a__ ( cls: Dict )-> Tuple: super().setUpClass() torch.use_deterministic_algorithms(__a ) @classmethod def a__ ( cls: Union[str, Any] )-> Any: super().tearDownClass() torch.use_deterministic_algorithms(__a ) def a__ ( self: Tuple )-> Union[str, Any]: torch.manual_seed(0 ) lowerCamelCase : str = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=__a , ) lowerCamelCase : Union[str, Any] = DDIMScheduler( beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="""scaled_linear""" , clip_sample=__a , set_alpha_to_one=__a , ) torch.manual_seed(0 ) lowerCamelCase : Union[str, Any] = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , ) torch.manual_seed(0 ) lowerCamelCase : str = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act="""gelu""" , projection_dim=512 , ) lowerCamelCase : Optional[int] = CLIPTextModel(__a ) lowerCamelCase : str = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) lowerCamelCase : List[str] = { """unet""": unet, """scheduler""": scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """safety_checker""": None, """feature_extractor""": None, } return components def a__ ( self: Tuple , __a: int , __a: Union[str, Any]=0 )-> Optional[Any]: if str(__a ).startswith("""mps""" ): lowerCamelCase : Tuple = torch.manual_seed(__a ) else: lowerCamelCase : str = torch.Generator(device=__a ).manual_seed(__a ) lowerCamelCase : Dict = { """prompt""": """a cat and a frog""", """token_indices""": [2, 5], """generator""": generator, """num_inference_steps""": 1, """guidance_scale""": 6.0, """output_type""": """numpy""", """max_iter_to_alter""": 2, """thresholds""": {0: 0.7}, } return inputs def a__ ( self: Dict )-> str: lowerCamelCase : Tuple = """cpu""" lowerCamelCase : List[str] = self.get_dummy_components() lowerCamelCase : List[Any] = self.pipeline_class(**__a 
) pipe.to(__a ) pipe.set_progress_bar_config(disable=__a ) lowerCamelCase : Any = self.get_dummy_inputs(__a ) lowerCamelCase : Union[str, Any] = pipe(**__a ).images lowerCamelCase : Tuple = image[0, -3:, -3:, -1] self.assertEqual(image.shape , (1, 64, 64, 3) ) lowerCamelCase : Optional[Any] = np.array( [0.63_90_53_64, 0.62_89_73_07, 0.48_59_90_17, 0.5_13_36_24, 0.5_55_00_48, 0.45_76_95_16, 0.50_32_69_73, 0.5_02_31_39, 0.45_38_44_96] ) lowerCamelCase : Optional[Any] = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(__a , 1e-3 ) def a__ ( self: int )-> Optional[Any]: super().test_cpu_offload_forward_pass(expected_max_diff=5e-4 ) def a__ ( self: Union[str, Any] )-> Optional[int]: # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches self._test_inference_batch_consistent(batch_sizes=[1, 2] ) def a__ ( self: Tuple )-> int: self._test_inference_batch_single_identical(batch_size=2 , expected_max_diff=7e-4 ) def a__ ( self: Dict )-> List[Any]: super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 ) def a__ ( self: Optional[int] )-> Dict: super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5e-4 ) def a__ ( self: Any )-> Tuple: super().test_save_load_local(expected_max_difference=5e-4 ) def a__ ( self: str )-> str: super().test_save_load_optional_components(expected_max_difference=4e-4 ) @require_torch_gpu @slow class A__ ( unittest.TestCase): """simple docstring""" @classmethod def a__ ( cls: Any )-> Tuple: super().setUpClass() torch.use_deterministic_algorithms(__a ) @classmethod def a__ ( cls: Dict )-> Optional[int]: super().tearDownClass() torch.use_deterministic_algorithms(__a ) def a__ ( self: int )-> Optional[int]: super().tearDown() gc.collect() torch.cuda.empty_cache() def a__ ( self: int )-> Optional[Any]: lowerCamelCase : List[Any] = torch.manual_seed(51 ) lowerCamelCase : List[str] = StableDiffusionAttendAndExcitePipeline.from_pretrained( """CompVis/stable-diffusion-v1-4""" , safety_checker=__a , torch_dtype=torch.floataa ) pipe.to("""cuda""" ) lowerCamelCase : Dict = """a painting of an elephant with glasses""" lowerCamelCase : Any = [5, 7] lowerCamelCase : Tuple = pipe( prompt=__a , token_indices=__a , guidance_scale=7.5 , generator=__a , num_inference_steps=5 , max_iter_to_alter=5 , output_type="""numpy""" , ).images[0] lowerCamelCase : Union[str, Any] = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy""" ) assert np.abs((expected_image - image).max() ) < 5e-1
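A usage sketch mirroring the slow test above; it needs a CUDA GPU and downloads CompVis/stable-diffusion-v1-4 from the Hub. The prompt and token indices are the ones the test uses.

import torch
from diffusers import StableDiffusionAttendAndExcitePipeline

pipe = StableDiffusionAttendAndExcitePipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4", safety_checker=None, torch_dtype=torch.float16
).to("cuda")
image = pipe(
    prompt="a painting of an elephant with glasses",
    token_indices=[5, 7],  # tokens the attend-and-excite loss should strengthen
    guidance_scale=7.5,
    num_inference_steps=50,
).images[0]
image.save("elephant_glasses.png")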
42
1
import itertools import random import unittest import numpy as np from transformers import ASTFeatureExtractor from transformers.testing_utils import require_torch, require_torchaudio from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin __lowerCAmelCase = random.Random() if is_torch_available(): import torch def _lowercase ( a__ : List[Any] , a__ : Optional[int]=1.0 , a__ : Union[str, Any]=None , a__ : int=None ) -> Any: """simple docstring""" if rng is None: _UpperCamelCase = global_rng _UpperCamelCase = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values class lowerCamelCase_ ( unittest.TestCase ): def __init__( self , lowerCamelCase_ , lowerCamelCase_=7 , lowerCamelCase_=4_00 , lowerCamelCase_=20_00 , lowerCamelCase_=1 , lowerCamelCase_=0.0 , lowerCamelCase_=1_60_00 , lowerCamelCase_=True , lowerCamelCase_=True , ) -> List[Any]: """simple docstring""" _UpperCamelCase = parent _UpperCamelCase = batch_size _UpperCamelCase = min_seq_length _UpperCamelCase = max_seq_length _UpperCamelCase = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) _UpperCamelCase = feature_size _UpperCamelCase = padding_value _UpperCamelCase = sampling_rate _UpperCamelCase = return_attention_mask _UpperCamelCase = do_normalize def lowercase ( self ) -> int: """simple docstring""" return { "feature_size": self.feature_size, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "return_attention_mask": self.return_attention_mask, "do_normalize": self.do_normalize, } def lowercase ( self , lowerCamelCase_=False , lowerCamelCase_=False ) -> Optional[Any]: """simple docstring""" def _flatten(lowerCamelCase_ ): return list(itertools.chain(*lowerCamelCase_ ) ) if equal_length: _UpperCamelCase = floats_list((self.batch_size, self.max_seq_length) ) else: # make sure that inputs increase in size _UpperCamelCase = [ _flatten(floats_list((x, self.feature_size) ) ) for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff ) ] if numpify: _UpperCamelCase = [np.asarray(lowerCamelCase_ ) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class lowerCamelCase_ ( lowercase_ , unittest.TestCase ): __lowercase : Optional[int] = ASTFeatureExtractor def lowercase ( self ) -> str: """simple docstring""" _UpperCamelCase = ASTFeatureExtractionTester(self ) def lowercase ( self ) -> int: """simple docstring""" _UpperCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) # create three inputs of length 800, 1000, and 1200 _UpperCamelCase = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )] _UpperCamelCase = [np.asarray(lowerCamelCase_ ) for speech_input in speech_inputs] # Test not batched input _UpperCamelCase = feat_extract(speech_inputs[0] , return_tensors="np" ).input_values _UpperCamelCase = feat_extract(np_speech_inputs[0] , return_tensors="np" ).input_values self.assertTrue(np.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1E-3 ) ) # Test batched _UpperCamelCase = feat_extract(lowerCamelCase_ , padding=lowerCamelCase_ , return_tensors="np" ).input_values _UpperCamelCase = feat_extract(lowerCamelCase_ , padding=lowerCamelCase_ , return_tensors="np" ).input_values for enc_seq_a, enc_seq_a in zip(lowerCamelCase_ , lowerCamelCase_ ): self.assertTrue(np.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1E-3 ) 
) # Test 2-D numpy arrays are batched. _UpperCamelCase = [floats_list((1, x) )[0] for x in (8_00, 8_00, 8_00)] _UpperCamelCase = np.asarray(lowerCamelCase_ ) _UpperCamelCase = feat_extract(lowerCamelCase_ , return_tensors="np" ).input_values _UpperCamelCase = feat_extract(lowerCamelCase_ , return_tensors="np" ).input_values for enc_seq_a, enc_seq_a in zip(lowerCamelCase_ , lowerCamelCase_ ): self.assertTrue(np.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1E-3 ) ) @require_torch def lowercase ( self ) -> Any: """simple docstring""" import torch _UpperCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) _UpperCamelCase = np.random.rand(1_00 ).astype(np.floataa ) _UpperCamelCase = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: _UpperCamelCase = feature_extractor.pad([{"input_values": inputs}] , return_tensors="np" ) self.assertTrue(np_processed.input_values.dtype == np.floataa ) _UpperCamelCase = feature_extractor.pad([{"input_values": inputs}] , return_tensors="pt" ) self.assertTrue(pt_processed.input_values.dtype == torch.floataa ) def lowercase ( self , lowerCamelCase_ ) -> List[Any]: """simple docstring""" from datasets import load_dataset _UpperCamelCase = load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" ) # automatic decoding with librispeech _UpperCamelCase = ds.sort("id" ).select(range(lowerCamelCase_ ) )[:num_samples]["audio"] return [x["array"] for x in speech_samples] @require_torch def lowercase ( self ) -> str: """simple docstring""" _UpperCamelCase = torch.tensor( [-0.98_94, -1.27_76, -0.90_66, -1.27_76, -0.93_49, -1.26_09, -1.03_86, -1.27_76, -1.15_61, -1.27_76, -1.20_52, -1.27_23, -1.21_90, -1.21_32, -1.27_76, -1.11_33, -1.19_53, -1.13_43, -1.15_84, -1.22_03, -1.17_70, -1.24_74, -1.23_81, -1.19_36, -0.92_70, -0.83_17, -0.80_49, -0.77_06, -0.75_65, -0.78_69] ) # fmt: on _UpperCamelCase = self._load_datasamples(1 ) _UpperCamelCase = ASTFeatureExtractor() _UpperCamelCase = feature_extractor(lowerCamelCase_ , return_tensors="pt" ).input_values self.assertEquals(input_values.shape , (1, 10_24, 1_28) ) self.assertTrue(torch.allclose(input_values[0, 0, :30] , lowerCamelCase_ , atol=1E-4 ) )
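A standalone rerun of the integration check above: one dummy LibriSpeech sample is featurized into a (1, 1024, 128) log-mel input.

from datasets import load_dataset
from transformers import ASTFeatureExtractor

ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
speech = ds.sort("id")[0]["audio"]["array"]
input_values = ASTFeatureExtractor()(speech, sampling_rate=16_000, return_tensors="pt").input_values
print(input_values.shape)  # torch.Size([1, 1024, 128])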
147
import argparse

import torch

from transformers import (
    UniSpeechSatConfig,
    UniSpeechSatForAudioFrameClassification,
    UniSpeechSatForSequenceClassification,
    UniSpeechSatForXVector,
    Wav2Vec2FeatureExtractor,
    logging,
)

logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def convert_classification(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model


def convert_diarization(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model


def convert_xvector(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]
    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model


@torch.no_grad()
def convert_s3prl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    downstream_dict = checkpoint["Downstream"]

    hf_config = UniSpeechSatConfig.from_pretrained(config_path)
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False
    )

    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")

    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]

    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--base_model_name", default=None, type=str, help="Name of the huggingface pretrained base model."
    )
    parser.add_argument("--config_path", default=None, type=str, help="Path to the huggingface classifier config.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the s3prl checkpoint.")
    parser.add_argument("--model_dump_path", default=None, type=str, help="Path to the final converted model.")
    args = parser.parse_args()
    convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
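An example invocation of the converter above. The script filename and all local paths are placeholders; microsoft/unispeech-sat-base is a real Hub checkpoint used here for illustration.

# python convert_unispeech_sat_s3prl_checkpoint.py \
#     --base_model_name microsoft/unispeech-sat-base \
#     --config_path ./config.json \
#     --checkpoint_path ./s3prl_downstream.ckpt \
#     --model_dump_path ./converted_model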
685
0
import math
import random


def sigmoid_function(value: float, deriv: bool = False) -> float:
    """Return the sigmoid of ``value`` (or its derivative when ``deriv`` is True)."""
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value))


# Initial Value
INITIAL_VALUE = 0.02


def forward_propagation(expected: int, number_propagations: int) -> float:
    """Train a single weight toward ``expected`` and return the final prediction."""
    # Random starting weight
    weight = float(2 * (random.randint(1, 100)) - 1)

    for _ in range(number_propagations):
        # Forward propagation
        layer_1 = sigmoid_function(INITIAL_VALUE * weight)
        # How much did we miss?
        layer_1_error = (expected / 100) - layer_1
        # Error delta
        layer_1_delta = layer_1_error * sigmoid_function(layer_1, True)
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta

    return layer_1 * 100


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    expected = int(input("Expected value: "))
    number_propagations = int(input("Number of propagations: "))
    print(forward_propagation(expected, number_propagations))
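A quick spot check of the trainer above. The starting weight is random, so exact output varies, but with this many updates the prediction is expected to settle within about one unit of the target.

result = forward_propagation(32, 450_000)
print(result)  # expected to land between 31 and 33 for these arguments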
713
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {"configuration_wavlm": ["WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "WavLMConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_wavlm"] = [
        "WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "WavLMForAudioFrameClassification",
        "WavLMForCTC",
        "WavLMForSequenceClassification",
        "WavLMForXVector",
        "WavLMModel",
        "WavLMPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_wavlm import (
            WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            WavLMForAudioFrameClassification,
            WavLMForCTC,
            WavLMForSequenceClassification,
            WavLMForXVector,
            WavLMModel,
            WavLMPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
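A brief note on what the lazy structure above buys, sketched as comments.

# Importing the package stays cheap: `import transformers.models.wavlm` only
# builds the lazy module. Torch-backed classes are materialized on first
# attribute access, e.g.
#
#   from transformers.models.wavlm import WavLMConfig  # loads configuration_wavlm only
#   from transformers.models.wavlm import WavLMModel   # triggers modeling_wavlm (and torch)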
240
0
"""simple docstring""" from math import factorial __A = {str(digit): factorial(digit) for digit in range(10)} def __A (_SCREAMING_SNAKE_CASE ) ->int: """simple docstring""" if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): raise TypeError('Parameter number must be int' ) if number < 0: raise ValueError('Parameter number must be greater than or equal to 0' ) # Converts number in string to iterate on its digits and adds its factorial. return sum(DIGIT_FACTORIAL[digit] for digit in str(_SCREAMING_SNAKE_CASE ) ) def __A (_SCREAMING_SNAKE_CASE = 60 , _SCREAMING_SNAKE_CASE = 100_0000 ) ->int: """simple docstring""" if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) or not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): raise TypeError('Parameters chain_length and number_limit must be int' ) if chain_length <= 0 or number_limit <= 0: raise ValueError( 'Parameters chain_length and number_limit must be greater than 0' ) # the counter for the chains with the exact desired length lowerCAmelCase__ :Union[str, Any] = 0 # the cached sizes of the previous chains lowerCAmelCase__ :dict[int, int] = {} for start_chain_element in range(1 , _SCREAMING_SNAKE_CASE ): # The temporary set will contain the elements of the chain lowerCAmelCase__ :Optional[Any] = set() lowerCAmelCase__ :Tuple = 0 # Stop computing the chain when you find a cached size, a repeating item or the # length is greater then the desired one. lowerCAmelCase__ :Dict = start_chain_element while ( chain_element not in chain_sets_lengths and chain_element not in chain_set and chain_set_length <= chain_length ): chain_set.add(_SCREAMING_SNAKE_CASE ) chain_set_length += 1 lowerCAmelCase__ :int = digit_factorial_sum(_SCREAMING_SNAKE_CASE ) if chain_element in chain_sets_lengths: chain_set_length += chain_sets_lengths[chain_element] lowerCAmelCase__ :Union[str, Any] = chain_set_length # If chain contains the exact amount of elements increase the counter if chain_set_length == chain_length: chains_counter += 1 return chains_counter if __name__ == "__main__": import doctest doctest.testmod() print(F'''{solution()}''')
93
"""simple docstring""" # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __A = { """configuration_mgp_str""": ["""MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MgpstrConfig"""], """processing_mgp_str""": ["""MgpstrProcessor"""], """tokenization_mgp_str""": ["""MgpstrTokenizer"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ """MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST""", """MgpstrModel""", """MgpstrPreTrainedModel""", """MgpstrForSceneTextRecognition""", ] if TYPE_CHECKING: from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig from .processing_mgp_str import MgpstrProcessor from .tokenization_mgp_str import MgpstrTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mgp_str import ( MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST, MgpstrForSceneTextRecognition, MgpstrModel, MgpstrPreTrainedModel, ) else: import sys __A = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
93
1
import numpy as np
from cv2 import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image

from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs

img = imread(r"digital_image_processing/image_data/lena_small.jpg")
gray = cvtColor(img, COLOR_BGR2GRAY)


def test_convert_to_negative():
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()


def test_change_contrast():
    with Image.open("digital_image_processing/image_data/lena_small.jpg") as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110)).startswith(
            "<PIL.Image.Image image mode=RGB size=100x100 at"
        )


def test_gen_gaussian_kernel():
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()


def test_canny():
    canny_img = imread("digital_image_processing/image_data/lena_small.jpg", 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()


def test_gen_gaussian_kernel_filter():
    assert gg.gaussian_filter(gray, 5, sigma=0.9).all()


def test_convolve_filter():
    # laplace diagonals
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    res = conv.img_convolve(gray, laplace).astype(uint8)
    assert res.any()


def test_median_filter():
    assert med.median_filter(gray, 3).any()


def test_sobel_filter():
    grad, theta = sob.sobel_filter(gray)
    assert grad.any() and theta.any()


def test_sepia():
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()


def test_burkes(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()


def test_nearest_neighbour(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()


def test_local_binary_pattern():
    file_path = "digital_image_processing/image_data/lena.jpg"
    # Reading the image and converting it to grayscale
    image = imread(file_path, 0)

    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]
    neighbors_pixels = lbp.get_neighbors_pixel(image, x_coordinate, y_coordinate, center)
    assert neighbors_pixels is not None

    # Test for local_binary_pattern function()
    # Create a numpy array with the same height and width as the read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))
    # Iterating through the image and calculating the local binary pattern value
    # for each pixel
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)
    assert lbp_image.any()
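A minimal sketch of calling the local-binary-pattern helper outside pytest, under the same assumptions as the test above (the lena.jpg asset exists on disk, and local_binary_value packs the eight neighbour comparisons into one 8-bit code):

from cv2 import imread

from digital_image_processing.filters import local_binary_pattern as lbp

image = imread("digital_image_processing/image_data/lena.jpg", 0)
# Compare pixel (10, 10) against its 8 neighbours; the packed code is in [0, 255].
code = lbp.local_binary_value(image, 10, 10)
assert 0 <= code <= 255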
711
import os import time import warnings from dataclasses import dataclass, field from enum import Enum from typing import List, Optional, Union import torch from filelock import FileLock from torch.utils.data import Dataset from ...tokenization_utils_base import PreTrainedTokenizerBase from ...utils import logging from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors from ..processors.utils import InputFeatures __UpperCamelCase : Any = logging.get_logger(__name__) @dataclass class __UpperCamelCase : __snake_case :str = field(metadata={'help': 'The name of the task to train on: ' + ', '.join(glue_processors.keys() )} ) __snake_case :str = field( metadata={'help': 'The input data dir. Should contain the .tsv files (or other data files) for the task.'} ) __snake_case :int = field( default=1_2_8 , metadata={ 'help': ( 'The maximum total input sequence length after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) } , ) __snake_case :bool = field( default=_lowerCAmelCase , metadata={'help': 'Overwrite the cached training and evaluation sets'} ) def _a ( self : Dict ) -> List[Any]: """simple docstring""" __lowercase = self.task_name.lower() class __UpperCamelCase ( _lowerCAmelCase ): __snake_case :Optional[int] = 'train' __snake_case :int = 'dev' __snake_case :Any = 'test' class __UpperCamelCase ( _lowerCAmelCase ): __snake_case :GlueDataTrainingArguments __snake_case :str __snake_case :List[InputFeatures] def __init__( self : Dict , _lowerCAmelCase : GlueDataTrainingArguments , _lowerCAmelCase : PreTrainedTokenizerBase , _lowerCAmelCase : Optional[int] = None , _lowerCAmelCase : Union[str, Split] = Split.train , _lowerCAmelCase : Optional[str] = None , ) -> List[Any]: """simple docstring""" warnings.warn( """This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets """ """library. You can have a look at this example script for pointers: """ """https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py""" , _lowerCAmelCase , ) __lowercase = args __lowercase = glue_processors[args.task_name]() __lowercase = glue_output_modes[args.task_name] if isinstance(_lowerCAmelCase , _lowerCAmelCase ): try: __lowercase = Split[mode] except KeyError: raise KeyError("""mode is not a valid split name""" ) # Load data features from cache or dataset file __lowercase = os.path.join( cache_dir if cache_dir is not None else args.data_dir , F'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}' , ) __lowercase = self.processor.get_labels() if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in ( "RobertaTokenizer", "RobertaTokenizerFast", "XLMRobertaTokenizer", "BartTokenizer", "BartTokenizerFast", ): # HACK(label indices are swapped in RoBERTa pretrained model) __lowercase , __lowercase = label_list[2], label_list[1] __lowercase = label_list # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. 
__lowercase = cached_features_file + """.lock""" with FileLock(_lowerCAmelCase ): if os.path.exists(_lowerCAmelCase ) and not args.overwrite_cache: __lowercase = time.time() __lowercase = torch.load(_lowerCAmelCase ) logger.info( F'Loading features from cached file {cached_features_file} [took %.3f s]' , time.time() - start ) else: logger.info(F'Creating features from dataset file at {args.data_dir}' ) if mode == Split.dev: __lowercase = self.processor.get_dev_examples(args.data_dir ) elif mode == Split.test: __lowercase = self.processor.get_test_examples(args.data_dir ) else: __lowercase = self.processor.get_train_examples(args.data_dir ) if limit_length is not None: __lowercase = examples[:limit_length] __lowercase = glue_convert_examples_to_features( _lowerCAmelCase , _lowerCAmelCase , max_length=args.max_seq_length , label_list=_lowerCAmelCase , output_mode=self.output_mode , ) __lowercase = time.time() torch.save(self.features , _lowerCAmelCase ) # ^ This seems to take a lot of time so I want to investigate why and how we can improve. logger.info( F'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]' ) def __len__( self : Dict ) -> Optional[int]: """simple docstring""" return len(self.features ) def __getitem__( self : Tuple , _lowerCAmelCase : Optional[int] ) -> InputFeatures: """simple docstring""" return self.features[i] def _a ( self : str ) -> int: """simple docstring""" return self.label_list
53
0
def solution(power: int = 1000) -> int:
    num = 2**power
    string_num = str(num)
    list_num = list(string_num)
    sum_of_num = 0
    for i in list_num:
        sum_of_num += int(i)
    return sum_of_num


if __name__ == "__main__":
    power = int(input('Enter the power of 2: ').strip())
    print('2 ^ ', power, ' = ', 2**power)
    result = solution(power)
    print('Sum of the digits is: ', result)
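A hand-checkable case for the function above (illustrative values): 2**15 = 32768 and 3 + 2 + 7 + 6 + 8 = 26.

assert solution(15) == 26
assert solution(0) == 1  # 2**0 = 1 has digit sum 1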
278
"""simple docstring""" import importlib import os import fsspec import pytest from fsspec import register_implementation from fsspec.registry import _registry as _fsspec_registry from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem from .utils import require_lza, require_zstandard def __snake_case ( __A : str ) -> Dict: '''simple docstring''' assert "mock" in _fsspec_registry assert "bz2" in _fsspec_registry def __snake_case ( ) -> Union[str, Any]: '''simple docstring''' assert "mock" not in _fsspec_registry assert "bz2" in _fsspec_registry def __snake_case ( ) -> Tuple: '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = 'mock-s3-bucket' SCREAMING_SNAKE_CASE : Tuple = F"""s3://{mock_bucket}""" SCREAMING_SNAKE_CASE : str = extract_path_from_uri(__A ) assert dataset_path.startswith('s3://' ) is False SCREAMING_SNAKE_CASE : Optional[int] = './local/path' SCREAMING_SNAKE_CASE : str = extract_path_from_uri(__A ) assert dataset_path == new_dataset_path def __snake_case ( __A : Tuple ) -> Optional[int]: '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = is_remote_filesystem(__A ) assert is_remote is True SCREAMING_SNAKE_CASE : Dict = fsspec.filesystem('file' ) SCREAMING_SNAKE_CASE : Dict = is_remote_filesystem(__A ) assert is_remote is False @pytest.mark.parametrize('compression_fs_class' , __A ) def __snake_case ( __A : Any , __A : Optional[int] , __A : List[Any] , __A : Optional[Any] , __A : str , __A : Tuple , __A : Dict ) -> Tuple: '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = {'gzip': gz_file, 'xz': xz_file, 'zstd': zstd_file, 'bz2': bza_file, 'lz4': lza_file} SCREAMING_SNAKE_CASE : List[Any] = input_paths[compression_fs_class.protocol] if input_path is None: SCREAMING_SNAKE_CASE : str = F"""for '{compression_fs_class.protocol}' compression protocol, """ if compression_fs_class.protocol == "lz4": reason += require_lza.kwargs["reason"] elif compression_fs_class.protocol == "zstd": reason += require_zstandard.kwargs["reason"] pytest.skip(__A ) SCREAMING_SNAKE_CASE : Any = fsspec.filesystem(compression_fs_class.protocol , fo=__A ) assert isinstance(__A , __A ) SCREAMING_SNAKE_CASE : Union[str, Any] = os.path.basename(__A ) SCREAMING_SNAKE_CASE : Dict = expected_filename[: expected_filename.rindex('.' 
)] assert fs.glob('*' ) == [expected_filename] with fs.open(__A , 'r' , encoding='utf-8' ) as f, open(__A , encoding='utf-8' ) as expected_file: assert f.read() == expected_file.read() @pytest.mark.parametrize('protocol' , ['zip', 'gzip'] ) def __snake_case ( __A : Any , __A : int , __A : Union[str, Any] ) -> Union[str, Any]: '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = {'zip': zip_jsonl_path, 'gzip': jsonl_gz_path} SCREAMING_SNAKE_CASE : Optional[int] = compressed_file_paths[protocol] SCREAMING_SNAKE_CASE : str = 'dataset.jsonl' SCREAMING_SNAKE_CASE : Union[str, Any] = F"""{protocol}://{member_file_path}::{compressed_file_path}""" SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE : Any = fsspec.get_fs_token_paths(__A ) assert fs.isfile(__A ) assert not fs.isfile('non_existing_' + member_file_path ) @pytest.mark.integration def __snake_case ( __A : List[str] , __A : int , __A : int , __A : Optional[int] ) -> Union[str, Any]: '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = hf_api.dataset_info(__A , token=__A ) SCREAMING_SNAKE_CASE : str = HfFileSystem(repo_info=__A , token=__A ) assert sorted(hffs.glob('*' ) ) == [".gitattributes", "data"] assert hffs.isdir('data' ) assert hffs.isfile('.gitattributes' ) and hffs.isfile('data/text_data.txt' ) with open(__A ) as f: assert hffs.open('data/text_data.txt' , 'r' ).read() == f.read() def __snake_case ( ) -> Union[str, Any]: '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = 'bz2' # Import module import datasets.filesystems # Overwrite protocol and reload register_implementation(__A , __A , clobber=__A ) with pytest.warns(__A ) as warning_info: importlib.reload(datasets.filesystems ) assert len(__A ) == 1 assert ( str(warning_info[0].message ) == F"""A filesystem protocol was already set for {protocol} and will be overwritten.""" )
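The compression test above drives fsspec's compression filesystems through datasets' fixtures; a minimal standalone sketch of the same idea ("data.txt.gz" is a hypothetical path):

import fsspec

# fsspec transparently decompresses when a compression codec is named.
with fsspec.open("data.txt.gz", "rt", compression="gzip") as f:
    contents = f.read()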
265
0
import os import socket from contextlib import contextmanager import torch from ..commands.config.default import write_basic_config # noqa: F401 from ..state import PartialState from .dataclasses import DistributedType from .imports import is_deepspeed_available, is_tpu_available from .transformer_engine import convert_model from .versions import is_torch_version if is_deepspeed_available(): from deepspeed import DeepSpeedEngine if is_tpu_available(check_device=False): import torch_xla.core.xla_model as xm def _snake_case ( _SCREAMING_SNAKE_CASE : Optional[int] ) -> Union[str, Any]: """simple docstring""" if is_torch_version("""<""" , """2.0.0""" ) or not hasattr(_SCREAMING_SNAKE_CASE , """_dynamo""" ): return False return isinstance(_SCREAMING_SNAKE_CASE , torch._dynamo.eval_frame.OptimizedModule ) def _snake_case ( _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : bool = True ) -> Any: """simple docstring""" lowerCAmelCase = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel) lowerCAmelCase = is_compiled_module(_SCREAMING_SNAKE_CASE ) if is_compiled: lowerCAmelCase = model lowerCAmelCase = model._orig_mod if is_deepspeed_available(): options += (DeepSpeedEngine,) while isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): lowerCAmelCase = model.module if not keep_fpaa_wrapper: lowerCAmelCase = getattr(_SCREAMING_SNAKE_CASE , """forward""" ) lowerCAmelCase = model.__dict__.pop("""_original_forward""" , _SCREAMING_SNAKE_CASE ) if original_forward is not None: while hasattr(_SCREAMING_SNAKE_CASE , """__wrapped__""" ): lowerCAmelCase = forward.__wrapped__ if forward == original_forward: break lowerCAmelCase = forward if getattr(_SCREAMING_SNAKE_CASE , """_converted_to_transformer_engine""" , _SCREAMING_SNAKE_CASE ): convert_model(_SCREAMING_SNAKE_CASE , to_transformer_engine=_SCREAMING_SNAKE_CASE ) if is_compiled: lowerCAmelCase = model lowerCAmelCase = compiled_model return model def _snake_case ( ) -> List[Any]: """simple docstring""" PartialState().wait_for_everyone() def _snake_case ( _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Optional[Any] ) -> Any: """simple docstring""" if PartialState().distributed_type == DistributedType.TPU: xm.save(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) elif PartialState().local_process_index == 0: torch.save(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) @contextmanager def _snake_case ( **_SCREAMING_SNAKE_CASE : Dict ) -> Dict: """simple docstring""" for key, value in kwargs.items(): lowerCAmelCase = str(_SCREAMING_SNAKE_CASE ) yield for key in kwargs: if key.upper() in os.environ: del os.environ[key.upper()] def _snake_case ( _SCREAMING_SNAKE_CASE : Any ) -> Dict: """simple docstring""" if not hasattr(_SCREAMING_SNAKE_CASE , """__qualname__""" ) and not hasattr(_SCREAMING_SNAKE_CASE , """__name__""" ): lowerCAmelCase = getattr(_SCREAMING_SNAKE_CASE , """__class__""" , _SCREAMING_SNAKE_CASE ) if hasattr(_SCREAMING_SNAKE_CASE , """__qualname__""" ): return obj.__qualname__ if hasattr(_SCREAMING_SNAKE_CASE , """__name__""" ): return obj.__name__ return str(_SCREAMING_SNAKE_CASE ) def _snake_case ( _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : Dict ) -> Tuple: """simple docstring""" for key, value in source.items(): if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): lowerCAmelCase = destination.setdefault(_SCREAMING_SNAKE_CASE , {} ) merge_dicts(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else: lowerCAmelCase = value return destination def _snake_case ( _SCREAMING_SNAKE_CASE 
: int = None ) -> bool: """simple docstring""" if port is None: lowerCAmelCase = 29_500 with socket.socket(socket.AF_INET , socket.SOCK_STREAM ) as s: return s.connect_ex(("""localhost""", port) ) == 0
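A standalone sketch of the port probe implemented by the last helper above: connect_ex returns 0 exactly when something is already listening on the port.

import socket

def is_port_in_use(port: int = 29500) -> bool:
    # Attempt a TCP connection to localhost; success means the port is taken.
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        return s.connect_ex(("localhost", port)) == 0

print(is_port_in_use(29500))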
720
'''simple docstring'''
from __future__ import annotations

import math

import numpy as np
from numpy.linalg import norm


def euclidean(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """simple docstring"""
    return math.sqrt(sum(pow(a - b, 2) for a, b in zip(input_a, input_b)))


def similarity_search(
    dataset: np.ndarray, value_array: np.ndarray
) -> list[list[list[float] | float]]:
    """simple docstring"""
    if dataset.ndim != value_array.ndim:
        msg = (
            """Wrong input data's dimensions... """
            f'dataset : {dataset.ndim}, value_array : {value_array.ndim}'
        )
        raise ValueError(msg)

    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                """Wrong input data's shape... """
                f'dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}'
            )
            raise ValueError(msg)
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError("""Wrong shape""")

    if dataset.dtype != value_array.dtype:
        msg = (
            """Input data have different datatype... """
            f'dataset : {dataset.dtype}, value_array : {value_array.dtype}'
        )
        raise TypeError(msg)

    answer = []

    for value in value_array:
        dist = euclidean(value, dataset[0])
        vector = dataset[0].tolist()

        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value, dataset_value)

            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()

        answer.append([vector, dist])

    return answer


def cosine_similarity(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """simple docstring"""
    return np.dot(input_a, input_b) / (norm(input_a) * norm(input_b))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
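A small worked example for the search above (values chosen for illustration): the nearest dataset row to [0.1, 0.1] is [0.0, 0.0] at distance sqrt(0.02) ≈ 0.1414.

import numpy as np

dataset = np.array([[0.0, 0.0], [1.0, 1.0], [2.0, 2.0]])
value_array = np.array([[0.1, 0.1]])
print(similarity_search(dataset, value_array))  # [[[0.0, 0.0], 0.1414...]]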
344
0
import pytest

from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict


@pytest.mark.parametrize(
    "split_dict",
    [
        SplitDict(),
        SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42, dataset_name="my_dataset")}),
        SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42)}),
        SplitDict({"train": SplitInfo()}),
    ],
)
def test_split_dict_to_yaml_list(split_dict: SplitDict) -> None:
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict_yaml_list) == len(split_dict)
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list)
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded


@pytest.mark.parametrize(
    "split_info", [SplitInfo(), SplitInfo(dataset_name=None), SplitInfo(dataset_name="my_dataset")]
)
def test_split_dict_asdict_has_dataset_name(split_info) -> None:
    split_dict_asdict = asdict(SplitDict({"train": split_info}))
    assert "dataset_name" in split_dict_asdict["train"]
    assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
59
import argparse import json import os import torch from torch import nn from transformers import NllbMoeConfig, NllbMoeModel from transformers.modeling_utils import dtype_byte_size from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME def a_ ( __lowercase : Any ) -> List[Any]: _snake_case = [ 'encoder.version', 'decoder.version', 'model.encoder.version', 'model.decoder.version', 'decoder.output_projection.weight', '_float_tensor', 'encoder.embed_positions._float_tensor', 'decoder.embed_positions._float_tensor', ] for k in ignore_keys: state_dict.pop(__lowercase , __lowercase ) def a_ ( __lowercase : Dict ) -> Tuple: _snake_case , _snake_case = emb.weight.shape _snake_case = nn.Linear(__lowercase , __lowercase , bias=__lowercase ) _snake_case = emb.weight.data return lin_layer def a_ ( __lowercase : Optional[int] , __lowercase : Union[str, Any]=None ) -> Tuple: _snake_case = {} for old_key in state_dict.keys(): _snake_case = old_key if "moe_layer.experts." in key: if expert_idx is not None: _snake_case = key.replace('moe_layer.experts.0' , f'''ffn.experts.expert_{expert_idx}''' ) else: _snake_case = key.replace('moe_layer.experts.' , 'ffn.experts.expert_' ) if "gate" in key: _snake_case = key.replace('.moe_layer.gate.wg' , '.ffn.router.classifier' ) if "fc2" and "experts" not in key: _snake_case = key.replace('.fc2.' , '.ffn.fc2.' ) if "fc1" and "experts" not in key: _snake_case = key.replace('.fc1.' , '.ffn.fc1.' ) if ".encoder_attn." in key: _snake_case = key.replace('.encoder_attn.' , '.cross_attention.' ) if "encoder_attn_layer_norm" in key: _snake_case = key.replace('encoder_attn_layer_norm' , 'cross_attention_layer_norm' ) if "final_layer_norm" in key: _snake_case = key.replace('final_layer_norm' , 'ff_layer_norm' ) _snake_case = state_dict[old_key] return new_dict def a_ ( __lowercase : Optional[Any] , __lowercase : Tuple , __lowercase : Any , __lowercase : List[str] , __lowercase : str = WEIGHTS_NAME ) -> Union[str, Any]: _snake_case = [] _snake_case = 0 os.makedirs(__lowercase , exist_ok=__lowercase ) for expert in range(__lowercase ): _snake_case = switch_checkpoint_path + f'''-rank-{expert}.pt''' if os.path.isfile(__lowercase ): _snake_case = torch.load(__lowercase )['model'] remove_ignore_keys_(__lowercase ) _snake_case = rename_fairseq_keys(__lowercase , __lowercase ) _snake_case = os.path.join( __lowercase , weights_name.replace('.bin' , f'''-{len(__lowercase )+1:05d}-of-???.bin''' ) ) torch.save(__lowercase , __lowercase ) sharded_state_dicts.append(expert_state.keys() ) total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size( expert_state[list(__lowercase )[0]].dtype ) # Add the last block _snake_case = os.path.join(__lowercase , weights_name.replace('.bin' , f'''-{len(__lowercase )+1:05d}-of-???.bin''' ) ) _snake_case = torch.load(switch_checkpoint_path + '-shared.pt' )['model'] remove_ignore_keys_(__lowercase ) _snake_case = rename_fairseq_keys(__lowercase , __lowercase ) _snake_case = shared_weights['decoder.embed_tokens.weight'] sharded_state_dicts.append(shared_weights.keys() ) # If we only have the shared weights (dummy model/experts saved on the same file) if len(__lowercase ) == 1: _snake_case = os.path.join(__lowercase , __lowercase ) torch.save(__lowercase , __lowercase ) return {weights_name: sharded_state_dicts[0]}, None else: torch.save(__lowercase , __lowercase ) # Otherwise, let's build the index _snake_case = {} for idx, shard in enumerate(__lowercase ): _snake_case = weights_name.replace('.bin' , 
f'''-{idx+1:05d}-of-{len(__lowercase ):05d}.bin''' ) _snake_case = os.path.join(__lowercase , weights_name.replace('.bin' , f'''-{idx+1:05d}-of-???.bin''' ) ) os.rename(__lowercase , os.path.join(__lowercase , __lowercase ) ) for key in shard: _snake_case = shard_file # Add the metadata _snake_case = {'total_size': total_size} _snake_case = {'metadata': metadata, 'weight_map': weight_map} with open(os.path.join(__lowercase , __lowercase ) , 'w' , encoding='utf-8' ) as f: _snake_case = json.dumps(__lowercase , indent=2 , sort_keys=__lowercase ) + '\n' f.write(__lowercase ) return metadata, index if __name__ == "__main__": _lowerCamelCase : Optional[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--nllb_moe_checkpoint_path''', default='''/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000''', type=str, required=False, help='''Path to a directory containing a folder per layer. Follows the original Google format.''', ) parser.add_argument('''--dtype''', default='''float32''', type=str, required=False, help='''dtype of the saved model''') parser.add_argument( '''--pytorch_dump_folder_path''', default='''/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b''', type=str, required=False, help='''Path to the output pytorch model.''', ) _lowerCamelCase : List[str] = parser.parse_args() _lowerCamelCase , _lowerCamelCase : Union[str, Any] = shard_on_the_fly( args.nllb_moe_checkpoint_path, args.pytorch_dump_folder_path, 128, args.dtype, ) _lowerCamelCase : Tuple = NllbMoeConfig.from_pretrained( '''facebook/nllb-200-3.3B''', encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128 ) config.save_pretrained(args.pytorch_dump_folder_path) _lowerCamelCase : Dict = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path) print('''Done''') model.save_pretrained(args.pytorch_dump_folder_path)
686
0
"""simple docstring""" from __future__ import annotations class snake_case : """simple docstring""" def __init__( self : Tuple ,lowerCamelCase__ : str ,lowerCamelCase__ : str ): UpperCAmelCase__ , UpperCAmelCase__ = text, pattern UpperCAmelCase__ , UpperCAmelCase__ = len(lowerCamelCase__ ), len(lowerCamelCase__ ) def __lowerCAmelCase ( self : List[Any] ,lowerCamelCase__ : str ): for i in range(self.patLen - 1 ,-1 ,-1 ): if char == self.pattern[i]: return i return -1 def __lowerCAmelCase ( self : List[str] ,lowerCamelCase__ : int ): for i in range(self.patLen - 1 ,-1 ,-1 ): if self.pattern[i] != self.text[current_pos + i]: return current_pos + i return -1 def __lowerCAmelCase ( self : Optional[int] ): # searches pattern in text and returns index positions UpperCAmelCase__ = [] for i in range(self.textLen - self.patLen + 1 ): UpperCAmelCase__ = self.mismatch_in_text(lowerCamelCase__ ) if mismatch_index == -1: positions.append(lowerCamelCase__ ) else: UpperCAmelCase__ = self.match_in_pattern(self.text[mismatch_index] ) UpperCAmelCase__ = ( mismatch_index - match_index ) # shifting index lgtm [py/multiple-definition] return positions lowerCAmelCase__ : List[Any] = 'ABAABA' lowerCAmelCase__ : int = 'AB' lowerCAmelCase__ : Any = BoyerMooreSearch(text, pattern) lowerCAmelCase__ : Optional[int] = bms.bad_character_heuristic() if len(positions) == 0: print('No match found') else: print('Pattern found in following positions: ') print(positions)
632
"""simple docstring""" import argparse import os import jax as jnp import numpy as onp import torch import torch.nn as nn from music_spectrogram_diffusion import inference from tax import checkpoints from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, TaFilmDecoder lowerCAmelCase__ : str = 'base_with_context' def a_ ( lowerCamelCase , lowerCamelCase ): UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(weights['token_embedder']['embedding'] ) ) UpperCAmelCase__ = nn.Parameter( torch.FloatTensor(weights['Embed_0']['embedding'] ) , requires_grad=lowerCamelCase ) for lyr_num, lyr in enumerate(model.encoders ): UpperCAmelCase__ = weights[f'''layers_{lyr_num}'''] UpperCAmelCase__ = nn.Parameter( torch.FloatTensor(ly_weight['pre_attention_layer_norm']['scale'] ) ) UpperCAmelCase__ = ly_weight['attention'] UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) ) UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) ) UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) ) UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) ) UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) ) UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) ) UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) ) UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) ) UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(weights['encoder_norm']['scale'] ) ) return model def a_ ( lowerCamelCase , lowerCamelCase ): UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(weights['input_proj']['kernel'].T ) ) UpperCAmelCase__ = nn.Parameter( torch.FloatTensor(weights['Embed_0']['embedding'] ) , requires_grad=lowerCamelCase ) for lyr_num, lyr in enumerate(model.encoders ): UpperCAmelCase__ = weights[f'''layers_{lyr_num}'''] UpperCAmelCase__ = ly_weight['attention'] UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) ) UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) ) UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) ) UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) ) UpperCAmelCase__ = nn.Parameter( torch.FloatTensor(ly_weight['pre_attention_layer_norm']['scale'] ) ) UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) ) UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) ) UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) ) UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) ) UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(weights['encoder_norm']['scale'] ) ) return model def a_ ( lowerCamelCase , lowerCamelCase ): UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(weights['time_emb_dense0']['kernel'].T ) ) UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(weights['time_emb_dense1']['kernel'].T ) ) UpperCAmelCase__ = nn.Parameter( torch.FloatTensor(weights['Embed_0']['embedding'] ) , requires_grad=lowerCamelCase ) UpperCAmelCase__ = nn.Parameter( 
torch.FloatTensor(weights['continuous_inputs_projection']['kernel'].T ) ) for lyr_num, lyr in enumerate(model.decoders ): UpperCAmelCase__ = weights[f'''layers_{lyr_num}'''] UpperCAmelCase__ = nn.Parameter( torch.FloatTensor(ly_weight['pre_self_attention_layer_norm']['scale'] ) ) UpperCAmelCase__ = nn.Parameter( torch.FloatTensor(ly_weight['FiLMLayer_0']['DenseGeneral_0']['kernel'].T ) ) UpperCAmelCase__ = ly_weight['self_attention'] UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) ) UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) ) UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) ) UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) ) UpperCAmelCase__ = ly_weight['MultiHeadDotProductAttention_0'] UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) ) UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) ) UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) ) UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) ) UpperCAmelCase__ = nn.Parameter( torch.FloatTensor(ly_weight['pre_cross_attention_layer_norm']['scale'] ) ) UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) ) UpperCAmelCase__ = nn.Parameter( torch.FloatTensor(ly_weight['FiLMLayer_1']['DenseGeneral_0']['kernel'].T ) ) UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) ) UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) ) UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) ) UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(weights['decoder_norm']['scale'] ) ) UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(weights['spec_out_dense']['kernel'].T ) ) return model def a_ ( lowerCamelCase ): UpperCAmelCase__ = checkpoints.load_tax_checkpoint(args.checkpoint_path ) UpperCAmelCase__ = jnp.tree_util.tree_map(onp.array , lowerCamelCase ) UpperCAmelCase__ = [ 'from __gin__ import dynamic_registration', 'from music_spectrogram_diffusion.models.diffusion import diffusion_utils', 'diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0', 'diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()', ] UpperCAmelCase__ = os.path.join(args.checkpoint_path , '..' 
, 'config.gin' ) UpperCAmelCase__ = inference.parse_training_gin_file(lowerCamelCase , lowerCamelCase ) UpperCAmelCase__ = inference.InferenceModel(args.checkpoint_path , lowerCamelCase ) UpperCAmelCase__ = DDPMScheduler(beta_schedule='squaredcos_cap_v2' , variance_type='fixed_large' ) UpperCAmelCase__ = SpectrogramNotesEncoder( max_length=synth_model.sequence_length['inputs'] , vocab_size=synth_model.model.module.config.vocab_size , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj='gated-gelu' , ) UpperCAmelCase__ = SpectrogramContEncoder( input_dims=synth_model.audio_codec.n_dims , targets_context_length=synth_model.sequence_length['targets_context'] , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj='gated-gelu' , ) UpperCAmelCase__ = TaFilmDecoder( input_dims=synth_model.audio_codec.n_dims , targets_length=synth_model.sequence_length['targets_context'] , max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time , d_model=synth_model.model.module.config.emb_dim , num_layers=synth_model.model.module.config.num_decoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , dropout_rate=synth_model.model.module.config.dropout_rate , ) UpperCAmelCase__ = load_notes_encoder(ta_checkpoint['target']['token_encoder'] , lowerCamelCase ) UpperCAmelCase__ = load_continuous_encoder(ta_checkpoint['target']['continuous_encoder'] , lowerCamelCase ) UpperCAmelCase__ = load_decoder(ta_checkpoint['target']['decoder'] , lowerCamelCase ) UpperCAmelCase__ = OnnxRuntimeModel.from_pretrained('kashif/soundstream_mel_decoder' ) UpperCAmelCase__ = SpectrogramDiffusionPipeline( notes_encoder=lowerCamelCase , continuous_encoder=lowerCamelCase , decoder=lowerCamelCase , scheduler=lowerCamelCase , melgan=lowerCamelCase , ) if args.save: pipe.save_pretrained(args.output_path ) if __name__ == "__main__": lowerCAmelCase__ : Tuple = argparse.ArgumentParser() parser.add_argument('--output_path', default=None, type=str, required=True, help='Path to the converted model.') parser.add_argument( '--save', default=True, type=bool, required=False, help='Whether to save the converted model or not.' ) parser.add_argument( '--checkpoint_path', default=F"""{MODEL}/checkpoint_500000""", type=str, required=False, help='Path to the original jax model checkpoint.', ) lowerCAmelCase__ : List[str] = parser.parse_args() main(args)
632
1
'''simple docstring''' import random import unittest import torch from diffusers import IFInpaintingSuperResolutionPipeline from diffusers.utils import floats_tensor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import skip_mps, torch_device from ..pipeline_params import ( TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin from . import IFPipelineTesterMixin @skip_mps class __A ( A , A , unittest.TestCase ): '''simple docstring''' __lowerCamelCase : List[Any] = IFInpaintingSuperResolutionPipeline __lowerCamelCase : Tuple = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'width', 'height'} __lowerCamelCase : Optional[Any] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({'original_image'} ) __lowerCamelCase : str = PipelineTesterMixin.required_optional_params - {'latents'} def a__ (self ) -> List[Any]: """simple docstring""" return self._get_superresolution_dummy_components() def a__ (self , A , A=0 ) -> List[Any]: """simple docstring""" if str(A ).startswith('''mps''' ): _a = torch.manual_seed(A ) else: _a = torch.Generator(device=A ).manual_seed(A ) _a = floats_tensor((1, 3, 16, 16) , rng=random.Random(A ) ).to(A ) _a = floats_tensor((1, 3, 32, 32) , rng=random.Random(A ) ).to(A ) _a = floats_tensor((1, 3, 32, 32) , rng=random.Random(A ) ).to(A ) _a = { '''prompt''': '''A painting of a squirrel eating a burger''', '''image''': image, '''original_image''': original_image, '''mask_image''': mask_image, '''generator''': generator, '''num_inference_steps''': 2, '''output_type''': '''numpy''', } return inputs @unittest.skipIf( torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , ) def a__ (self ) -> Optional[int]: """simple docstring""" self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 ) def a__ (self ) -> str: """simple docstring""" self._test_save_load_optional_components() @unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' ) def a__ (self ) -> str: """simple docstring""" super().test_save_load_floataa(expected_max_diff=1E-1 ) def a__ (self ) -> Tuple: """simple docstring""" self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 ) def a__ (self ) -> Union[str, Any]: """simple docstring""" self._test_save_load_local() def a__ (self ) -> Any: """simple docstring""" self._test_inference_batch_single_identical( expected_max_diff=1E-2 , )
11
'''simple docstring'''
from __future__ import annotations


def binary_search(a_list: list[int], item: int) -> bool:
    """simple docstring"""
    if len(a_list) == 0:
        return False
    midpoint = len(a_list) // 2
    if a_list[midpoint] == item:
        return True
    if item < a_list[midpoint]:
        return binary_search(a_list[:midpoint], item)
    else:
        return binary_search(a_list[midpoint + 1 :], item)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by comma:\n").strip()
    sequence = [int(item.strip()) for item in user_input.split(",")]
    target = int(input("Enter the number to be found in the list:\n").strip())
    not_str = "" if binary_search(sequence, target) else "not "
    print(F"""{target} was {not_str}found in {sequence}""")
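Two minimal checks for the function above (illustrative inputs); note the list must already be sorted for the recursion to be valid.

assert binary_search([1, 3, 5, 7, 9], 7)
assert not binary_search([1, 3, 5, 7, 9], 4)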
11
1
"""simple docstring""" # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available A_ = { '''configuration_xmod''': [ '''XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XmodConfig''', '''XmodOnnxConfig''', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A_ = [ '''XMOD_PRETRAINED_MODEL_ARCHIVE_LIST''', '''XmodForCausalLM''', '''XmodForMaskedLM''', '''XmodForMultipleChoice''', '''XmodForQuestionAnswering''', '''XmodForSequenceClassification''', '''XmodForTokenClassification''', '''XmodModel''', '''XmodPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xmod import ( XMOD_PRETRAINED_MODEL_ARCHIVE_LIST, XmodForCausalLM, XmodForMaskedLM, XmodForMultipleChoice, XmodForQuestionAnswering, XmodForSequenceClassification, XmodForTokenClassification, XmodModel, XmodPreTrainedModel, ) else: import sys A_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
498
"""simple docstring""" def _lowerCAmelCase ( UpperCAmelCase__ : int, UpperCAmelCase__ : int ) ->str: if a < 0 or b < 0: raise ValueError("""the value of both inputs must be positive""" ) A__ : str = str(bin(UpperCAmelCase__ ) )[2:] # remove the leading "0b" A__ : List[Any] = str(bin(UpperCAmelCase__ ) )[2:] # remove the leading "0b" A__ : List[str] = max(len(UpperCAmelCase__ ), len(UpperCAmelCase__ ) ) return "0b" + "".join( str(int(char_a == """1""" and char_b == """1""" ) ) for char_a, char_b in zip(a_binary.zfill(UpperCAmelCase__ ), b_binary.zfill(UpperCAmelCase__ ) ) ) if __name__ == "__main__": import doctest doctest.testmod()
498
1
import torch from diffusers import CMStochasticIterativeScheduler from .test_schedulers import SchedulerCommonTest class snake_case ( __snake_case ): """simple docstring""" __lowerCAmelCase = (CMStochasticIterativeScheduler,) __lowerCAmelCase = 10 def snake_case__ ( self , **lowerCAmelCase_ ): __lowercase = { "num_train_timesteps": 201, "sigma_min": 0.0_02, "sigma_max": 80.0, } config.update(**lowerCAmelCase_ ) return config def snake_case__ ( self ): __lowercase = 10 __lowercase = self.get_scheduler_config() __lowercase = self.scheduler_classes[0](**lowerCAmelCase_ ) scheduler.set_timesteps(lowerCAmelCase_ ) __lowercase = scheduler.timesteps[0] __lowercase = scheduler.timesteps[1] __lowercase = self.dummy_sample __lowercase = 0.1 * sample __lowercase = scheduler.step(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ).prev_sample __lowercase = scheduler.step(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def snake_case__ ( self ): for timesteps in [10, 50, 100, 1000]: self.check_over_configs(num_train_timesteps=lowerCAmelCase_ ) def snake_case__ ( self ): for clip_denoised in [True, False]: self.check_over_configs(clip_denoised=lowerCAmelCase_ ) def snake_case__ ( self ): __lowercase = self.scheduler_classes[0] __lowercase = self.get_scheduler_config() __lowercase = scheduler_class(**lowerCAmelCase_ ) __lowercase = 1 scheduler.set_timesteps(lowerCAmelCase_ ) __lowercase = scheduler.timesteps __lowercase = torch.manual_seed(0 ) __lowercase = self.dummy_model() __lowercase = self.dummy_sample_deter * scheduler.init_noise_sigma for i, t in enumerate(lowerCAmelCase_ ): # 1. scale model input __lowercase = scheduler.scale_model_input(lowerCAmelCase_ , lowerCAmelCase_ ) # 2. predict noise residual __lowercase = model(lowerCAmelCase_ , lowerCAmelCase_ ) # 3. predict previous sample x_t-1 __lowercase = scheduler.step(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , generator=lowerCAmelCase_ ).prev_sample __lowercase = pred_prev_sample __lowercase = torch.sum(torch.abs(lowerCAmelCase_ ) ) __lowercase = torch.mean(torch.abs(lowerCAmelCase_ ) ) assert abs(result_sum.item() - 1_92.76_14 ) < 1E-2 assert abs(result_mean.item() - 0.25_10 ) < 1E-3 def snake_case__ ( self ): __lowercase = self.scheduler_classes[0] __lowercase = self.get_scheduler_config() __lowercase = scheduler_class(**lowerCAmelCase_ ) __lowercase = [106, 0] scheduler.set_timesteps(timesteps=lowerCAmelCase_ ) __lowercase = scheduler.timesteps __lowercase = torch.manual_seed(0 ) __lowercase = self.dummy_model() __lowercase = self.dummy_sample_deter * scheduler.init_noise_sigma for t in timesteps: # 1. scale model input __lowercase = scheduler.scale_model_input(lowerCAmelCase_ , lowerCAmelCase_ ) # 2. predict noise residual __lowercase = model(lowerCAmelCase_ , lowerCAmelCase_ ) # 3. 
predict previous sample x_t-1 __lowercase = scheduler.step(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , generator=lowerCAmelCase_ ).prev_sample __lowercase = pred_prev_sample __lowercase = torch.sum(torch.abs(lowerCAmelCase_ ) ) __lowercase = torch.mean(torch.abs(lowerCAmelCase_ ) ) assert abs(result_sum.item() - 3_47.63_57 ) < 1E-2 assert abs(result_mean.item() - 0.45_27 ) < 1E-3 def snake_case__ ( self ): __lowercase = self.scheduler_classes[0] __lowercase = self.get_scheduler_config() __lowercase = scheduler_class(**lowerCAmelCase_ ) __lowercase = [39, 30, 12, 15, 0] with self.assertRaises(lowerCAmelCase_ , msg="`timesteps` must be in descending order." ): scheduler.set_timesteps(timesteps=lowerCAmelCase_ ) def snake_case__ ( self ): __lowercase = self.scheduler_classes[0] __lowercase = self.get_scheduler_config() __lowercase = scheduler_class(**lowerCAmelCase_ ) __lowercase = [39, 30, 12, 1, 0] __lowercase = len(lowerCAmelCase_ ) with self.assertRaises(lowerCAmelCase_ , msg="Can only pass one of `num_inference_steps` or `timesteps`." ): scheduler.set_timesteps(num_inference_steps=lowerCAmelCase_ , timesteps=lowerCAmelCase_ ) def snake_case__ ( self ): __lowercase = self.scheduler_classes[0] __lowercase = self.get_scheduler_config() __lowercase = scheduler_class(**lowerCAmelCase_ ) __lowercase = [scheduler.config.num_train_timesteps] with self.assertRaises( lowerCAmelCase_ , msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}" , ): scheduler.set_timesteps(timesteps=lowerCAmelCase_ )
321
from typing import Optional import torch import torch.utils.checkpoint from torch import Tensor, nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...modeling_outputs import ( BackboneOutput, BaseModelOutputWithNoAttention, BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention, ) from ...modeling_utils import PreTrainedModel from ...utils import ( add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, ) from ...utils.backbone_utils import BackboneMixin from .configuration_resnet import ResNetConfig lowerCAmelCase__ = logging.get_logger(__name__) # General docstring lowerCAmelCase__ = 'ResNetConfig' # Base docstring lowerCAmelCase__ = 'microsoft/resnet-50' lowerCAmelCase__ = [1, 2_048, 7, 7] # Image classification docstring lowerCAmelCase__ = 'microsoft/resnet-50' lowerCAmelCase__ = 'tiger cat' lowerCAmelCase__ = [ 'microsoft/resnet-50', # See all resnet models at https://huggingface.co/models?filter=resnet ] class snake_case ( nn.Module ): """simple docstring""" def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 3 , lowerCAmelCase_ = 1 , lowerCAmelCase_ = "relu" ): super().__init__() __lowercase = nn.Convad( lowerCAmelCase_ , lowerCAmelCase_ , kernel_size=lowerCAmelCase_ , stride=lowerCAmelCase_ , padding=kernel_size // 2 , bias=lowerCAmelCase_ ) __lowercase = nn.BatchNormad(lowerCAmelCase_ ) __lowercase = ACTaFN[activation] if activation is not None else nn.Identity() def snake_case__ ( self , lowerCAmelCase_ ): __lowercase = self.convolution(lowerCAmelCase_ ) __lowercase = self.normalization(lowerCAmelCase_ ) __lowercase = self.activation(lowerCAmelCase_ ) return hidden_state class snake_case ( nn.Module ): """simple docstring""" def __init__( self , lowerCAmelCase_ ): super().__init__() __lowercase = ResNetConvLayer( config.num_channels , config.embedding_size , kernel_size=7 , stride=2 , activation=config.hidden_act ) __lowercase = nn.MaxPoolad(kernel_size=3 , stride=2 , padding=1 ) __lowercase = config.num_channels def snake_case__ ( self , lowerCAmelCase_ ): __lowercase = pixel_values.shape[1] if num_channels != self.num_channels: raise ValueError( "Make sure that the channel dimension of the pixel values match with the one set in the configuration." 
) __lowercase = self.embedder(lowerCAmelCase_ ) __lowercase = self.pooler(lowerCAmelCase_ ) return embedding class snake_case ( nn.Module ): """simple docstring""" def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 2 ): super().__init__() __lowercase = nn.Convad(lowerCAmelCase_ , lowerCAmelCase_ , kernel_size=1 , stride=lowerCAmelCase_ , bias=lowerCAmelCase_ ) __lowercase = nn.BatchNormad(lowerCAmelCase_ ) def snake_case__ ( self , lowerCAmelCase_ ): __lowercase = self.convolution(lowerCAmelCase_ ) __lowercase = self.normalization(lowerCAmelCase_ ) return hidden_state class snake_case ( nn.Module ): """simple docstring""" def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 1 , lowerCAmelCase_ = "relu" ): super().__init__() __lowercase = in_channels != out_channels or stride != 1 __lowercase = ( ResNetShortCut(lowerCAmelCase_ , lowerCAmelCase_ , stride=lowerCAmelCase_ ) if should_apply_shortcut else nn.Identity() ) __lowercase = nn.Sequential( ResNetConvLayer(lowerCAmelCase_ , lowerCAmelCase_ , stride=lowerCAmelCase_ ) , ResNetConvLayer(lowerCAmelCase_ , lowerCAmelCase_ , activation=lowerCAmelCase_ ) , ) __lowercase = ACTaFN[activation] def snake_case__ ( self , lowerCAmelCase_ ): __lowercase = hidden_state __lowercase = self.layer(lowerCAmelCase_ ) __lowercase = self.shortcut(lowerCAmelCase_ ) hidden_state += residual __lowercase = self.activation(lowerCAmelCase_ ) return hidden_state class snake_case ( nn.Module ): """simple docstring""" def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 1 , lowerCAmelCase_ = "relu" , lowerCAmelCase_ = 4 ): super().__init__() __lowercase = in_channels != out_channels or stride != 1 __lowercase = out_channels // reduction __lowercase = ( ResNetShortCut(lowerCAmelCase_ , lowerCAmelCase_ , stride=lowerCAmelCase_ ) if should_apply_shortcut else nn.Identity() ) __lowercase = nn.Sequential( ResNetConvLayer(lowerCAmelCase_ , lowerCAmelCase_ , kernel_size=1 ) , ResNetConvLayer(lowerCAmelCase_ , lowerCAmelCase_ , stride=lowerCAmelCase_ ) , ResNetConvLayer(lowerCAmelCase_ , lowerCAmelCase_ , kernel_size=1 , activation=lowerCAmelCase_ ) , ) __lowercase = ACTaFN[activation] def snake_case__ ( self , lowerCAmelCase_ ): __lowercase = hidden_state __lowercase = self.layer(lowerCAmelCase_ ) __lowercase = self.shortcut(lowerCAmelCase_ ) hidden_state += residual __lowercase = self.activation(lowerCAmelCase_ ) return hidden_state class snake_case ( nn.Module ): """simple docstring""" def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 2 , lowerCAmelCase_ = 2 , ): super().__init__() __lowercase = ResNetBottleNeckLayer if config.layer_type == "bottleneck" else ResNetBasicLayer __lowercase = nn.Sequential( # downsampling is done in the first layer with stride of 2 layer(lowerCAmelCase_ , lowerCAmelCase_ , stride=lowerCAmelCase_ , activation=config.hidden_act ) , *[layer(lowerCAmelCase_ , lowerCAmelCase_ , activation=config.hidden_act ) for _ in range(depth - 1 )] , ) def snake_case__ ( self , lowerCAmelCase_ ): __lowercase = input for layer in self.layers: __lowercase = layer(lowerCAmelCase_ ) return hidden_state class snake_case ( nn.Module ): """simple docstring""" def __init__( self , lowerCAmelCase_ ): super().__init__() __lowercase = nn.ModuleList([] ) # based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input self.stages.append( ResNetStage( lowerCAmelCase_ , config.embedding_size , config.hidden_sizes[0] , 
stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) ) __lowercase = zip(config.hidden_sizes , config.hidden_sizes[1:] ) for (in_channels, out_channels), depth in zip(lowerCAmelCase_ , config.depths[1:] ): self.stages.append(ResNetStage(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , depth=lowerCAmelCase_ ) ) def snake_case__ ( self , lowerCAmelCase_ , lowerCAmelCase_ = False , lowerCAmelCase_ = True ): __lowercase = () if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: __lowercase = hidden_states + (hidden_state,) __lowercase = stage_module(lowerCAmelCase_ ) if output_hidden_states: __lowercase = hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None ) return BaseModelOutputWithNoAttention( last_hidden_state=lowerCAmelCase_ , hidden_states=lowerCAmelCase_ , ) class snake_case ( __snake_case ): """simple docstring""" __lowerCAmelCase = ResNetConfig __lowerCAmelCase = """resnet""" __lowerCAmelCase = """pixel_values""" __lowerCAmelCase = True def snake_case__ ( self , lowerCAmelCase_ ): if isinstance(lowerCAmelCase_ , nn.Convad ): nn.init.kaiming_normal_(module.weight , mode="fan_out" , nonlinearity="relu" ) elif isinstance(lowerCAmelCase_ , (nn.BatchNormad, nn.GroupNorm) ): nn.init.constant_(module.weight , 1 ) nn.init.constant_(module.bias , 0 ) def snake_case__ ( self , lowerCAmelCase_ , lowerCAmelCase_=False ): if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): __lowercase = value lowerCAmelCase__ = r'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' lowerCAmelCase__ = r'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. 
    See `hidden_states` under returned tensors for\n more detail.\n    return_dict (`bool`, *optional*):\n        Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'


@add_start_docstrings(
    """The bare ResNet model outputting raw features without any specific head on top.""" , __snake_case , )
class snake_case ( __snake_case ):
    """simple docstring"""

    def __init__( self , lowerCAmelCase_ ):
        super().__init__(lowerCAmelCase_ )
        __lowercase = config
        __lowercase = ResNetEmbeddings(lowerCAmelCase_ )
        __lowercase = ResNetEncoder(lowerCAmelCase_ )
        __lowercase = nn.AdaptiveAvgPoolad((1, 1) )
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(lowerCAmelCase_ )
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowerCAmelCase_ , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
    def snake_case__ ( self , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = None ):
        __lowercase = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        __lowercase = return_dict if return_dict is not None else self.config.use_return_dict
        __lowercase = self.embedder(lowerCAmelCase_ )
        __lowercase = self.encoder(
            lowerCAmelCase_ , output_hidden_states=lowerCAmelCase_ , return_dict=lowerCAmelCase_ )
        __lowercase = encoder_outputs[0]
        __lowercase = self.pooler(lowerCAmelCase_ )
        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]
        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=lowerCAmelCase_ , pooler_output=lowerCAmelCase_ , hidden_states=encoder_outputs.hidden_states , )


@add_start_docstrings(
    """
    ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """ , __snake_case , )
class snake_case ( __snake_case ):
    """simple docstring"""

    def __init__( self , lowerCAmelCase_ ):
        super().__init__(lowerCAmelCase_ )
        __lowercase = config.num_labels
        __lowercase = ResNetModel(lowerCAmelCase_ )
        # classification head
        __lowercase = nn.Sequential(
            nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(lowerCAmelCase_ )
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowerCAmelCase_ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
    def snake_case__ ( self , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , ):
        __lowercase = return_dict if return_dict is not None else self.config.use_return_dict
        __lowercase = self.resnet(lowerCAmelCase_ , output_hidden_states=lowerCAmelCase_ , return_dict=lowerCAmelCase_ )
        __lowercase = outputs.pooler_output if return_dict else outputs[1]
        __lowercase = self.classifier(lowerCAmelCase_ )
        __lowercase = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    __lowercase = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    __lowercase = "single_label_classification"
                else:
                    __lowercase = "multi_label_classification"
            if self.config.problem_type == "regression":
                __lowercase = MSELoss()
                if self.num_labels == 1:
                    __lowercase = loss_fct(logits.squeeze() , labels.squeeze() )
                else:
                    __lowercase = loss_fct(lowerCAmelCase_ , lowerCAmelCase_ )
            elif self.config.problem_type == "single_label_classification":
                __lowercase = CrossEntropyLoss()
                __lowercase = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
            elif self.config.problem_type == "multi_label_classification":
                __lowercase = BCEWithLogitsLoss()
                __lowercase = loss_fct(lowerCAmelCase_ , lowerCAmelCase_ )
        if not return_dict:
            __lowercase = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output
        return ImageClassifierOutputWithNoAttention(loss=lowerCAmelCase_ , logits=lowerCAmelCase_ , hidden_states=outputs.hidden_states )


@add_start_docstrings(
    """
    ResNet backbone, to be used with frameworks like DETR and MaskFormer.
    """ , __snake_case , )
class snake_case ( __snake_case , __snake_case ):
    """simple docstring"""

    def __init__( self , lowerCAmelCase_ ):
        super().__init__(lowerCAmelCase_ )
        super()._init_backbone(lowerCAmelCase_ )
        __lowercase = [config.embedding_size] + config.hidden_sizes
        __lowercase = ResNetEmbeddings(lowerCAmelCase_ )
        __lowercase = ResNetEncoder(lowerCAmelCase_ )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(lowerCAmelCase_ )
    @replace_return_docstrings(output_type=lowerCAmelCase_ , config_class=_CONFIG_FOR_DOC )
    def snake_case__ ( self , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = None ):
        __lowercase = return_dict if return_dict is not None else self.config.use_return_dict
        __lowercase = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        __lowercase = self.embedder(lowerCAmelCase_ )
        __lowercase = self.encoder(lowerCAmelCase_ , output_hidden_states=lowerCAmelCase_ , return_dict=lowerCAmelCase_ )
        __lowercase = outputs.hidden_states
        __lowercase = ()
        for idx, stage in enumerate(self.stage_names ):
            if stage in self.out_features:
                feature_maps += (hidden_states[idx],)
        if not return_dict:
            __lowercase = (feature_maps,)
            if output_hidden_states:
                output += (outputs.hidden_states,)
            return output
        return BackboneOutput(
            feature_maps=lowerCAmelCase_ , hidden_states=outputs.hidden_states if output_hidden_states else None , attentions=lowerCAmelCase_ , )
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL A = logging.get_logger(__name__) class UpperCAmelCase__ ( UpperCamelCase ): lowerCAmelCase_ : Tuple = ["""pixel_values"""] def __init__( self : Optional[Any] , snake_case : bool = True , snake_case : Dict[str, int] = None , snake_case : int = 0.9 , snake_case : PILImageResampling = PILImageResampling.BICUBIC , snake_case : bool = True , snake_case : Dict[str, int] = None , snake_case : Union[int, float] = 1 / 255 , snake_case : bool = True , snake_case : bool = True , snake_case : Optional[Union[float, List[float]]] = None , snake_case : Optional[Union[float, List[float]]] = None , **snake_case : str , ) -> None: '''simple docstring''' super().__init__(**snake_case ) A = size if size is not None else {'shortest_edge': 224} A = get_size_dict(snake_case , default_to_square=snake_case ) A = crop_size if crop_size is not None else {'height': 224, 'width': 224} A = get_size_dict(snake_case , param_name='crop_size' ) A = do_resize A = size A = crop_pct A = resample A = do_center_crop A = crop_size A = do_rescale A = rescale_factor A = do_normalize A = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN A = image_std if image_std is not None else IMAGENET_DEFAULT_STD def A_ ( self : List[Any] , snake_case : np.ndarray , snake_case : Dict[str, int] , snake_case : Optional[float] = None , snake_case : PILImageResampling = PILImageResampling.BICUBIC , snake_case : Optional[Union[str, ChannelDimension]] = None , **snake_case : int , ) -> np.ndarray: '''simple docstring''' A = get_size_dict(snake_case , default_to_square=snake_case ) if "shortest_edge" not in size and ("height" not in size or "width" not in size): raise ValueError(f"""size must contain 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}""" ) if crop_pct is not None: if "shortest_edge" in size: A = int(size['shortest_edge'] / crop_pct ) elif "height" in size and "width" in size: if size["height"] == size["width"]: A = int(size['height'] / crop_pct ) else: A = (int(size['height'] / crop_pct ), int(size['width'] / crop_pct )) else: raise ValueError('Invalid size for resize: {}'.format(snake_case ) ) A = get_resize_output_image_size(snake_case , size=snake_case , default_to_square=snake_case ) else: if "shortest_edge" in size: A = get_resize_output_image_size(snake_case , size=size['shortest_edge'] , default_to_square=snake_case ) elif "height" in size and "width" in size: A = (size['height'], size['width']) else: raise ValueError('Invalid size for resize: {}'.format(snake_case ) ) return resize(snake_case , size=snake_case , resample=snake_case , data_format=snake_case , **snake_case ) def A_ ( self : Any , snake_case : np.ndarray , snake_case : Dict[str, int] , snake_case : Optional[Union[str, ChannelDimension]] = None , **snake_case : Tuple , ) -> np.ndarray: '''simple docstring''' A = get_size_dict(snake_case ) if "height" not in size or "width" not in size: raise ValueError(f"""size must contain 'height' and 'width' as keys. 
Got {size.keys()}""" ) return center_crop(snake_case , size=(size['height'], size['width']) , data_format=snake_case , **snake_case ) def A_ ( self : Optional[Any] , snake_case : np.ndarray , snake_case : Union[int, float] , snake_case : Optional[Union[str, ChannelDimension]] = None , **snake_case : Tuple , ) -> Union[str, Any]: '''simple docstring''' return rescale(snake_case , scale=snake_case , data_format=snake_case , **snake_case ) def A_ ( self : List[Any] , snake_case : np.ndarray , snake_case : Union[float, List[float]] , snake_case : Union[float, List[float]] , snake_case : Optional[Union[str, ChannelDimension]] = None , **snake_case : List[Any] , ) -> np.ndarray: '''simple docstring''' return normalize(snake_case , mean=snake_case , std=snake_case , data_format=snake_case , **snake_case ) def A_ ( self : Optional[int] , snake_case : ImageInput , snake_case : bool = None , snake_case : Dict[str, int] = None , snake_case : int = None , snake_case : PILImageResampling = None , snake_case : bool = None , snake_case : Dict[str, int] = None , snake_case : bool = None , snake_case : float = None , snake_case : bool = None , snake_case : Optional[Union[float, List[float]]] = None , snake_case : Optional[Union[float, List[float]]] = None , snake_case : Optional[Union[str, TensorType]] = None , snake_case : ChannelDimension = ChannelDimension.FIRST , **snake_case : List[Any] , ) -> PIL.Image.Image: '''simple docstring''' A = do_resize if do_resize is not None else self.do_resize A = crop_pct if crop_pct is not None else self.crop_pct A = resample if resample is not None else self.resample A = do_center_crop if do_center_crop is not None else self.do_center_crop A = do_rescale if do_rescale is not None else self.do_rescale A = rescale_factor if rescale_factor is not None else self.rescale_factor A = do_normalize if do_normalize is not None else self.do_normalize A = image_mean if image_mean is not None else self.image_mean A = image_std if image_std is not None else self.image_std A = size if size is not None else self.size A = get_size_dict(snake_case , default_to_square=snake_case ) A = crop_size if crop_size is not None else self.crop_size A = get_size_dict(snake_case , param_name='crop_size' ) A = make_list_of_images(snake_case ) if not valid_images(snake_case ): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) if do_resize and size is None or resample is None: raise ValueError('Size and resample must be specified if do_resize is True.' ) if do_center_crop and crop_pct is None: raise ValueError('Crop_pct must be specified if do_center_crop is True.' ) if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('Image mean and std must be specified if do_normalize is True.' ) # All transformations expect numpy arrays. 
A = [to_numpy_array(snake_case ) for image in images] if do_resize: A = [self.resize(image=snake_case , size=snake_case , crop_pct=snake_case , resample=snake_case ) for image in images] if do_center_crop: A = [self.center_crop(image=snake_case , size=snake_case ) for image in images] if do_rescale: A = [self.rescale(image=snake_case , scale=snake_case ) for image in images] if do_normalize: A = [self.normalize(image=snake_case , mean=snake_case , std=snake_case ) for image in images] A = [to_channel_dimension_format(snake_case , snake_case ) for image in images] A = {'pixel_values': images} return BatchFeature(data=snake_case , tensor_type=snake_case )
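# Hedged worked example of the crop_pct branch in the resize method above: with
# size = {"shortest_edge": 224} and crop_pct = 0.9, the image is first resized so
# its short side is int(224 / 0.9) = 248, then center-cropped back to 224x224.
shortest_edge, crop_pct = 224, 0.9
scale_size = int(shortest_edge / crop_pct)
assert scale_size == 248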
"""simple docstring""" from __future__ import annotations import sys from collections import deque from typing import Generic, TypeVar A = TypeVar('T') class UpperCAmelCase__ ( Generic[T] ): lowerCAmelCase_ : deque[T] # Cache store of keys lowerCAmelCase_ : set[T] # References of the keys in cache lowerCAmelCase_ : int = 10 # Maximum capacity of cache def __init__( self : int , snake_case : int ) -> None: '''simple docstring''' A = deque() A = set() if not n: A = sys.maxsize elif n < 0: raise ValueError('n should be an integer greater than 0.' ) else: A = n def A_ ( self : Optional[Any] , snake_case : T ) -> None: '''simple docstring''' if x not in self.key_reference: if len(self.dq_store ) == LRUCache._MAX_CAPACITY: A = self.dq_store.pop() self.key_reference.remove(snake_case ) else: self.dq_store.remove(snake_case ) self.dq_store.appendleft(snake_case ) self.key_reference.add(snake_case ) def A_ ( self : Dict ) -> None: '''simple docstring''' for k in self.dq_store: print(snake_case ) def __repr__( self : int ) -> str: '''simple docstring''' return f"""LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}""" if __name__ == "__main__": import doctest doctest.testmod() A = LRUCache(4) lru_cache.refer('A') lru_cache.refer(2) lru_cache.refer(3) lru_cache.refer('A') lru_cache.refer(4) lru_cache.refer(5) lru_cache.display() print(lru_cache) assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
"""simple docstring""" from itertools import permutations def __magic_name__ ( _lowerCamelCase : tuple ): if num[3] % 2 != 0: return False if (num[2] + num[3] + num[4]) % 3 != 0: return False if num[5] % 5 != 0: return False __a : List[Any] = [7, 1_1, 1_3, 1_7] for i, test in enumerate(_lowerCamelCase ): if (num[i + 4] * 1_0_0 + num[i + 5] * 1_0 + num[i + 6]) % test != 0: return False return True def __magic_name__ ( _lowerCamelCase : int = 1_0 ): return sum( int("""""".join(map(_lowerCamelCase , _lowerCamelCase ) ) ) for num in permutations(range(_lowerCamelCase ) ) if is_substring_divisible(_lowerCamelCase ) ) if __name__ == "__main__": print(f'{solution() = }')
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available lowercase__ = { "configuration_longt5": ["LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP", "LongT5Config", "LongT5OnnxConfig"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase__ = [ "LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST", "LongT5EncoderModel", "LongT5ForConditionalGeneration", "LongT5Model", "LongT5PreTrainedModel", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase__ = [ "FlaxLongT5ForConditionalGeneration", "FlaxLongT5Model", "FlaxLongT5PreTrainedModel", ] if TYPE_CHECKING: from .configuration_longta import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongTaConfig, LongTaOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_longta import ( LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST, LongTaEncoderModel, LongTaForConditionalGeneration, LongTaModel, LongTaPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_longta import ( FlaxLongTaForConditionalGeneration, FlaxLongTaModel, FlaxLongTaPreTrainedModel, ) else: import sys lowercase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import gc
import unittest

import numpy as np
import torch

from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps

from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()


class a__ ( lowercase__ , unittest.TestCase ):
    _A = DanceDiffusionPipeline
    _A = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
    _A = PipelineTesterMixin.required_optional_params - {
        """callback""",
        """latents""",
        """callback_steps""",
        """output_type""",
        """num_images_per_prompt""",
    }
    _A = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
    _A = False
    _A = False

    def lowerCAmelCase ( self : int ) -> Optional[int]:
        """simple docstring"""
        torch.manual_seed(0 )
        lowerCamelCase_: Optional[int] = UNetaDModel(
            block_out_channels=(32, 32, 64) , extra_in_channels=16 , sample_size=5_12 , sample_rate=1_60_00 , in_channels=2 , out_channels=2 , flip_sin_to_cos=__lowercase , use_timestep_embedding=__lowercase , time_embedding_type="""fourier""" , mid_block_type="""UNetMidBlock1D""" , down_block_types=("""DownBlock1DNoSkip""", """DownBlock1D""", """AttnDownBlock1D""") , up_block_types=("""AttnUpBlock1D""", """UpBlock1D""", """UpBlock1DNoSkip""") , )
        lowerCamelCase_: Optional[int] = IPNDMScheduler()
        lowerCamelCase_: List[str] = {
            """unet""": unet,
            """scheduler""": scheduler,
        }
        return components

    def lowerCAmelCase ( self : int , A_ : int , A_ : Optional[Any]=0 ) -> int:
        """simple docstring"""
        if str(__lowercase ).startswith("""mps""" ):
            lowerCamelCase_: List[str] = torch.manual_seed(__lowercase )
        else:
            lowerCamelCase_: Union[str, Any] = torch.Generator(device=__lowercase ).manual_seed(__lowercase )
        lowerCamelCase_: List[Any] = {
            """batch_size""": 1,
            """generator""": generator,
            """num_inference_steps""": 4,
        }
        return inputs

    def lowerCAmelCase ( self : List[str] ) -> Dict:
        """simple docstring"""
        lowerCamelCase_: List[str] = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        lowerCamelCase_: Optional[int] = self.get_dummy_components()
        lowerCamelCase_: Union[str, Any] = DanceDiffusionPipeline(**__lowercase )
        lowerCamelCase_: List[str] = pipe.to(__lowercase )
        pipe.set_progress_bar_config(disable=__lowercase )
        lowerCamelCase_: int = self.get_dummy_inputs(__lowercase )
        lowerCamelCase_: Optional[Any] = pipe(**__lowercase )
        lowerCamelCase_: str = output.audios
        lowerCamelCase_: str = audio[0, -3:, -3:]
        assert audio.shape == (1, 2, components["unet"].sample_size)
        lowerCamelCase_: Tuple = np.array([-0.7265, 1.0000, -0.8388, 0.1175, 0.9498, -1.0000] )
        assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2

    @skip_mps
    def lowerCAmelCase ( self : Union[str, Any] ) -> Dict:
        """simple docstring"""
        return super().test_save_load_local()

    @skip_mps
    def lowerCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
        """simple docstring"""
        return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )

    @skip_mps
    def lowerCAmelCase ( self : Dict ) -> str:
        """simple docstring"""
        return super().test_save_load_optional_components()

    @skip_mps
    def lowerCAmelCase ( self : Dict ) -> Tuple:
        """simple docstring"""
        return super().test_attention_slicing_forward_pass()

    def lowerCAmelCase ( self : Union[str, Any] ) -> Tuple:
        """simple docstring"""
        super().test_inference_batch_single_identical(expected_max_diff=3e-3 )


@slow
@require_torch_gpu
class a__ ( unittest.TestCase ):
    def lowerCAmelCase ( self : Union[str, Any] ) -> Tuple:
        """simple docstring"""
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def lowerCAmelCase ( self : Any ) -> str:
        """simple docstring"""
        lowerCamelCase_: List[str] = torch_device
        lowerCamelCase_: List[str] = DanceDiffusionPipeline.from_pretrained("""harmonai/maestro-150k""" )
        lowerCamelCase_: str = pipe.to(__lowercase )
        pipe.set_progress_bar_config(disable=__lowercase )
        lowerCamelCase_: List[Any] = torch.manual_seed(0 )
        lowerCamelCase_: Dict = pipe(generator=__lowercase , num_inference_steps=1_00 , audio_length_in_s=4.096 )
        lowerCamelCase_: List[Any] = output.audios
        lowerCamelCase_: Any = audio[0, -3:, -3:]
        assert audio.shape == (1, 2, pipe.unet.sample_size)
        lowerCamelCase_: Optional[Any] = np.array([-0.0192, -0.0231, -0.0318, -0.0059, 0.0002, -0.0020] )
        assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2

    def lowerCAmelCase ( self : str ) -> str:
        """simple docstring"""
        lowerCamelCase_: Optional[int] = torch_device
        lowerCamelCase_: Dict = DanceDiffusionPipeline.from_pretrained("""harmonai/maestro-150k""" , torch_dtype=torch.floataa )
        lowerCamelCase_: Tuple = pipe.to(__lowercase )
        pipe.set_progress_bar_config(disable=__lowercase )
        lowerCamelCase_: List[Any] = torch.manual_seed(0 )
        lowerCamelCase_: List[Any] = pipe(generator=__lowercase , num_inference_steps=1_00 , audio_length_in_s=4.096 )
        lowerCamelCase_: List[Any] = output.audios
        lowerCamelCase_: Optional[Any] = audio[0, -3:, -3:]
        assert audio.shape == (1, 2, pipe.unet.sample_size)
        lowerCamelCase_: Tuple = np.array([-0.0367, -0.0488, -0.0771, -0.0525, -0.0444, -0.0341] )
        assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
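# Hedged usage sketch of the pipeline exercised by the slow tests above. The
# checkpoint and call signature are taken from those tests; running on CPU and the
# example output shape are assumptions.
import torch
from diffusers import DanceDiffusionPipeline

pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
output = pipe(generator=torch.manual_seed(0), num_inference_steps=100, audio_length_in_s=4.096)
print(output.audios.shape)  # (batch, channels, samples), e.g. (1, 2, 65536) at 16 kHz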
from __future__ import annotations

import os
from collections.abc import Mapping

lowercase : str = tuple[int, int]


class a__ :
    def __init__( self : Optional[Any] , A_ : set[int] , A_ : Mapping[EdgeT, int] ) -> None:
        """simple docstring"""
        lowerCamelCase_: set[int] = vertices
        lowerCamelCase_: dict[EdgeT, int] = {
            (min(A_ ), max(A_ )): weight for edge, weight in edges.items()
        }

    def lowerCAmelCase ( self : Optional[int] , A_ : EdgeT , A_ : int ) -> None:
        """simple docstring"""
        self.vertices.add(edge[0] )
        self.vertices.add(edge[1] )
        lowerCamelCase_: Tuple = weight

    def lowerCAmelCase ( self : List[str] ) -> Graph:
        """simple docstring"""
        lowerCamelCase_: Graph = Graph({min(self.vertices )} , {} )
        lowerCamelCase_: EdgeT
        lowerCamelCase_: int
        lowerCamelCase_: EdgeT
        lowerCamelCase_: int
        while len(subgraph.vertices ) < len(self.vertices ):
            lowerCamelCase_: List[str] = max(self.edges.values() ) + 1
            for edge, weight in self.edges.items():
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        lowerCamelCase_: List[str] = edge
                        lowerCamelCase_: str = weight
            subgraph.add_edge(A_ , A_ )
        return subgraph


def UpperCAmelCase_ ( _UpperCAmelCase = "p107_network.txt" ):
    lowerCamelCase_: str = os.path.abspath(os.path.dirname(_UpperCAmelCase ) )
    lowerCamelCase_: str = os.path.join(_UpperCAmelCase , _UpperCAmelCase )
    lowerCamelCase_: dict[EdgeT, int] = {}
    lowerCamelCase_: list[str]
    lowerCamelCase_: int
    lowerCamelCase_: int
    with open(_UpperCAmelCase ) as f:
        lowerCamelCase_: Optional[int] = f.read().strip().split("""\n""" )
    lowerCamelCase_: Dict = [line.split(""",""" ) for line in data]
    for edgea in range(1 , len(_UpperCAmelCase ) ):
        for edgea in range(_UpperCAmelCase ):
            if adjaceny_matrix[edgea][edgea] != "-":
                lowerCamelCase_: Any = int(adjaceny_matrix[edgea][edgea] )
    lowerCamelCase_: Graph = Graph(set(range(len(_UpperCAmelCase ) ) ) , _UpperCAmelCase )
    lowerCamelCase_: Graph = graph.prims_algorithm()
    lowerCamelCase_: int = sum(graph.edges.values() )
    lowerCamelCase_: int = sum(subgraph.edges.values() )
    return initial_total - optimal_total


if __name__ == "__main__":
    print(F"{solution() = }")
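# Hedged mini-example of the "maximum saving" computed above, restated inline since
# the corpus obfuscation renamed the class methods: a triangle with edge weights
# 1, 2, 3 has total weight 6 and minimum spanning tree weight 1 + 2 = 3, so the
# saving is 3. The loop below is a tiny Prim's algorithm over that triangle.
edges = {(0, 1): 1, (1, 2): 2, (0, 2): 3}
total = sum(edges.values())
in_tree, mst = {0}, 0
while len(in_tree) < 3:
    edge, weight = min(
        ((e, w) for e, w in edges.items() if (e[0] in in_tree) ^ (e[1] in in_tree)),
        key=lambda item: item[1],
    )
    in_tree.update(edge)
    mst += weight
assert total - mst == 3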
"""simple docstring""" _a : Optional[Any] = { 'a': 'AAAAA', 'b': 'AAAAB', 'c': 'AAABA', 'd': 'AAABB', 'e': 'AABAA', 'f': 'AABAB', 'g': 'AABBA', 'h': 'AABBB', 'i': 'ABAAA', 'j': 'BBBAA', 'k': 'ABAAB', 'l': 'ABABA', 'm': 'ABABB', 'n': 'ABBAA', 'o': 'ABBAB', 'p': 'ABBBA', 'q': 'ABBBB', 'r': 'BAAAA', 's': 'BAAAB', 't': 'BAABA', 'u': 'BAABB', 'v': 'BBBAB', 'w': 'BABAA', 'x': 'BABAB', 'y': 'BABBA', 'z': 'BABBB', ' ': ' ', } _a : Optional[int] = {value: key for key, value in encode_dict.items()} def SCREAMING_SNAKE_CASE ( _lowerCamelCase : str ) -> str: _lowerCAmelCase : List[Any] = """""" for letter in word.lower(): if letter.isalpha() or letter == " ": encoded += encode_dict[letter] else: raise Exception("""encode() accepts only letters of the alphabet and spaces""" ) return encoded def SCREAMING_SNAKE_CASE ( _lowerCamelCase : str ) -> str: if set(_lowerCamelCase ) - {"A", "B", " "} != set(): raise Exception("""decode() accepts only 'A', 'B' and spaces""" ) _lowerCAmelCase : Optional[int] = """""" for word in coded.split(): while len(_lowerCamelCase ) != 0: decoded += decode_dict[word[:5]] _lowerCAmelCase : Union[str, Any] = word[5:] decoded += " " return decoded.strip() if __name__ == "__main__": from doctest import testmod testmod()
"""simple docstring""" import unittest from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class __A : @staticmethod def __A ( *a__ , **a__ ): pass @is_pipeline_test @require_vision @require_torch class __A ( unittest.TestCase ): _UpperCamelCase : Dict = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING def __A ( self , a__ , a__ , a__ ): _lowerCAmelCase : Dict = pipeline( """zero-shot-object-detection""" , model="""hf-internal-testing/tiny-random-owlvit-object-detection""" ) _lowerCAmelCase : Optional[int] = [ { """image""": """./tests/fixtures/tests_samples/COCO/000000039769.png""", """candidate_labels""": ["""cat""", """remote""", """couch"""], } ] return object_detector, examples def __A ( self , a__ , a__ ): _lowerCAmelCase : List[str] = object_detector(examples[0] , threshold=0.0 ) _lowerCAmelCase : List[str] = len(a__ ) self.assertGreater(a__ , 0 ) self.assertEqual( a__ , [ { """score""": ANY(a__ ), """label""": ANY(a__ ), """box""": {"""xmin""": ANY(a__ ), """ymin""": ANY(a__ ), """xmax""": ANY(a__ ), """ymax""": ANY(a__ )}, } for i in range(a__ ) ] , ) @require_tf @unittest.skip("""Zero Shot Object Detection not implemented in TF""" ) def __A ( self ): pass @require_torch def __A ( self ): _lowerCAmelCase : Any = pipeline( """zero-shot-object-detection""" , model="""hf-internal-testing/tiny-random-owlvit-object-detection""" ) _lowerCAmelCase : Tuple = object_detector( """./tests/fixtures/tests_samples/COCO/000000039769.png""" , candidate_labels=["""cat""", """remote""", """couch"""] , threshold=0.6_4 , ) self.assertEqual( nested_simplify(a__ , decimals=4 ) , [ {"""score""": 0.7_2_3_5, """label""": """cat""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}}, {"""score""": 0.7_2_1_8, """label""": """remote""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}}, {"""score""": 0.7_1_8_4, """label""": """couch""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}}, {"""score""": 0.6_7_4_8, """label""": """remote""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}}, {"""score""": 0.6_6_5_6, """label""": """cat""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}}, {"""score""": 0.6_6_1_4, """label""": """couch""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}}, {"""score""": 0.6_4_5_6, """label""": """remote""", """box""": {"""xmin""": 494, """ymin""": 105, """xmax""": 521, """ymax""": 127}}, {"""score""": 0.6_4_2, """label""": """remote""", """box""": {"""xmin""": 67, """ymin""": 274, """xmax""": 93, """ymax""": 297}}, {"""score""": 0.6_4_1_9, """label""": """cat""", """box""": {"""xmin""": 494, """ymin""": 105, """xmax""": 521, """ymax""": 127}}, ] , ) _lowerCAmelCase : Optional[Any] = object_detector( [ { """image""": """./tests/fixtures/tests_samples/COCO/000000039769.png""", """candidate_labels""": ["""cat""", """remote""", """couch"""], } ] , threshold=0.6_4 , ) self.assertEqual( nested_simplify(a__ , decimals=4 ) , [ [ {"""score""": 0.7_2_3_5, """label""": """cat""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}}, {"""score""": 0.7_2_1_8, """label""": """remote""", """box""": {"""xmin""": 204, """ymin""": 
167, """xmax""": 232, """ymax""": 190}}, {"""score""": 0.7_1_8_4, """label""": """couch""", """box""": {"""xmin""": 204, """ymin""": 167, """xmax""": 232, """ymax""": 190}}, {"""score""": 0.6_7_4_8, """label""": """remote""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}}, {"""score""": 0.6_6_5_6, """label""": """cat""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}}, {"""score""": 0.6_6_1_4, """label""": """couch""", """box""": {"""xmin""": 571, """ymin""": 83, """xmax""": 598, """ymax""": 103}}, {"""score""": 0.6_4_5_6, """label""": """remote""", """box""": {"""xmin""": 494, """ymin""": 105, """xmax""": 521, """ymax""": 127}}, {"""score""": 0.6_4_2, """label""": """remote""", """box""": {"""xmin""": 67, """ymin""": 274, """xmax""": 93, """ymax""": 297}}, {"""score""": 0.6_4_1_9, """label""": """cat""", """box""": {"""xmin""": 494, """ymin""": 105, """xmax""": 521, """ymax""": 127}}, ] ] , ) @require_torch @slow def __A ( self ): _lowerCAmelCase : Optional[Any] = pipeline("""zero-shot-object-detection""" ) _lowerCAmelCase : Any = object_detector( """http://images.cocodataset.org/val2017/000000039769.jpg""" , candidate_labels=["""cat""", """remote""", """couch"""] , ) self.assertEqual( nested_simplify(a__ , decimals=4 ) , [ {"""score""": 0.2_8_6_8, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}}, {"""score""": 0.2_7_7, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}}, {"""score""": 0.2_5_3_7, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 315, """ymax""": 472}}, {"""score""": 0.1_4_7_4, """label""": """remote""", """box""": {"""xmin""": 335, """ymin""": 74, """xmax""": 371, """ymax""": 187}}, {"""score""": 0.1_2_0_8, """label""": """couch""", """box""": {"""xmin""": 4, """ymin""": 0, """xmax""": 642, """ymax""": 476}}, ] , ) _lowerCAmelCase : List[Any] = object_detector( [ { """image""": """http://images.cocodataset.org/val2017/000000039769.jpg""", """candidate_labels""": ["""cat""", """remote""", """couch"""], }, { """image""": """http://images.cocodataset.org/val2017/000000039769.jpg""", """candidate_labels""": ["""cat""", """remote""", """couch"""], }, ] , ) self.assertEqual( nested_simplify(a__ , decimals=4 ) , [ [ {"""score""": 0.2_8_6_8, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}}, {"""score""": 0.2_7_7, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}}, {"""score""": 0.2_5_3_7, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 315, """ymax""": 472}}, {"""score""": 0.1_4_7_4, """label""": """remote""", """box""": {"""xmin""": 335, """ymin""": 74, """xmax""": 371, """ymax""": 187}}, {"""score""": 0.1_2_0_8, """label""": """couch""", """box""": {"""xmin""": 4, """ymin""": 0, """xmax""": 642, """ymax""": 476}}, ], [ {"""score""": 0.2_8_6_8, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}}, {"""score""": 0.2_7_7, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}}, {"""score""": 0.2_5_3_7, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 315, """ymax""": 472}}, {"""score""": 0.1_4_7_4, """label""": """remote""", """box""": {"""xmin""": 335, """ymin""": 74, """xmax""": 371, """ymax""": 187}}, 
{"""score""": 0.1_2_0_8, """label""": """couch""", """box""": {"""xmin""": 4, """ymin""": 0, """xmax""": 642, """ymax""": 476}}, ], ] , ) @require_tf @unittest.skip("""Zero Shot Object Detection not implemented in TF""" ) def __A ( self ): pass @require_torch @slow def __A ( self ): _lowerCAmelCase : Union[str, Any] = 0.2 _lowerCAmelCase : List[Any] = pipeline("""zero-shot-object-detection""" ) _lowerCAmelCase : int = object_detector( """http://images.cocodataset.org/val2017/000000039769.jpg""" , candidate_labels=["""cat""", """remote""", """couch"""] , threshold=a__ , ) self.assertEqual( nested_simplify(a__ , decimals=4 ) , [ {"""score""": 0.2_8_6_8, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}}, {"""score""": 0.2_7_7, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}}, {"""score""": 0.2_5_3_7, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 315, """ymax""": 472}}, ] , ) @require_torch @slow def __A ( self ): _lowerCAmelCase : List[str] = 2 _lowerCAmelCase : int = pipeline("""zero-shot-object-detection""" ) _lowerCAmelCase : Optional[Any] = object_detector( """http://images.cocodataset.org/val2017/000000039769.jpg""" , candidate_labels=["""cat""", """remote""", """couch"""] , top_k=a__ , ) self.assertEqual( nested_simplify(a__ , decimals=4 ) , [ {"""score""": 0.2_8_6_8, """label""": """cat""", """box""": {"""xmin""": 324, """ymin""": 20, """xmax""": 640, """ymax""": 373}}, {"""score""": 0.2_7_7, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 177, """ymax""": 115}}, ] , )
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_barthez import BarthezTokenizer
else:
    _A : Optional[Any] = None

_A : List[str] = logging.get_logger(__name__)

_A : Optional[Any] = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}

_A : int = {
    '''vocab_file''': {
        '''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model''',
        '''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model''',
        '''moussaKam/barthez-orangesum-title''': (
            '''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'''
        ),
    },
    '''tokenizer_file''': {
        '''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json''',
        '''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json''',
        '''moussaKam/barthez-orangesum-title''': (
            '''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json'''
        ),
    },
}

_A : Optional[int] = {
    '''moussaKam/mbarthez''': 1_0_2_4,
    '''moussaKam/barthez''': 1_0_2_4,
    '''moussaKam/barthez-orangesum-title''': 1_0_2_4,
}

_A : int = '''▁'''


class lowerCamelCase__ ( A ):
    '''simple docstring'''
    A_ = VOCAB_FILES_NAMES
    A_ = PRETRAINED_VOCAB_FILES_MAP
    A_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    A_ = ["""input_ids""", """attention_mask"""]
    A_ = BarthezTokenizer

    def __init__( self : Optional[int] , UpperCamelCase_ : Union[str, Any]=None , UpperCamelCase_ : Optional[int]=None , UpperCamelCase_ : List[Any]="<s>" , UpperCamelCase_ : List[str]="</s>" , UpperCamelCase_ : Optional[Any]="</s>" , UpperCamelCase_ : Dict="<s>" , UpperCamelCase_ : List[str]="<unk>" , UpperCamelCase_ : int="<pad>" , UpperCamelCase_ : Union[str, Any]="<mask>" , **UpperCamelCase_ : int , ) -> str:
        '''simple docstring'''
        _lowercase : Any = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else mask_token
        super().__init__(
            UpperCamelCase_ , tokenizer_file=UpperCamelCase_ , bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , **UpperCamelCase_ , )
        _lowercase : Optional[Any] = vocab_file
        _lowercase : Optional[Any] = False if not self.vocab_file else True

    def __UpperCAmelCase ( self : int , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None ) -> List[int]:
        '''simple docstring'''
        if token_ids_a is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        _lowercase : Optional[Any] = [self.cls_token_id]
        _lowercase : Dict = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_a + sep

    def __UpperCAmelCase ( self : Optional[int] , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None ) -> List[int]:
        '''simple docstring'''
        _lowercase : List[str] = [self.sep_token_id]
        _lowercase : List[Any] = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]

    def __UpperCAmelCase ( self : int , UpperCamelCase_ : str , UpperCamelCase_ : Optional[str] = None ) -> Tuple[str]:
        '''simple docstring'''
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.' )
        if not os.path.isdir(UpperCamelCase_ ):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        _lowercase : Any = os.path.join(
            UpperCamelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase_ ):
            copyfile(self.vocab_file , UpperCamelCase_ )
        return (out_vocab_file,)
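# Hedged illustration of the special-token layout built by the first token-ids
# method above: a single sequence becomes <s> A </s>, and a pair becomes
# <s> A </s></s> B </s>. The token ids here are placeholders, not real vocab ids.
cls_id, sep_id = 0, 2
a, b = [10, 11], [20]
assert [cls_id] + a + [sep_id] == [0, 10, 11, 2]
assert [cls_id] + a + [sep_id, sep_id] + b + [sep_id] == [0, 10, 11, 2, 2, 20, 2]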
'''simple docstring'''
import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING


_A : int = logging.get_logger(__name__)

_A : Union[str, Any] = {
    '''Salesforce/instruct-blip-flan-t5''': '''https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json''',
}


class lowerCamelCase__ ( A ):
    '''simple docstring'''
    A_ = """instructblip_vision_model"""

    def __init__( self : Union[str, Any] , UpperCamelCase_ : str=1408 , UpperCamelCase_ : Tuple=6144 , UpperCamelCase_ : Union[str, Any]=39 , UpperCamelCase_ : Optional[Any]=16 , UpperCamelCase_ : str=224 , UpperCamelCase_ : Dict=14 , UpperCamelCase_ : Dict="gelu" , UpperCamelCase_ : int=1E-6 , UpperCamelCase_ : int=0.0 , UpperCamelCase_ : List[str]=1E-10 , UpperCamelCase_ : str=True , **UpperCamelCase_ : Dict , ) -> Any:
        '''simple docstring'''
        super().__init__(**UpperCamelCase_ )
        _lowercase : Optional[Any] = hidden_size
        _lowercase : Optional[Any] = intermediate_size
        _lowercase : Optional[int] = num_hidden_layers
        _lowercase : str = num_attention_heads
        _lowercase : Tuple = patch_size
        _lowercase : Dict = image_size
        _lowercase : Optional[int] = initializer_range
        _lowercase : List[Any] = attention_dropout
        _lowercase : int = layer_norm_eps
        _lowercase : Optional[int] = hidden_act
        _lowercase : str = qkv_bias

    @classmethod
    def __UpperCAmelCase ( cls : List[Any] , UpperCamelCase_ : Union[str, os.PathLike] , **UpperCamelCase_ : List[str] ) -> "PretrainedConfig":
        '''simple docstring'''
        cls._set_token_in_kwargs(UpperCamelCase_ )
        _lowercase , _lowercase : Tuple = cls.get_config_dict(UpperCamelCase_ , **UpperCamelCase_ )
        # get the vision config dict if we are loading from InstructBlipConfig
        if config_dict.get('model_type' ) == "instructblip":
            _lowercase : Any = config_dict['vision_config']
        if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
                F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(UpperCamelCase_ , **UpperCamelCase_ )


class lowerCamelCase__ ( A ):
    '''simple docstring'''
    A_ = """instructblip_qformer"""

    def __init__( self : Tuple , UpperCamelCase_ : Union[str, Any]=3_0522 , UpperCamelCase_ : Union[str, Any]=768 , UpperCamelCase_ : Tuple=12 , UpperCamelCase_ : Optional[Any]=12 , UpperCamelCase_ : List[str]=3072 , UpperCamelCase_ : List[str]="gelu" , UpperCamelCase_ : Union[str, Any]=0.1 , UpperCamelCase_ : List[str]=0.1 , UpperCamelCase_ : Any=512 , UpperCamelCase_ : Union[str, Any]=0.02 , UpperCamelCase_ : List[Any]=1E-12 , UpperCamelCase_ : Optional[Any]=0 , UpperCamelCase_ : str="absolute" , UpperCamelCase_ : List[Any]=2 , UpperCamelCase_ : Any=1408 , **UpperCamelCase_ : Dict , ) -> Any:
        '''simple docstring'''
        super().__init__(pad_token_id=UpperCamelCase_ , **UpperCamelCase_ )
        _lowercase : Dict = vocab_size
        _lowercase : Optional[Any] = hidden_size
        _lowercase : Any = num_hidden_layers
        _lowercase : List[Any] = num_attention_heads
        _lowercase : Optional[int] = hidden_act
        _lowercase : Union[str, Any] = intermediate_size
        _lowercase : List[Any] = hidden_dropout_prob
        _lowercase : Dict = attention_probs_dropout_prob
        _lowercase : Any = max_position_embeddings
        _lowercase : Optional[int] = initializer_range
        _lowercase : Tuple = layer_norm_eps
        _lowercase : List[str] = position_embedding_type
        _lowercase : str = cross_attention_frequency
        _lowercase : int = encoder_hidden_size

    @classmethod
    def __UpperCAmelCase ( cls : List[Any] , UpperCamelCase_ : Union[str, os.PathLike] , **UpperCamelCase_ : List[str] ) -> "PretrainedConfig":
        '''simple docstring'''
        cls._set_token_in_kwargs(UpperCamelCase_ )
        _lowercase , _lowercase : List[str] = cls.get_config_dict(UpperCamelCase_ , **UpperCamelCase_ )
        # get the qformer config dict if we are loading from InstructBlipConfig
        if config_dict.get('model_type' ) == "instructblip":
            _lowercase : Optional[int] = config_dict['qformer_config']
        if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
                F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(UpperCamelCase_ , **UpperCamelCase_ )


class lowerCamelCase__ ( A ):
    '''simple docstring'''
    A_ = """instructblip"""
    A_ = True

    def __init__( self : Any , UpperCamelCase_ : List[Any]=None , UpperCamelCase_ : Tuple=None , UpperCamelCase_ : Dict=None , UpperCamelCase_ : Union[str, Any]=32 , **UpperCamelCase_ : int ) -> List[str]:
        '''simple docstring'''
        super().__init__(**UpperCamelCase_ )
        if vision_config is None:
            _lowercase : Any = {}
            logger.info('vision_config is None. initializing the InstructBlipVisionConfig with default values.' )
        if qformer_config is None:
            _lowercase : List[Any] = {}
            logger.info('qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.' )
        if text_config is None:
            _lowercase : List[Any] = {}
            logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).' )
        _lowercase : List[Any] = InstructBlipVisionConfig(**UpperCamelCase_ )
        _lowercase : Union[str, Any] = InstructBlipQFormerConfig(**UpperCamelCase_ )
        _lowercase : Union[str, Any] = text_config['model_type'] if 'model_type' in text_config else 'opt'
        _lowercase : int = CONFIG_MAPPING[text_model_type](**UpperCamelCase_ )
        _lowercase : str = self.text_config.tie_word_embeddings
        _lowercase : int = self.text_config.is_encoder_decoder
        _lowercase : Tuple = num_query_tokens
        _lowercase : str = self.vision_config.hidden_size
        _lowercase : Union[str, Any] = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        _lowercase : List[Any] = 1.0
        _lowercase : int = 0.02

    @classmethod
    def __UpperCAmelCase ( cls : Tuple , UpperCamelCase_ : InstructBlipVisionConfig , UpperCamelCase_ : InstructBlipQFormerConfig , UpperCamelCase_ : PretrainedConfig , **UpperCamelCase_ : Dict , ) -> List[str]:
        '''simple docstring'''
        return cls(
            vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **UpperCamelCase_ , )

    def __UpperCAmelCase ( self : Dict ) -> List[str]:
        '''simple docstring'''
        _lowercase : List[Any] = copy.deepcopy(self.__dict__ )
        _lowercase : Optional[int] = self.vision_config.to_dict()
        _lowercase : Optional[Any] = self.qformer_config.to_dict()
        _lowercase : Tuple = self.text_config.to_dict()
        _lowercase : Dict = self.__class__.model_type
        return output
'''simple docstring'''
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple

import yaml


class SCREAMING_SNAKE_CASE__ ( yaml.SafeLoader ):
    """simple docstring"""

    def a__ ( self , A ) -> List[str]:
        A: Union[str, Any] = [self.constructed_objects[key_node] for key_node, _ in node.value]
        A: int = [tuple(A ) if isinstance(A , A ) else key for key in keys]
        A: Union[str, Any] = Counter(A )
        A: Optional[int] = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(f'Got duplicate yaml keys: {duplicate_keys}' )

    def a__ ( self , A , A=False ) -> Optional[Any]:
        A: Tuple = super().construct_mapping(A , deep=A )
        self._check_no_duplicates_on_constructed_node(A )
        return mapping


def _SCREAMING_SNAKE_CASE ( lowerCamelCase__ : str ):
    '''simple docstring'''
    A: Any = list(readme_content.splitlines() )
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        A: str = full_content[1:].index("""---""" ) + 1
        A: Tuple = """\n""".join(full_content[1:sep_idx] )
        return yamlblock, "\n".join(full_content[sep_idx + 1 :] )
    return None, "\n".join(lowerCamelCase__ )


class SCREAMING_SNAKE_CASE__ ( snake_case_ ):
    """simple docstring"""
    A__ : Optional[int] = {'''train_eval_index'''}  # train-eval-index in the YAML metadata

    @classmethod
    def a__ ( cls , A ) -> "DatasetMetadata":
        with open(A , encoding="""utf-8""" ) as readme_file:
            A , A: Dict = _split_yaml_from_readme(readme_file.read() )
        if yaml_string is not None:
            return cls.from_yaml_string(A )
        else:
            return cls()

    def a__ ( self , A ) -> Optional[Any]:
        if path.exists():
            with open(A , encoding="""utf-8""" ) as readme_file:
                A: List[Any] = readme_file.read()
        else:
            A: Any = None
        A: Union[str, Any] = self._to_readme(A )
        with open(A , """w""" , encoding="""utf-8""" ) as readme_file:
            readme_file.write(A )

    def a__ ( self , A = None ) -> str:
        if readme_content is not None:
            A , A: Any = _split_yaml_from_readme(A )
            A: List[Any] = """---\n""" + self.to_yaml_string() + """---\n""" + content
        else:
            A: Union[str, Any] = """---\n""" + self.to_yaml_string() + """---\n"""
        return full_content

    @classmethod
    def a__ ( cls , A ) -> "DatasetMetadata":
        A: Dict = yaml.load(A , Loader=_NoDuplicateSafeLoader ) or {}
        # Convert the YAML keys to DatasetMetadata fields
        A: int = {
            (key.replace("""-""" , """_""" ) if key.replace("""-""" , """_""" ) in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**A )

    def a__ ( self ) -> str:
        return yaml.safe_dump(
            {
                (key.replace("""_""" , """-""" ) if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            } ,
            sort_keys=A ,
            allow_unicode=A ,
            encoding="""utf-8""" , ).decode("""utf-8""" )


__SCREAMING_SNAKE_CASE : str = {
    'image-classification': [], 'translation': [], 'image-segmentation': [], 'fill-mask': [],
    'automatic-speech-recognition': [], 'token-classification': [], 'sentence-similarity': [],
    'audio-classification': [], 'question-answering': [], 'summarization': [],
    'zero-shot-classification': [], 'table-to-text': [], 'feature-extraction': [], 'other': [],
    'multiple-choice': [], 'text-classification': [], 'text-to-image': [], 'text2text-generation': [],
    'zero-shot-image-classification': [], 'tabular-classification': [], 'tabular-regression': [],
    'image-to-image': [], 'tabular-to-text': [], 'unconditional-image-generation': [],
    'text-retrieval': [], 'text-to-speech': [], 'object-detection': [], 'audio-to-audio': [],
    'text-generation': [], 'conversational': [], 'table-question-answering': [],
    'visual-question-answering': [], 'image-to-text': [], 'reinforcement-learning': [],
    'voice-activity-detection': [], 'time-series-forecasting': [], 'document-question-answering': [],
}

if __name__ == "__main__":
    from argparse import ArgumentParser

    __SCREAMING_SNAKE_CASE : Any = ArgumentParser(usage='Validate the yaml metadata block of a README.md file.')
    ap.add_argument('readme_filepath')
    __SCREAMING_SNAKE_CASE : List[Any] = ap.parse_args()

    __SCREAMING_SNAKE_CASE : int = Path(args.readme_filepath)
    __SCREAMING_SNAKE_CASE : Any = DatasetMetadata.from_readme(readme_filepath)
    print(dataset_metadata)
    dataset_metadata.to_readme(readme_filepath)
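# Hedged mini-demo of the YAML front-matter split implemented above, restated
# inline since the helper was renamed by the corpus obfuscation:
content = "---\nlicense: mit\n---\nSome dataset card body\n"
lines = content.splitlines()
assert lines[0] == "---" and "---" in lines[1:]
sep_idx = lines[1:].index("---") + 1
yaml_block = "\n".join(lines[1:sep_idx])
body = "\n".join(lines[sep_idx + 1 :])
assert yaml_block == "license: mit"
assert body == "Some dataset card body"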
'''simple docstring'''
import time
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device

from ..test_modeling_common import ids_tensor


if is_torch_available():
    import torch

    from transformers.generation import (
        MaxLengthCriteria,
        MaxNewTokensCriteria,
        MaxTimeCriteria,
        StoppingCriteriaList,
        validate_stopping_criteria,
    )


@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    """simple docstring"""

    def a__ ( self , A ) -> Union[str, Any]:
        A: Any = 3
        A: str = 2_50
        A: Tuple = ids_tensor((batch_size, length) , A )
        A: Any = torch.ones((batch_size, length) , device=A , dtype=torch.float ) / length
        return input_ids, scores

    def a__ ( self ) -> str:
        A , A: Tuple = self._get_tensors(5 )
        A: Dict = StoppingCriteriaList(
            [
                MaxLengthCriteria(max_length=10 ),
                MaxTimeCriteria(max_time=0.1 ),
            ] )
        self.assertFalse(criteria(A , A ) )
        A , A: int = self._get_tensors(9 )
        self.assertFalse(criteria(A , A ) )
        A , A: Optional[int] = self._get_tensors(10 )
        self.assertTrue(criteria(A , A ) )

    def a__ ( self ) -> Optional[int]:
        A: List[Any] = MaxLengthCriteria(max_length=10 )
        A , A: List[str] = self._get_tensors(5 )
        self.assertFalse(criteria(A , A ) )
        A , A: int = self._get_tensors(9 )
        self.assertFalse(criteria(A , A ) )
        A , A: Optional[Any] = self._get_tensors(10 )
        self.assertTrue(criteria(A , A ) )

    def a__ ( self ) -> Any:
        A: Tuple = MaxNewTokensCriteria(start_length=5 , max_new_tokens=5 )
        A , A: Optional[Any] = self._get_tensors(5 )
        self.assertFalse(criteria(A , A ) )
        A , A: Any = self._get_tensors(9 )
        self.assertFalse(criteria(A , A ) )
        A , A: Union[str, Any] = self._get_tensors(10 )
        self.assertTrue(criteria(A , A ) )
        A: str = StoppingCriteriaList([criteria] )
        self.assertEqual(criteria_list.max_length , 10 )

    def a__ ( self ) -> Tuple:
        A , A: Optional[Any] = self._get_tensors(5 )
        A: Dict = MaxTimeCriteria(max_time=0.1 )
        self.assertFalse(criteria(A , A ) )
        A: Any = MaxTimeCriteria(max_time=0.1 , initial_timestamp=time.time() - 0.2 )
        self.assertTrue(criteria(A , A ) )

    def a__ ( self ) -> int:
        validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 10 )

        with self.assertWarns(A ):
            validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 11 )

        A: List[Any] = validate_stopping_criteria(StoppingCriteriaList() , 11 )
        self.assertEqual(len(A ) , 1 )
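# Hedged usage sketch of the real `transformers` API these tests exercise; the
# tensor sizes are illustrative assumptions, and the return type of the criteria
# call (plain bool vs. per-batch tensor) varies across library versions.
import torch
from transformers.generation import MaxLengthCriteria, MaxTimeCriteria, StoppingCriteriaList

criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=10), MaxTimeCriteria(max_time=5.0)])
input_ids = torch.ones((1, 10), dtype=torch.long)
scores = torch.ones((1, 10))
print(bool(criteria(input_ids, scores)))  # True: the max-length criterion is already met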
import argparse

import torch

# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./   # create simlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig

from transformers import WavLMConfig, WavLMModel, logging


logging.set_verbosity_info()
a_ : str = logging.get_logger(__name__)

a_ : int = {
    'post_extract_proj': 'feature_projection.projection',
    'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
    'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
    'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
    'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
    'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
    'self_attn.grep_linear': 'encoder.layers.*.attention.gru_rel_pos_linear',
    'self_attn.relative_attention_bias': 'encoder.layers.*.attention.rel_attn_embed',
    'self_attn.grep_a': 'encoder.layers.*.attention.gru_rel_pos_const',
    'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
    'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
    'fc2': 'encoder.layers.*.feed_forward.output_dense',
    'final_layer_norm': 'encoder.layers.*.final_layer_norm',
    'encoder.layer_norm': 'encoder.layer_norm',
    'w2v_model.layer_norm': 'feature_projection.layer_norm',
    'quantizer.weight_proj': 'quantizer.weight_proj',
    'quantizer.vars': 'quantizer.codevectors',
    'project_q': 'project_q',
    'final_proj': 'project_hid',
    'w2v_encoder.proj': 'ctc_proj',
    'mask_emb': 'masked_spec_embed',
}
a_ : str = [
    'ctc_proj',
    'quantizer.weight_proj',
    'quantizer.codevectors',
    'project_q',
    'project_hid',
]


def __lowercase( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ):
    """simple docstring"""
    for attribute in key.split("." ):
        lowerCamelCase = getattr(snake_case__ , snake_case__ )

    if weight_type is not None:
        lowerCamelCase = getattr(snake_case__ , snake_case__ ).shape
    else:
        lowerCamelCase = hf_pointer.shape

    assert hf_shape == value.shape, (
        F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
        F""" {value.shape} for {full_name}"""
    )

    if weight_type == "weight":
        lowerCamelCase = value
    elif weight_type == "weight_g":
        lowerCamelCase = value
    elif weight_type == "weight_v":
        lowerCamelCase = value
    elif weight_type == "bias":
        lowerCamelCase = value
    else:
        lowerCamelCase = value

    logger.info(F"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )


def __lowercase( UpperCAmelCase__ , UpperCAmelCase__ ):
    """simple docstring"""
    lowerCamelCase = []
    lowerCamelCase = fairseq_model.state_dict()

    lowerCamelCase = hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        lowerCamelCase = False
        if "conv_layers" in name:
            load_conv_layer(
                snake_case__ , snake_case__ , snake_case__ , snake_case__ , hf_model.config.feat_extract_norm == "group" , )
            lowerCamelCase = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
                    lowerCamelCase = True
                    if "*" in mapped_key:
                        lowerCamelCase = name.split(snake_case__ )[0].split("." )[-2]
                        lowerCamelCase = mapped_key.replace("*" , snake_case__ )
                    if "weight_g" in name:
                        lowerCamelCase = "weight_g"
                    elif "weight_v" in name:
                        lowerCamelCase = "weight_v"
                    elif "bias" in name and "relative_attention_bias" not in name:
                        lowerCamelCase = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        lowerCamelCase = "weight"
                    else:
                        lowerCamelCase = None
                    set_recursively(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
                continue
        if not is_used:
            unused_weights.append(snake_case__ )

    logger.warning(F"""Unused weights: {unused_weights}""" )


def __lowercase( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ):
    """simple docstring"""
    lowerCamelCase = full_name.split("conv_layers." )[-1]
    lowerCamelCase = name.split("." )
    lowerCamelCase = int(items[0] )
    lowerCamelCase = int(items[1] )

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                F"""{full_name} has size {value.shape}, but"""
                F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
            )
            lowerCamelCase = value
            logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                F"""{full_name} has size {value.shape}, but"""
                F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
            )
            lowerCamelCase = value
            logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                F"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
                " found."
            )
            lowerCamelCase = value
            logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                F"""{full_name} has size {value.shape}, but"""
                F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
            )
            lowerCamelCase = value
            logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
    else:
        unused_weights.append(snake_case__ )


@torch.no_grad()
def __lowercase( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__=None ):
    """simple docstring"""
    lowerCamelCase = torch.load(snake_case__ )
    lowerCamelCase = WavLMConfigOrig(checkpoint["cfg"] )
    lowerCamelCase = WavLMOrig(snake_case__ )
    model.load_state_dict(checkpoint["model"] )
    model.eval()

    if config_path is not None:
        lowerCamelCase = WavLMConfig.from_pretrained(snake_case__ )
    else:
        lowerCamelCase = WavLMConfig()

    lowerCamelCase = WavLMModel(snake_case__ )

    recursively_load_weights(snake_case__ , snake_case__ )

    hf_wavlm.save_pretrained(snake_case__ )


if __name__ == "__main__":
    a_ : int = argparse.ArgumentParser()
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
    parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
    a_ : Union[str, Any] = parser.parse_args()
    convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
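# Hedged invocation sketch for the conversion script above; the flags match its
# argparse definitions, but the script filename and paths are placeholders.
#
#   python convert_wavlm_checkpoint.py \
#       --checkpoint_path /path/to/WavLM-Base.pt \
#       --pytorch_dump_folder_path /path/to/output \
#       --config_path /path/to/config.json   # optional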
a_ : List[str] = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'

a_ : Any = [{'type': 'code', 'content': INSTALL_CONTENT}]
a_ : str = {
    '{processor_class}': 'FakeProcessorClass',
    '{model_class}': 'FakeModelClass',
    '{object_class}': 'FakeObjectClass',
}
'''simple docstring'''
import warnings

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class _a ( __lowerCAmelCase ):
    SCREAMING_SNAKE_CASE_ : Optional[int] = ["""image_processor""", """tokenizer"""]
    SCREAMING_SNAKE_CASE_ : Tuple = """CLIPImageProcessor"""
    SCREAMING_SNAKE_CASE_ : Dict = ("""XLMRobertaTokenizer""", """XLMRobertaTokenizerFast""")

    def __init__( self ,_SCREAMING_SNAKE_CASE=None ,_SCREAMING_SNAKE_CASE=None ,**_SCREAMING_SNAKE_CASE ) -> Any:
        _snake_case = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead." ,_SCREAMING_SNAKE_CASE ,)
            _snake_case = kwargs.pop("feature_extractor" )

        _snake_case = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`." )
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`." )

        super().__init__(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )

    def __call__( self ,_SCREAMING_SNAKE_CASE=None ,_SCREAMING_SNAKE_CASE=None ,_SCREAMING_SNAKE_CASE=None ,**_SCREAMING_SNAKE_CASE ) -> Optional[int]:
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none." )

        if text is not None:
            _snake_case = self.tokenizer(_SCREAMING_SNAKE_CASE ,return_tensors=_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE )

        if images is not None:
            _snake_case = self.image_processor(_SCREAMING_SNAKE_CASE ,return_tensors=_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE )

        if text is not None and images is not None:
            _snake_case = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**_SCREAMING_SNAKE_CASE ) ,tensor_type=_SCREAMING_SNAKE_CASE )

    def _lowercase ( self ,*_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ) -> int:
        return self.tokenizer.batch_decode(*_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE )

    def _lowercase ( self ,*_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ) -> str:
        return self.tokenizer.decode(*_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE )

    @property
    def _lowercase ( self ) -> Optional[int]:
        _snake_case = self.tokenizer.model_input_names
        _snake_case = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
185
import re

from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P


# Sentinels
_unmatched = object()

# For specifying empty leaf dict `{}`
empty_dict = object()


def _match(qs, ks):
    """Return True if regexes in qs match any window of strings in tuple ks."""
    # compile regexes and force complete match
    qts = tuple(re.compile(x + "$") for x in qs)
    # slide a window of len(qts) over ks and test every position
    for i in range(len(ks) - len(qts) + 1):
        matches = [x.match(y) for x, y in zip(qts, ks[i:])]
        if matches and all(matches):
            return True
    return False


def _replacement_rules(rules):
    def replace(key, val):
        for rule, replacement in rules:
            if _match(rule, key):
                return replacement
        return val

    return replace


def _get_partition_rules():
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P("mp", None)),
        (("transformer", "wte", "embedding"), P("mp", None)),
        # attention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None, "mp")),
        (("attention", "out_proj", "kernel"), P("mp", None)),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None, "mp")),
        (("mlp", "c_fc", "bias"), P("mp")),
        (("mlp", "c_proj", "kernel"), P("mp", None)),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]


def set_partitions(in_dict):
    rules = _get_partition_rules()
    replace = _replacement_rules(rules)
    initd = {k: _unmatched for k in flatten_dict(in_dict)}
    result = {k: replace(k, v) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result))
185
1
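A minimal usage sketch for the processor above, assuming a published AltCLIP checkpoint; the checkpoint id and image path are placeholders:

# Sketch only: pairing text and an image through the processor defined above.
from PIL import Image
from transformers import AltCLIPProcessor  # assumes the class above is exported under this name

processor = AltCLIPProcessor.from_pretrained("BAAI/AltCLIP")  # hypothetical checkpoint id
inputs = processor(
    text=["a photo of a cat"],
    images=Image.open("cat.png"),  # placeholder image path
    return_tensors="pt",
)
# text + images -> a BatchEncoding carrying input_ids, attention_mask and pixel_values
print(sorted(inputs.keys()))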
"""Lucas-Lehmer primality test for Mersenne numbers 2**p - 1."""


def lucas_lehmer_test(p: int) -> bool:
    if p < 2:
        raise ValueError("p should not be less than 2!")
    elif p == 2:
        return True

    s = 4
    m = (1 << p) - 1  # the Mersenne number 2**p - 1
    for _ in range(p - 2):
        s = ((s * s) - 2) % m
    return s == 0


if __name__ == "__main__":
    print(lucas_lehmer_test(7))
    print(lucas_lehmer_test(11))
428
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_speech_available,
    is_tf_available,
    is_torch_available,
)


_import_structure = {
    "configuration_speech_to_text": ["SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Speech2TextConfig"],
    "processing_speech_to_text": ["Speech2TextProcessor"],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_speech_to_text"] = ["Speech2TextTokenizer"]

try:
    if not is_speech_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_speech_to_text"] = ["Speech2TextFeatureExtractor"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_speech_to_text"] = [
        "TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFSpeech2TextForConditionalGeneration",
        "TFSpeech2TextModel",
        "TFSpeech2TextPreTrainedModel",
    ]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_speech_to_text"] = [
        "SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Speech2TextForConditionalGeneration",
        "Speech2TextModel",
        "Speech2TextPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig
    from .processing_speech_to_text import Speech2TextProcessor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speech_to_text import Speech2TextTokenizer

    try:
        if not is_speech_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_speech_to_text import Speech2TextFeatureExtractor

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_speech_to_text import (
            TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSpeech2TextForConditionalGeneration,
            TFSpeech2TextModel,
            TFSpeech2TextPreTrainedModel,
        )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_to_text import (
            SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Speech2TextForConditionalGeneration,
            Speech2TextModel,
            Speech2TextPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
428
1
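A quick sanity check of the Lucas-Lehmer test above: 2**p - 1 is prime for p in {3, 5, 7, 13} (the Mersenne primes 7, 31, 127, 8191) but not for p = 11, since 2047 = 23 * 89.

# Exercise lucas_lehmer_test on a few small exponents.
for p in (3, 5, 7, 11, 13):
    print(p, lucas_lehmer_test(p))  # True, True, True, False, True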
import inspect import unittest import numpy as np from tests.test_modeling_common import floats_tensor from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel if is_vision_available(): from transformers import MaskFormerImageProcessor if is_vision_available(): from PIL import Image class __UpperCamelCase : def __init__( self , _UpperCamelCase , _UpperCamelCase=2 , _UpperCamelCase=True , _UpperCamelCase=False , _UpperCamelCase=10 , _UpperCamelCase=3 , _UpperCamelCase=32 * 4 , _UpperCamelCase=32 * 6 , _UpperCamelCase=4 , _UpperCamelCase=32 , ): _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = is_training _UpperCAmelCase = use_auxiliary_loss _UpperCAmelCase = num_queries _UpperCAmelCase = num_channels _UpperCAmelCase = min_size _UpperCAmelCase = max_size _UpperCAmelCase = num_labels _UpperCAmelCase = mask_feature_size def UpperCamelCase( self ): _UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to( _UpperCamelCase ) _UpperCAmelCase = torch.ones([self.batch_size, self.min_size, self.max_size] , device=_UpperCamelCase ) _UpperCAmelCase = ( torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=_UpperCamelCase ) > 0.5 ).float() _UpperCAmelCase = (torch.rand((self.batch_size, self.num_labels) , device=_UpperCamelCase ) > 0.5).long() _UpperCAmelCase = self.get_config() return config, pixel_values, pixel_mask, mask_labels, class_labels def UpperCamelCase( self ): return MaskFormerConfig.from_backbone_and_decoder_configs( backbone_config=SwinConfig( depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig( decoder_ffn_dim=128 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , ) def UpperCamelCase( self ): _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = self.prepare_config_and_inputs() _UpperCAmelCase = {'''pixel_values''': pixel_values, '''pixel_mask''': pixel_mask} return config, inputs_dict def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase ): _UpperCAmelCase = output.encoder_hidden_states _UpperCAmelCase = output.pixel_decoder_hidden_states _UpperCAmelCase = output.transformer_decoder_hidden_states self.parent.assertTrue(len(_UpperCamelCase ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(_UpperCamelCase ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(_UpperCamelCase ) , config.decoder_config.decoder_layers ) def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=False ): with torch.no_grad(): _UpperCAmelCase = MaskFormerModel(config=_UpperCamelCase ) model.to(_UpperCamelCase ) model.eval() _UpperCAmelCase = model(pixel_values=_UpperCamelCase , pixel_mask=_UpperCamelCase ) _UpperCAmelCase = model(_UpperCamelCase , output_hidden_states=_UpperCamelCase ) # the correct shape of 
output.transformer_decoder_hidden_states ensure the correcteness of the # encoder and pixel decoder self.parent.assertEqual( output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , ) # let's ensure the other two hidden state exists self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(output.encoder_last_hidden_state is not None ) if output_hidden_states: self.check_output_hidden_state(_UpperCamelCase , _UpperCamelCase ) def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): _UpperCAmelCase = MaskFormerForInstanceSegmentation(config=_UpperCamelCase ) model.to(_UpperCamelCase ) model.eval() def comm_check_on_output(_UpperCamelCase ): # let's still check that all the required stuff is there self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.encoder_last_hidden_state is not None ) # okay, now we need to check the logits shape # due to the encoder compression, masks have a //4 spatial size self.parent.assertEqual( result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , ) # + 1 for null class self.parent.assertEqual( result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) ) with torch.no_grad(): _UpperCAmelCase = model(pixel_values=_UpperCamelCase , pixel_mask=_UpperCamelCase ) _UpperCAmelCase = model(_UpperCamelCase ) comm_check_on_output(_UpperCamelCase ) _UpperCAmelCase = model( pixel_values=_UpperCamelCase , pixel_mask=_UpperCamelCase , mask_labels=_UpperCamelCase , class_labels=_UpperCamelCase ) comm_check_on_output(_UpperCamelCase ) self.parent.assertTrue(result.loss is not None ) self.parent.assertEqual(result.loss.shape , torch.Size([1] ) ) @require_torch class __UpperCamelCase ( A__ , A__ , unittest.TestCase ): __A : int = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else () __A : int = ( {"""feature-extraction""": MaskFormerModel, """image-segmentation""": MaskFormerForInstanceSegmentation} if is_torch_available() else {} ) __A : List[str] = False __A : Optional[Any] = False __A : Union[str, Any] = False __A : Optional[Any] = False def UpperCamelCase( self ): _UpperCAmelCase = MaskFormerModelTester(self ) _UpperCAmelCase = ConfigTester(self , config_class=_UpperCamelCase , has_text_modality=_UpperCamelCase ) def UpperCamelCase( self ): self.config_tester.run_common_tests() def UpperCamelCase( self ): _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(_UpperCamelCase , **_UpperCamelCase , output_hidden_states=_UpperCamelCase ) def UpperCamelCase( self ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*_UpperCamelCase ) @unittest.skip(reason='''MaskFormer does not use inputs_embeds''' ) def UpperCamelCase( self ): pass @unittest.skip(reason='''MaskFormer does not have a get_input_embeddings method''' ) def UpperCamelCase( self ): pass @unittest.skip(reason='''MaskFormer is not a generative model''' ) def UpperCamelCase( self ): pass @unittest.skip(reason='''MaskFormer does not use token embeddings''' ) def UpperCamelCase( self ): pass @require_torch_multi_gpu @unittest.skip( 
reason='''MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' ) def UpperCamelCase( self ): pass @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' ) def UpperCamelCase( self ): pass def UpperCamelCase( self ): _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCAmelCase = model_class(_UpperCamelCase ) _UpperCAmelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _UpperCAmelCase = [*signature.parameters.keys()] _UpperCAmelCase = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , _UpperCamelCase ) @slow def UpperCamelCase( self ): for model_name in ["facebook/maskformer-swin-small-coco"]: _UpperCAmelCase = MaskFormerModel.from_pretrained(_UpperCamelCase ) self.assertIsNotNone(_UpperCamelCase ) def UpperCamelCase( self ): _UpperCAmelCase = (self.model_tester.min_size,) * 2 _UpperCAmelCase = { '''pixel_values''': torch.randn((2, 3, *size) , device=_UpperCamelCase ), '''mask_labels''': torch.randn((2, 10, *size) , device=_UpperCamelCase ), '''class_labels''': torch.zeros(2 , 10 , device=_UpperCamelCase ).long(), } _UpperCAmelCase = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(_UpperCamelCase ) _UpperCAmelCase = model(**_UpperCamelCase ) self.assertTrue(outputs.loss is not None ) def UpperCamelCase( self ): _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(_UpperCamelCase , **_UpperCamelCase , output_hidden_states=_UpperCamelCase ) def UpperCamelCase( self ): _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCAmelCase = model_class(_UpperCamelCase ).to(_UpperCamelCase ) _UpperCAmelCase = model(**_UpperCamelCase , output_attentions=_UpperCamelCase ) self.assertTrue(outputs.attentions is not None ) def UpperCamelCase( self ): if not self.model_tester.is_training: return # only MaskFormerForInstanceSegmentation has the loss _UpperCAmelCase = self.all_model_classes[1] _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() _UpperCAmelCase = model_class(_UpperCamelCase ) model.to(_UpperCamelCase ) model.train() _UpperCAmelCase = model(_UpperCamelCase , mask_labels=_UpperCamelCase , class_labels=_UpperCamelCase ).loss loss.backward() def UpperCamelCase( self ): # only MaskFormerForInstanceSegmentation has the loss _UpperCAmelCase = self.all_model_classes[1] _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() _UpperCAmelCase = True _UpperCAmelCase = True _UpperCAmelCase = model_class(_UpperCamelCase ) model.to(_UpperCamelCase ) model.train() _UpperCAmelCase = model(_UpperCamelCase , mask_labels=_UpperCamelCase , class_labels=_UpperCamelCase ) _UpperCAmelCase = outputs.encoder_hidden_states[0] encoder_hidden_states.retain_grad() _UpperCAmelCase = outputs.pixel_decoder_hidden_states[0] pixel_decoder_hidden_states.retain_grad() # we requires_grad=True in inputs_embeds (line 2152), the original implementation don't _UpperCAmelCase = outputs.transformer_decoder_hidden_states[0] transformer_decoder_hidden_states.retain_grad() _UpperCAmelCase = outputs.attentions[0] attentions.retain_grad() 
outputs.loss.backward(retain_graph=_UpperCamelCase ) self.assertIsNotNone(encoder_hidden_states.grad ) self.assertIsNotNone(pixel_decoder_hidden_states.grad ) self.assertIsNotNone(transformer_decoder_hidden_states.grad ) self.assertIsNotNone(attentions.grad ) UpperCAmelCase_ = 1e-4 def A__ ( ) -> Dict: """simple docstring""" _UpperCAmelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_vision @slow class __UpperCamelCase ( unittest.TestCase ): @cached_property def UpperCamelCase( self ): return ( MaskFormerImageProcessor.from_pretrained('''facebook/maskformer-swin-small-coco''' ) if is_vision_available() else None ) def UpperCamelCase( self ): _UpperCAmelCase = MaskFormerModel.from_pretrained('''facebook/maskformer-swin-small-coco''' ).to(_UpperCamelCase ) _UpperCAmelCase = self.default_image_processor _UpperCAmelCase = prepare_img() _UpperCAmelCase = image_processor(_UpperCamelCase , return_tensors='''pt''' ).to(_UpperCamelCase ) _UpperCAmelCase = inputs['''pixel_values'''].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(_UpperCamelCase , (1, 3, 800, 1088) ) with torch.no_grad(): _UpperCAmelCase = model(**_UpperCamelCase ) _UpperCAmelCase = torch.tensor( [[-0.0482, 0.9228, 0.4951], [-0.2547, 0.8017, 0.8527], [-0.0069, 0.3385, -0.0089]] ).to(_UpperCamelCase ) self.assertTrue( torch.allclose( outputs.encoder_last_hidden_state[0, 0, :3, :3] , _UpperCamelCase , atol=_UpperCamelCase ) ) _UpperCAmelCase = torch.tensor( [[-0.8422, -0.8434, -0.9718], [-1.0144, -0.5565, -0.4195], [-1.0038, -0.4484, -0.1961]] ).to(_UpperCamelCase ) self.assertTrue( torch.allclose( outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , _UpperCamelCase , atol=_UpperCamelCase ) ) _UpperCAmelCase = torch.tensor( [[0.2852, -0.0159, 0.9735], [0.6254, 0.1858, 0.8529], [-0.0680, -0.4116, 1.8413]] ).to(_UpperCamelCase ) self.assertTrue( torch.allclose( outputs.transformer_decoder_last_hidden_state[0, :3, :3] , _UpperCamelCase , atol=_UpperCamelCase ) ) def UpperCamelCase( self ): _UpperCAmelCase = ( MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' ) .to(_UpperCamelCase ) .eval() ) _UpperCAmelCase = self.default_image_processor _UpperCAmelCase = prepare_img() _UpperCAmelCase = image_processor(_UpperCamelCase , return_tensors='''pt''' ).to(_UpperCamelCase ) _UpperCAmelCase = inputs['''pixel_values'''].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(_UpperCamelCase , (1, 3, 800, 1088) ) with torch.no_grad(): _UpperCAmelCase = model(**_UpperCamelCase ) # masks_queries_logits _UpperCAmelCase = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , ) _UpperCAmelCase = [ [-1.3737124, -1.7724937, -1.9364233], [-1.5977281, -1.9867939, -2.1523695], [-1.5795398, -1.9269832, -2.093942], ] _UpperCAmelCase = torch.tensor(_UpperCamelCase ).to(_UpperCamelCase ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , _UpperCamelCase , atol=_UpperCamelCase ) ) # class_queries_logits _UpperCAmelCase = outputs.class_queries_logits self.assertEqual( class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) _UpperCAmelCase = torch.tensor( [ [1.6512e00, -5.2572e00, -3.3519e00], 
[3.6169e-02, -5.9025e00, -2.9313e00], [1.0766e-04, -7.7630e00, -5.1263e00], ] ).to(_UpperCamelCase ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , _UpperCamelCase , atol=_UpperCamelCase ) ) def UpperCamelCase( self ): _UpperCAmelCase = ( MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-resnet101-coco-stuff''' ) .to(_UpperCamelCase ) .eval() ) _UpperCAmelCase = self.default_image_processor _UpperCAmelCase = prepare_img() _UpperCAmelCase = image_processor(_UpperCamelCase , return_tensors='''pt''' ).to(_UpperCamelCase ) _UpperCAmelCase = inputs['''pixel_values'''].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(_UpperCamelCase , (1, 3, 800, 1088) ) with torch.no_grad(): _UpperCAmelCase = model(**_UpperCamelCase ) # masks_queries_logits _UpperCAmelCase = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , ) _UpperCAmelCase = [[-0.9046, -2.6366, -4.6062], [-3.4179, -5.7890, -8.8057], [-4.9179, -7.6560, -10.7711]] _UpperCAmelCase = torch.tensor(_UpperCamelCase ).to(_UpperCamelCase ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , _UpperCamelCase , atol=_UpperCamelCase ) ) # class_queries_logits _UpperCAmelCase = outputs.class_queries_logits self.assertEqual( class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) _UpperCAmelCase = torch.tensor( [[4.7188, -3.2585, -2.8857], [6.6871, -2.9181, -1.2487], [7.2449, -2.2764, -2.1874]] ).to(_UpperCamelCase ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , _UpperCamelCase , atol=_UpperCamelCase ) ) def UpperCamelCase( self ): _UpperCAmelCase = ( MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' ) .to(_UpperCamelCase ) .eval() ) _UpperCAmelCase = self.default_image_processor _UpperCAmelCase = image_processor( [np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors='''pt''' , ) _UpperCAmelCase = inputs['''pixel_values'''].to(_UpperCamelCase ) _UpperCAmelCase = [el.to(_UpperCamelCase ) for el in inputs['''mask_labels''']] _UpperCAmelCase = [el.to(_UpperCamelCase ) for el in inputs['''class_labels''']] with torch.no_grad(): _UpperCAmelCase = model(**_UpperCamelCase ) self.assertTrue(outputs.loss is not None )
32
"""simple docstring""" from collections import OrderedDict from typing import Any, Mapping, Optional, Union from ...configuration_utils import PretrainedConfig from ...feature_extraction_utils import FeatureExtractionMixin from ...onnx import OnnxConfig from ...onnx.utils import compute_effective_axis_dimension from ...tokenization_utils_base import PreTrainedTokenizerBase from ...utils import TensorType, logging UpperCamelCase = logging.get_logger(__name__) UpperCamelCase = { """deepmind/language-perceiver""": """https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json""", # See all Perceiver models at https://huggingface.co/models?filter=perceiver } class UpperCamelCase__ ( _lowerCAmelCase ): """simple docstring""" A__ : Optional[int] = "perceiver" def __init__( self , SCREAMING_SNAKE_CASE__=256 , SCREAMING_SNAKE_CASE__=1280 , SCREAMING_SNAKE_CASE__=768 , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=26 , SCREAMING_SNAKE_CASE__=8 , SCREAMING_SNAKE_CASE__=8 , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__="kv" , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__="gelu" , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.0_2 , SCREAMING_SNAKE_CASE__=1e-12 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=262 , SCREAMING_SNAKE_CASE__=2048 , SCREAMING_SNAKE_CASE__=56 , SCREAMING_SNAKE_CASE__=[368, 496] , SCREAMING_SNAKE_CASE__=16 , SCREAMING_SNAKE_CASE__=1920 , SCREAMING_SNAKE_CASE__=16 , SCREAMING_SNAKE_CASE__=[1, 16, 224, 224] , **SCREAMING_SNAKE_CASE__ , ) -> Optional[int]: super().__init__(**SCREAMING_SNAKE_CASE__ ) A__ = num_latents A__ = d_latents A__ = d_model A__ = num_blocks A__ = num_self_attends_per_block A__ = num_self_attention_heads A__ = num_cross_attention_heads A__ = qk_channels A__ = v_channels A__ = cross_attention_shape_for_attention A__ = self_attention_widening_factor A__ = cross_attention_widening_factor A__ = hidden_act A__ = attention_probs_dropout_prob A__ = initializer_range A__ = layer_norm_eps A__ = use_query_residual # masked language modeling attributes A__ = vocab_size A__ = max_position_embeddings # image classification attributes A__ = image_size # flow attributes A__ = train_size # multimodal autoencoding attributes A__ = num_frames A__ = audio_samples_per_frame A__ = samples_per_patch A__ = output_shape class UpperCamelCase__ ( _lowerCAmelCase ): """simple docstring""" @property def snake_case__ ( self ) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": A__ = {0: "batch", 1: "choice", 2: "sequence"} else: A__ = {0: "batch", 1: "sequence"} return OrderedDict( [ ("inputs", dynamic_axis), ("attention_mask", dynamic_axis), ] ) @property def snake_case__ ( self ) -> float: return 1e-4 def snake_case__ ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = -1 , SCREAMING_SNAKE_CASE__ = -1 , SCREAMING_SNAKE_CASE__ = -1 , SCREAMING_SNAKE_CASE__ = False , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = 3 , SCREAMING_SNAKE_CASE__ = 40 , SCREAMING_SNAKE_CASE__ = 40 , ) -> Mapping[str, Any]: # copied from `transformers.onnx.config.OnnxConfig` and slightly altered/simplified if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX A__ = compute_effective_axis_dimension( SCREAMING_SNAKE_CASE__ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens 
to avoid optimizations made by ONNX A__ = preprocessor.num_special_tokens_to_add(SCREAMING_SNAKE_CASE__ ) A__ = compute_effective_axis_dimension( SCREAMING_SNAKE_CASE__ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=SCREAMING_SNAKE_CASE__ ) # Generate dummy inputs according to compute batch and sequence A__ = [" ".join(["a"] ) * seq_length] * batch_size A__ = dict(preprocessor(SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ ) ) A__ = inputs.pop("input_ids" ) return inputs elif isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and preprocessor.model_input_names[0] == "pixel_values": # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX A__ = compute_effective_axis_dimension(SCREAMING_SNAKE_CASE__ , fixed_dimension=OnnxConfig.default_fixed_batch ) A__ = self._generate_dummy_images(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) A__ = dict(preprocessor(images=SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ ) ) A__ = inputs.pop("pixel_values" ) return inputs else: raise ValueError( "Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor." )
104
0
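The MaskFormer tests above repeatedly assert a //4 spatial compression of the mask logits plus one extra "null" class logit. A small arithmetic sketch of those expected shapes for the padded 800x1088 COCO input used in the integration tests; num_queries = 100 and num_labels = 133 are assumed defaults, not values stated in the tests:

# Shape bookkeeping mirrored from the MaskFormer assertions above (sketch only).
num_queries = 100          # assumed DETR-decoder default
num_labels = 133           # assumed COCO panoptic label count
height, width = 800, 1088  # padded input size checked in the tests
masks_queries_logits_shape = (1, num_queries, height // 4, width // 4)
class_queries_logits_shape = (1, num_queries, num_labels + 1)  # +1 for the null class
print(masks_queries_logits_shape)   # (1, 100, 200, 272)
print(class_queries_logits_shape)   # (1, 100, 134)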
import unittest from transformers.utils.backbone_utils import ( BackboneMixin, get_aligned_output_features_output_indices, verify_out_features_out_indices, ) class __SCREAMING_SNAKE_CASE ( unittest.TestCase): def UpperCAmelCase__ ( self : List[Any] ): _UpperCAmelCase = ["a", "b", "c"] # Defaults to last layer if both are None _UpperCAmelCase , _UpperCAmelCase = get_aligned_output_features_output_indices(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) self.assertEqual(__UpperCamelCase , ["c"] ) self.assertEqual(__UpperCamelCase , [2] ) # Out indices set to match out features _UpperCAmelCase , _UpperCAmelCase = get_aligned_output_features_output_indices(["a", "c"] , __UpperCamelCase , __UpperCamelCase ) self.assertEqual(__UpperCamelCase , ["a", "c"] ) self.assertEqual(__UpperCamelCase , [0, 2] ) # Out features set to match out indices _UpperCAmelCase , _UpperCAmelCase = get_aligned_output_features_output_indices(__UpperCamelCase , [0, 2] , __UpperCamelCase ) self.assertEqual(__UpperCamelCase , ["a", "c"] ) self.assertEqual(__UpperCamelCase , [0, 2] ) # Out features selected from negative indices _UpperCAmelCase , _UpperCAmelCase = get_aligned_output_features_output_indices(__UpperCamelCase , [-3, -1] , __UpperCamelCase ) self.assertEqual(__UpperCamelCase , ["a", "c"] ) self.assertEqual(__UpperCamelCase , [-3, -1] ) def UpperCAmelCase__ ( self : Optional[Any] ): # Stage names must be set with self.assertRaises(__UpperCamelCase ): verify_out_features_out_indices(["a", "b"] , (0, 1) , __UpperCamelCase ) # Out features must be a list with self.assertRaises(__UpperCamelCase ): verify_out_features_out_indices(("a", "b") , (0, 1) , ["a", "b"] ) # Out features must be a subset of stage names with self.assertRaises(__UpperCamelCase ): verify_out_features_out_indices(["a", "b"] , (0, 1) , ["a"] ) # Out indices must be a list or tuple with self.assertRaises(__UpperCamelCase ): verify_out_features_out_indices(__UpperCamelCase , 0 , ["a", "b"] ) # Out indices must be a subset of stage names with self.assertRaises(__UpperCamelCase ): verify_out_features_out_indices(__UpperCamelCase , (0, 1) , ["a"] ) # Out features and out indices must be the same length with self.assertRaises(__UpperCamelCase ): verify_out_features_out_indices(["a", "b"] , (0,) , ["a", "b", "c"] ) # Out features should match out indices with self.assertRaises(__UpperCamelCase ): verify_out_features_out_indices(["a", "b"] , (0, 2) , ["a", "b", "c"] ) # Out features and out indices should be in order with self.assertRaises(__UpperCamelCase ): verify_out_features_out_indices(["b", "a"] , (0, 1) , ["a", "b"] ) # Check passes with valid inputs verify_out_features_out_indices(["a", "b", "d"] , (0, 1, -1) , ["a", "b", "c", "d"] ) def UpperCAmelCase__ ( self : List[Any] ): _UpperCAmelCase = BackboneMixin() _UpperCAmelCase = ["a", "b", "c"] _UpperCAmelCase = ["a", "c"] _UpperCAmelCase = [0, 2] # Check that the output features and indices are set correctly self.assertEqual(backbone.out_features , ["a", "c"] ) self.assertEqual(backbone.out_indices , [0, 2] ) # Check out features and indices are updated correctly _UpperCAmelCase = ["a", "b"] self.assertEqual(backbone.out_features , ["a", "b"] ) self.assertEqual(backbone.out_indices , [0, 1] ) _UpperCAmelCase = [-3, -1] self.assertEqual(backbone.out_features , ["a", "c"] ) self.assertEqual(backbone.out_indices , [-3, -1] )
704
import collections import inspect import unittest from transformers import SwinvaConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class __SCREAMING_SNAKE_CASE : def __init__( self : str , __UpperCamelCase : Any , __UpperCamelCase : List[str]=13 , __UpperCamelCase : Dict=32 , __UpperCamelCase : str=2 , __UpperCamelCase : Union[str, Any]=3 , __UpperCamelCase : str=16 , __UpperCamelCase : Any=[1, 2, 1] , __UpperCamelCase : Dict=[2, 2, 4] , __UpperCamelCase : List[str]=2 , __UpperCamelCase : int=2.0 , __UpperCamelCase : str=True , __UpperCamelCase : str=0.0 , __UpperCamelCase : Dict=0.0 , __UpperCamelCase : int=0.1 , __UpperCamelCase : str="gelu" , __UpperCamelCase : str=False , __UpperCamelCase : List[str]=True , __UpperCamelCase : str=0.02 , __UpperCamelCase : int=1e-5 , __UpperCamelCase : List[Any]=True , __UpperCamelCase : Tuple=None , __UpperCamelCase : List[str]=True , __UpperCamelCase : Dict=10 , __UpperCamelCase : Any=8 , ): _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = image_size _UpperCAmelCase = patch_size _UpperCAmelCase = num_channels _UpperCAmelCase = embed_dim _UpperCAmelCase = depths _UpperCAmelCase = num_heads _UpperCAmelCase = window_size _UpperCAmelCase = mlp_ratio _UpperCAmelCase = qkv_bias _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = drop_path_rate _UpperCAmelCase = hidden_act _UpperCAmelCase = use_absolute_embeddings _UpperCAmelCase = patch_norm _UpperCAmelCase = layer_norm_eps _UpperCAmelCase = initializer_range _UpperCAmelCase = is_training _UpperCAmelCase = scope _UpperCAmelCase = use_labels _UpperCAmelCase = type_sequence_label_size _UpperCAmelCase = encoder_stride def UpperCAmelCase__ ( self : Union[str, Any] ): _UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _UpperCAmelCase = None if self.use_labels: _UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _UpperCAmelCase = self.get_config() return config, pixel_values, labels def UpperCAmelCase__ ( self : List[str] ): return SwinvaConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def UpperCAmelCase__ ( self : Optional[Any] , __UpperCamelCase : List[str] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase 
: List[str] ): _UpperCAmelCase = SwinvaModel(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() _UpperCAmelCase = model(__UpperCamelCase ) _UpperCAmelCase = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) _UpperCAmelCase = int(config.embed_dim * 2 ** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) ) def UpperCAmelCase__ ( self : Optional[int] , __UpperCamelCase : str , __UpperCamelCase : str , __UpperCamelCase : List[str] ): _UpperCAmelCase = SwinvaForMaskedImageModeling(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() _UpperCAmelCase = model(__UpperCamelCase ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images _UpperCAmelCase = 1 _UpperCAmelCase = SwinvaForMaskedImageModeling(__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() _UpperCAmelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) _UpperCAmelCase = model(__UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def UpperCAmelCase__ ( self : Optional[int] , __UpperCamelCase : Optional[int] , __UpperCamelCase : List[str] , __UpperCamelCase : int ): _UpperCAmelCase = self.type_sequence_label_size _UpperCAmelCase = SwinvaForImageClassification(__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() _UpperCAmelCase = model(__UpperCamelCase , labels=__UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def UpperCAmelCase__ ( self : Optional[int] ): _UpperCAmelCase = self.prepare_config_and_inputs() _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = config_and_inputs _UpperCAmelCase = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class __SCREAMING_SNAKE_CASE ( lowercase , lowercase , unittest.TestCase): __SCREAMING_SNAKE_CASE : Union[str, Any] = ( (SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else () ) __SCREAMING_SNAKE_CASE : int = ( {"""feature-extraction""": SwinvaModel, """image-classification""": SwinvaForImageClassification} if is_torch_available() else {} ) __SCREAMING_SNAKE_CASE : Union[str, Any] = False __SCREAMING_SNAKE_CASE : Dict = False __SCREAMING_SNAKE_CASE : Optional[Any] = False __SCREAMING_SNAKE_CASE : Optional[Any] = False def UpperCAmelCase__ ( self : Optional[Any] ): _UpperCAmelCase = SwinvaModelTester(self ) _UpperCAmelCase = ConfigTester(self , config_class=__UpperCamelCase , embed_dim=37 ) def UpperCAmelCase__ ( self : Dict ): self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def UpperCAmelCase__ ( self : Tuple ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCamelCase ) @unittest.skip(reason="Got `CUDA error: misaligned address` with PyTorch 2.0.0." 
) def UpperCAmelCase__ ( self : Optional[int] ): pass @unittest.skip(reason="Swinv2 does not use inputs_embeds" ) def UpperCAmelCase__ ( self : str ): pass def UpperCAmelCase__ ( self : Dict ): _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCAmelCase = model_class(__UpperCamelCase ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) _UpperCAmelCase = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__UpperCamelCase , nn.Linear ) ) def UpperCAmelCase__ ( self : str ): _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCAmelCase = model_class(__UpperCamelCase ) _UpperCAmelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _UpperCAmelCase = [*signature.parameters.keys()] _UpperCAmelCase = ["pixel_values"] self.assertListEqual(arg_names[:1] , __UpperCamelCase ) def UpperCAmelCase__ ( self : Any ): _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() _UpperCAmelCase = True for model_class in self.all_model_classes: _UpperCAmelCase = True _UpperCAmelCase = False _UpperCAmelCase = True _UpperCAmelCase = model_class(__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() with torch.no_grad(): _UpperCAmelCase = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) ) _UpperCAmelCase = outputs.attentions _UpperCAmelCase = len(self.model_tester.depths ) self.assertEqual(len(__UpperCamelCase ) , __UpperCamelCase ) # check that output_attentions also work using config del inputs_dict["output_attentions"] _UpperCAmelCase = True _UpperCAmelCase = config.window_size**2 _UpperCAmelCase = model_class(__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() with torch.no_grad(): _UpperCAmelCase = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) ) _UpperCAmelCase = outputs.attentions self.assertEqual(len(__UpperCamelCase ) , __UpperCamelCase ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , ) _UpperCAmelCase = len(__UpperCamelCase ) # Check attention is always last and order is fine _UpperCAmelCase = True _UpperCAmelCase = True _UpperCAmelCase = model_class(__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() with torch.no_grad(): _UpperCAmelCase = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) ) if hasattr(self.model_tester , "num_hidden_states_types" ): _UpperCAmelCase = self.model_tester.num_hidden_states_types else: # also another +1 for reshaped_hidden_states _UpperCAmelCase = 2 self.assertEqual(out_len + added_hidden_states , len(__UpperCamelCase ) ) _UpperCAmelCase = outputs.attentions self.assertEqual(len(__UpperCamelCase ) , __UpperCamelCase ) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , ) def UpperCAmelCase__ ( self : Any , __UpperCamelCase : int , __UpperCamelCase : List[Any] , __UpperCamelCase : Optional[int] , __UpperCamelCase : Tuple ): _UpperCAmelCase = model_class(__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() with torch.no_grad(): _UpperCAmelCase = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) ) _UpperCAmelCase = outputs.hidden_states _UpperCAmelCase = getattr( self.model_tester , 
"expected_num_hidden_layers" , len(self.model_tester.depths ) + 1 ) self.assertEqual(len(__UpperCamelCase ) , __UpperCamelCase ) # Swinv2 has a different seq_length _UpperCAmelCase = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) _UpperCAmelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) _UpperCAmelCase = outputs.reshaped_hidden_states self.assertEqual(len(__UpperCamelCase ) , __UpperCamelCase ) _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = reshaped_hidden_states[0].shape _UpperCAmelCase = ( reshaped_hidden_states[0].view(__UpperCamelCase , __UpperCamelCase , height * width ).permute(0 , 2 , 1 ) ) self.assertListEqual( list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) def UpperCAmelCase__ ( self : Tuple ): _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() _UpperCAmelCase = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes: _UpperCAmelCase = True self.check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _UpperCAmelCase = True self.check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) def UpperCAmelCase__ ( self : Dict ): _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() _UpperCAmelCase = 3 _UpperCAmelCase = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) _UpperCAmelCase = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) _UpperCAmelCase = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) _UpperCAmelCase = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes: _UpperCAmelCase = True self.check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , (padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _UpperCAmelCase = True self.check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , (padded_height, padded_width) ) def UpperCAmelCase__ ( self : Optional[Any] ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*__UpperCamelCase ) def UpperCAmelCase__ ( self : str ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__UpperCamelCase ) @slow def UpperCAmelCase__ ( self : Optional[Any] ): for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _UpperCAmelCase = SwinvaModel.from_pretrained(__UpperCamelCase ) self.assertIsNotNone(__UpperCamelCase ) def UpperCAmelCase__ ( self : Optional[Any] ): _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() _UpperCAmelCase = _config_zero_init(__UpperCamelCase ) for 
model_class in self.all_model_classes: _UpperCAmelCase = model_class(config=__UpperCamelCase ) for name, param in model.named_parameters(): if "embeddings" not in name and "logit_scale" not in name and param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , ) @require_vision @require_torch class __SCREAMING_SNAKE_CASE ( unittest.TestCase): @cached_property def UpperCAmelCase__ ( self : Optional[int] ): return ( AutoImageProcessor.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256" ) if is_vision_available() else None ) @slow def UpperCAmelCase__ ( self : Union[str, Any] ): _UpperCAmelCase = SwinvaForImageClassification.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256" ).to( __UpperCamelCase ) _UpperCAmelCase = self.default_image_processor _UpperCAmelCase = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) _UpperCAmelCase = image_processor(images=__UpperCamelCase , return_tensors="pt" ).to(__UpperCamelCase ) # forward pass with torch.no_grad(): _UpperCAmelCase = model(**__UpperCamelCase ) # verify the logits _UpperCAmelCase = torch.Size((1, 1_000) ) self.assertEqual(outputs.logits.shape , __UpperCamelCase ) _UpperCAmelCase = torch.tensor([-0.3947, -0.4306, 0.0026] ).to(__UpperCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __UpperCamelCase , atol=1e-4 ) )
129
0
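A minimal sketch of the alignment helper exercised in the backbone tests above, following the positional call pattern (out_features, out_indices, stage_names) that the tests use:

# Sketch only: derive out_features from out_indices against a list of stage names.
from transformers.utils.backbone_utils import get_aligned_output_features_output_indices

stage_names = ["stem", "stage1", "stage2", "stage3"]
features, indices = get_aligned_output_features_output_indices(None, [1, 3], stage_names)
print(features, indices)  # ['stage1', 'stage3'] [1, 3]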
from __future__ import annotations

import numpy as np


def lower_upper_decomposition(table: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
    # Doolittle LU decomposition: table = lower @ upper, with 1s on lower's diagonal
    rows, columns = np.shape(table)
    if rows != columns:
        msg = (
            "'table' has to be of square shaped array but got a "
            f"{rows}x{columns} array:\n{table}"
        )
        raise ValueError(msg)

    lower = np.zeros((rows, columns))
    upper = np.zeros((rows, columns))
    for i in range(columns):
        for j in range(i):
            total = sum(lower[i][k] * upper[k][j] for k in range(j))
            if upper[j][j] == 0:
                raise ArithmeticError("No LU decomposition exists")
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        for j in range(i, columns):
            total = sum(lower[i][k] * upper[k][j] for k in range(i))
            upper[i][j] = table[i][j] - total
    return lower, upper


if __name__ == "__main__":
    import doctest

    doctest.testmod()
317
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_table_transformer": [
        "TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TableTransformerConfig",
        "TableTransformerOnnxConfig",
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_table_transformer"] = [
        "TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TableTransformerForObjectDetection",
        "TableTransformerModel",
        "TableTransformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_table_transformer import (
        TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TableTransformerConfig,
        TableTransformerOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_table_transformer import (
            TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TableTransformerForObjectDetection,
            TableTransformerModel,
            TableTransformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
317
1
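A small numerical check of the LU decomposition above (a sketch, not part of the original module): the product lower @ upper should reproduce the input matrix.

# Verify lower @ upper == matrix on a small example with nonzero leading minors.
import numpy as np

matrix = np.array([[2.0, -2.0, 1.0], [0.0, 1.0, 2.0], [5.0, 3.0, 1.0]])
lower, upper = lower_upper_decomposition(matrix)
print(np.allclose(lower @ upper, matrix))  # True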
from __future__ import annotations


def check_polygon(nums: list[float]) -> bool:
    # A set of side lengths can close into a polygon only if the longest
    # side is strictly shorter than the sum of all the others.
    if len(nums) < 2:
        raise ValueError("Monogons and Digons are not polygons in the Euclidean space")
    if any(i <= 0 for i in nums):
        raise ValueError("All values must be greater than 0")
    copy_nums = nums.copy()
    copy_nums.sort()
    return copy_nums[-1] < sum(copy_nums[:-1])


if __name__ == "__main__":
    import doctest

    doctest.testmod()
305
'''simple docstring''' import os import posixpath import uuid from dataclasses import dataclass from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union import numpy as np import pyarrow as pa import datasets from datasets.arrow_writer import ArrowWriter, ParquetWriter from datasets.config import MAX_SHARD_SIZE from datasets.filesystems import ( is_remote_filesystem, rename, ) from datasets.iterable_dataset import _BaseExamplesIterable from datasets.utils.py_utils import convert_file_size_to_int __lowercase = datasets.utils.logging.get_logger(__name__) if TYPE_CHECKING: import pyspark @dataclass class _snake_case ( datasets.BuilderConfig ): """simple docstring""" _UpperCamelCase : Optional[datasets.Features] = None def SCREAMING_SNAKE_CASE__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ): import pyspark def generate_fn(): lowerCAmelCase_ : Optional[int] =df.select('''*''' , pyspark.sql.functions.spark_partition_id().alias('''part_id''' ) ) for partition_id in partition_order: lowerCAmelCase_ : Tuple =df_with_partition_id.select('''*''' ).where(f'part_id = {partition_id}' ).drop('''part_id''' ) lowerCAmelCase_ : Dict =partition_df.collect() lowerCAmelCase_ : Union[str, Any] =0 for row in rows: yield f'{partition_id}_{row_id}', row.asDict() row_id += 1 return generate_fn class _snake_case ( _BaseExamplesIterable ): """simple docstring""" def __init__( self : str , UpperCamelCase_ : "pyspark.sql.DataFrame" , UpperCamelCase_ : str=None , ): lowerCAmelCase_ : Optional[Any] =df lowerCAmelCase_ : Any =partition_order or range(self.df.rdd.getNumPartitions() ) lowerCAmelCase_ : Any =_generate_iterable_examples(self.df , self.partition_order ) def __iter__( self : str ): yield from self.generate_examples_fn() def __A ( self : Optional[int] , UpperCamelCase_ : np.random.Generator ): lowerCAmelCase_ : Tuple =list(range(self.df.rdd.getNumPartitions() ) ) generator.shuffle(UpperCamelCase_ ) return SparkExamplesIterable(self.df , partition_order=UpperCamelCase_ ) def __A ( self : str , UpperCamelCase_ : int , UpperCamelCase_ : int ): lowerCAmelCase_ : Union[str, Any] =self.split_shard_indices_by_worker(UpperCamelCase_ , UpperCamelCase_ ) return SparkExamplesIterable(self.df , partition_order=UpperCamelCase_ ) @property def __A ( self : Any ): return len(self.partition_order ) class _snake_case ( datasets.DatasetBuilder ): """simple docstring""" _UpperCamelCase : List[str] = SparkConfig def __init__( self : Union[str, Any] , UpperCamelCase_ : "pyspark.sql.DataFrame" , UpperCamelCase_ : str = None , UpperCamelCase_ : str = None , **UpperCamelCase_ : Optional[int] , ): import pyspark lowerCAmelCase_ : Any =pyspark.sql.SparkSession.builder.getOrCreate() lowerCAmelCase_ : int =df lowerCAmelCase_ : Any =working_dir super().__init__( cache_dir=UpperCamelCase_ , config_name=str(self.df.semanticHash() ) , **UpperCamelCase_ , ) def __A ( self : Union[str, Any] ): # Returns the path of the created file. def create_cache_and_write_probe(UpperCamelCase_ : Dict ): # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories # already exist. os.makedirs(self._cache_dir , exist_ok=UpperCamelCase_ ) lowerCAmelCase_ : Union[str, Any] =os.path.join(self._cache_dir , '''fs_test''' + uuid.uuida().hex ) # Opening the file in append mode will create a new file unless it already exists, in which case it will not # change the file contents. 
open(UpperCamelCase_ , '''a''' ) return [probe_file] if self._spark.conf.get('''spark.master''' , '''''' ).startswith('''local''' ): return # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS # accessible to the driver. # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error. if self._cache_dir: lowerCAmelCase_ : int =( self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(UpperCamelCase_ ).collect() ) if os.path.isfile(probe[0] ): return raise ValueError( '''When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir''' ) def __A ( self : Union[str, Any] ): return datasets.DatasetInfo(features=self.config.features ) def __A ( self : Optional[Any] , UpperCamelCase_ : datasets.download.download_manager.DownloadManager ): return [datasets.SplitGenerator(name=datasets.Split.TRAIN )] def __A ( self : Dict , UpperCamelCase_ : str ): import pyspark def get_arrow_batch_size(UpperCamelCase_ : Dict ): for batch in it: yield pa.RecordBatch.from_pydict({'''batch_bytes''': [batch.nbytes]} ) lowerCAmelCase_ : Any =self.df.count() lowerCAmelCase_ : List[str] =df_num_rows if df_num_rows <= 100 else 100 # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample. lowerCAmelCase_ : Optional[Any] =( self.df.limit(UpperCamelCase_ ) .repartition(1 ) .mapInArrow(UpperCamelCase_ , '''batch_bytes: long''' ) .agg(pyspark.sql.functions.sum('''batch_bytes''' ).alias('''sample_bytes''' ) ) .collect()[0] .sample_bytes / sample_num_rows ) lowerCAmelCase_ : List[Any] =approx_bytes_per_row * df_num_rows if approx_total_size > max_shard_size: # Make sure there is at least one row per partition. lowerCAmelCase_ : List[Any] =min(UpperCamelCase_ , int(approx_total_size / max_shard_size ) ) lowerCAmelCase_ : Optional[int] =self.df.repartition(UpperCamelCase_ ) def __A ( self : List[str] , UpperCamelCase_ : str , UpperCamelCase_ : str , UpperCamelCase_ : int , ): import pyspark lowerCAmelCase_ : Any =ParquetWriter if file_format == '''parquet''' else ArrowWriter lowerCAmelCase_ : Any =os.path.join(self._working_dir , os.path.basename(UpperCamelCase_ ) ) if self._working_dir else fpath lowerCAmelCase_ : Dict =file_format == '''parquet''' # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to # pickling the SparkContext. lowerCAmelCase_ : Dict =self.config.features lowerCAmelCase_ : str =self._writer_batch_size lowerCAmelCase_ : List[str] =self._fs.storage_options def write_arrow(UpperCamelCase_ : Optional[int] ): # Within the same SparkContext, no two task attempts will share the same attempt ID. lowerCAmelCase_ : List[str] =pyspark.TaskContext().taskAttemptId() lowerCAmelCase_ : Union[str, Any] =next(UpperCamelCase_ , UpperCamelCase_ ) if first_batch is None: # Some partitions might not receive any data. 
return pa.RecordBatch.from_arrays( [[task_id], [0], [0]] , names=['''task_id''', '''num_examples''', '''num_bytes'''] , ) lowerCAmelCase_ : List[str] =0 lowerCAmelCase_ : Any =writer_class( features=UpperCamelCase_ , path=working_fpath.replace('''SSSSS''' , F'{shard_id:05d}' ).replace('''TTTTT''' , F'{task_id:05d}' ) , writer_batch_size=UpperCamelCase_ , storage_options=UpperCamelCase_ , embed_local_files=UpperCamelCase_ , ) lowerCAmelCase_ : Any =pa.Table.from_batches([first_batch] ) writer.write_table(UpperCamelCase_ ) for batch in it: if max_shard_size is not None and writer._num_bytes >= max_shard_size: lowerCAmelCase_ , lowerCAmelCase_ : Union[str, Any] =writer.finalize() writer.close() yield pa.RecordBatch.from_arrays( [[task_id], [num_examples], [num_bytes]] , names=['''task_id''', '''num_examples''', '''num_bytes'''] , ) shard_id += 1 lowerCAmelCase_ : Optional[int] =writer_class( features=writer._features , path=working_fpath.replace('''SSSSS''' , F'{shard_id:05d}' ).replace('''TTTTT''' , F'{task_id:05d}' ) , writer_batch_size=UpperCamelCase_ , storage_options=UpperCamelCase_ , embed_local_files=UpperCamelCase_ , ) lowerCAmelCase_ : Optional[Any] =pa.Table.from_batches([batch] ) writer.write_table(UpperCamelCase_ ) if writer._num_bytes > 0: lowerCAmelCase_ , lowerCAmelCase_ : List[str] =writer.finalize() writer.close() yield pa.RecordBatch.from_arrays( [[task_id], [num_examples], [num_bytes]] , names=['''task_id''', '''num_examples''', '''num_bytes'''] , ) if working_fpath != fpath: for file in os.listdir(os.path.dirname(UpperCamelCase_ ) ): lowerCAmelCase_ : Union[str, Any] =os.path.join(os.path.dirname(UpperCamelCase_ ) , os.path.basename(UpperCamelCase_ ) ) shutil.move(UpperCamelCase_ , UpperCamelCase_ ) lowerCAmelCase_ : List[Any] =( self.df.mapInArrow(UpperCamelCase_ , '''task_id: long, num_examples: long, num_bytes: long''' ) .groupBy('''task_id''' ) .agg( pyspark.sql.functions.sum('''num_examples''' ).alias('''total_num_examples''' ) , pyspark.sql.functions.sum('''num_bytes''' ).alias('''total_num_bytes''' ) , pyspark.sql.functions.count('''num_bytes''' ).alias('''num_shards''' ) , pyspark.sql.functions.collect_list('''num_examples''' ).alias('''shard_lengths''' ) , ) .collect() ) for row in stats: yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths) def __A ( self : Tuple , UpperCamelCase_ : "datasets.SplitGenerator" , UpperCamelCase_ : str = "arrow" , UpperCamelCase_ : Optional[Union[str, int]] = None , UpperCamelCase_ : Optional[int] = None , **UpperCamelCase_ : str , ): self._validate_cache_dir() lowerCAmelCase_ : List[Any] =convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE ) self._repartition_df_if_needed(UpperCamelCase_ ) lowerCAmelCase_ : Any =not is_remote_filesystem(self._fs ) lowerCAmelCase_ : Dict =os.path.join if is_local else posixpath.join lowerCAmelCase_ : Optional[Any] ='''-TTTTT-SSSSS-of-NNNNN''' lowerCAmelCase_ : Dict =F'{self.name}-{split_generator.name}{SUFFIX}.{file_format}' lowerCAmelCase_ : Union[str, Any] =path_join(self._output_dir , UpperCamelCase_ ) lowerCAmelCase_ : List[Any] =0 lowerCAmelCase_ : Dict =0 lowerCAmelCase_ : Dict =0 lowerCAmelCase_ : Any =[] lowerCAmelCase_ : Union[str, Any] =[] for task_id, content in self._prepare_split_single(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ): ( ( lowerCAmelCase_ ) , ( lowerCAmelCase_ ) , ( lowerCAmelCase_ ) , ( lowerCAmelCase_ ) , ) : List[Any] =content if num_bytes > 0: total_num_examples += num_examples total_num_bytes += num_bytes 
total_shards += num_shards task_id_and_num_shards.append((task_id, num_shards) ) all_shard_lengths.extend(UpperCamelCase_ ) lowerCAmelCase_ : Dict =total_num_examples lowerCAmelCase_ : Optional[int] =total_num_bytes # should rename everything at the end logger.debug(F'Renaming {total_shards} shards.' ) if total_shards > 1: lowerCAmelCase_ : List[str] =all_shard_lengths # Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a # pickling error due to pickling the SparkContext. lowerCAmelCase_ : str =self._fs # use the -SSSSS-of-NNNNN pattern def _rename_shard( UpperCamelCase_ : int , UpperCamelCase_ : int , UpperCamelCase_ : int , ): rename( UpperCamelCase_ , fpath.replace('''SSSSS''' , F'{shard_id:05d}' ).replace('''TTTTT''' , F'{task_id:05d}' ) , fpath.replace('''TTTTT-SSSSS''' , F'{global_shard_id:05d}' ).replace('''NNNNN''' , F'{total_shards:05d}' ) , ) lowerCAmelCase_ : Optional[Any] =[] lowerCAmelCase_ : Union[str, Any] =0 for i in range(len(UpperCamelCase_ ) ): lowerCAmelCase_ , lowerCAmelCase_ : Optional[int] =task_id_and_num_shards[i] for shard_id in range(UpperCamelCase_ ): args.append([task_id, shard_id, global_shard_id] ) global_shard_id += 1 self._spark.sparkContext.parallelize(UpperCamelCase_ , len(UpperCamelCase_ ) ).map(lambda UpperCamelCase_ : _rename_shard(*UpperCamelCase_ ) ).collect() else: # don't use any pattern lowerCAmelCase_ : Optional[Any] =0 lowerCAmelCase_ : Optional[int] =task_id_and_num_shards[0][0] self._rename( fpath.replace('''SSSSS''' , F'{shard_id:05d}' ).replace('''TTTTT''' , F'{task_id:05d}' ) , fpath.replace(UpperCamelCase_ , '''''' ) , ) def __A ( self : Dict , UpperCamelCase_ : "datasets.SplitGenerator" , ): return SparkExamplesIterable(self.df )
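The fragment above belongs to the Spark-backed builder in 🤗 `datasets`. A minimal sketch of how it is typically driven from user code (the DataFrame contents are hypothetical); on a multi-node cluster, `cache_dir` must additionally point at an NFS path visible to the driver and every worker, which is exactly what the probe-file check above enforces:

from pyspark.sql import SparkSession
from datasets import Dataset

spark = SparkSession.builder.master("local[*]").getOrCreate()
df = spark.createDataFrame([("hello",), ("world",)], ["text"])

# With a local master no shared cache_dir is required, so the probe is skipped.
ds = Dataset.from_spark(df)
print(ds[0])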
305
1
"""simple docstring""" import os import warnings from typing import List, Optional from ...tokenization_utils_base import BatchEncoding from ...utils import logging from .configuration_rag import RagConfig __magic_name__ = logging.get_logger(__name__) class SCREAMING_SNAKE_CASE_ : """simple docstring""" def __init__( self , lowerCAmelCase__ , lowerCAmelCase__): __SCREAMING_SNAKE_CASE = question_encoder __SCREAMING_SNAKE_CASE = generator __SCREAMING_SNAKE_CASE = self.question_encoder def snake_case_ ( self , lowerCAmelCase__): if os.path.isfile(lowerCAmelCase_): raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file") os.makedirs(lowerCAmelCase_ , exist_ok=lowerCAmelCase_) __SCREAMING_SNAKE_CASE = os.path.join(lowerCAmelCase_ , """question_encoder_tokenizer""") __SCREAMING_SNAKE_CASE = os.path.join(lowerCAmelCase_ , """generator_tokenizer""") self.question_encoder.save_pretrained(lowerCAmelCase_) self.generator.save_pretrained(lowerCAmelCase_) @classmethod def snake_case_ ( cls , lowerCAmelCase__ , **lowerCAmelCase__): # dynamically import AutoTokenizer from ..auto.tokenization_auto import AutoTokenizer __SCREAMING_SNAKE_CASE = kwargs.pop("""config""" , lowerCAmelCase_) if config is None: __SCREAMING_SNAKE_CASE = RagConfig.from_pretrained(lowerCAmelCase_) __SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained( lowerCAmelCase_ , config=config.question_encoder , subfolder="""question_encoder_tokenizer""") __SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained( lowerCAmelCase_ , config=config.generator , subfolder="""generator_tokenizer""") return cls(question_encoder=lowerCAmelCase_ , generator=lowerCAmelCase_) def __call__( self , *lowerCAmelCase__ , **lowerCAmelCase__): return self.current_tokenizer(*lowerCAmelCase_ , **lowerCAmelCase_) def snake_case_ ( self , *lowerCAmelCase__ , **lowerCAmelCase__): return self.generator.batch_decode(*lowerCAmelCase_ , **lowerCAmelCase_) def snake_case_ ( self , *lowerCAmelCase__ , **lowerCAmelCase__): return self.generator.decode(*lowerCAmelCase_ , **lowerCAmelCase_) def snake_case_ ( self): __SCREAMING_SNAKE_CASE = self.question_encoder def snake_case_ ( self): __SCREAMING_SNAKE_CASE = self.generator def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = "longest" , lowerCAmelCase__ = None , lowerCAmelCase__ = True , **lowerCAmelCase__ , ): warnings.warn( """`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the """ """regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` """ """context manager to prepare your targets. 
See the documentation of your specific tokenizer for more """ """details""" , lowerCAmelCase_ , ) if max_length is None: __SCREAMING_SNAKE_CASE = self.current_tokenizer.model_max_length __SCREAMING_SNAKE_CASE = self( lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_tensors=lowerCAmelCase_ , max_length=lowerCAmelCase_ , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , **lowerCAmelCase_ , ) if tgt_texts is None: return model_inputs # Process tgt_texts if max_target_length is None: __SCREAMING_SNAKE_CASE = self.current_tokenizer.model_max_length __SCREAMING_SNAKE_CASE = self( text_target=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_tensors=lowerCAmelCase_ , padding=lowerCAmelCase_ , max_length=lowerCAmelCase_ , truncation=lowerCAmelCase_ , **lowerCAmelCase_ , ) __SCREAMING_SNAKE_CASE = labels['input_ids'] return model_inputs
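A short usage sketch for the composite tokenizer above, assuming the public `facebook/rag-token-nq` checkpoint (whose repo layout matches the subfolders read by `from_pretrained`):

from transformers import RagTokenizer

tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
# __call__ routes to the current (question encoder) tokenizer ...
inputs = tokenizer("who holds the record in 100m freestyle", return_tensors="pt")
# ... while decode/batch_decode always route to the generator tokenizer.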
155
"""simple docstring""" from __future__ import annotations from collections.abc import Generator def lowercase__ ( ): _SCREAMING_SNAKE_CASE : dict[int, int] = {} _SCREAMING_SNAKE_CASE : List[Any] = 2 while True: _SCREAMING_SNAKE_CASE : List[Any] = factor_map.pop(lowerCamelCase, lowerCamelCase ) if factor: _SCREAMING_SNAKE_CASE : str = factor + prime while x in factor_map: x += factor _SCREAMING_SNAKE_CASE : Union[str, Any] = factor else: _SCREAMING_SNAKE_CASE : Optional[int] = prime yield prime prime += 1 def lowercase__ ( lowerCamelCase = 1E10 ): _SCREAMING_SNAKE_CASE : Dict = sieve() _SCREAMING_SNAKE_CASE : Dict = 1 while True: _SCREAMING_SNAKE_CASE : int = next(lowerCamelCase ) if (2 * prime * n) > limit: return n # Ignore the next prime as the reminder will be 2. next(lowerCamelCase ) n += 2 if __name__ == "__main__": print(solution())
621
0
"""simple docstring""" import logging import sys from dataclasses import dataclass, field from typing import Any, Dict, List, Optional, Union import librosa import torch from datasets import DatasetDict, load_dataset from packaging import version from torch import nn from transformers import ( HfArgumentParser, Trainer, TrainingArguments, WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaForPreTraining, is_apex_available, trainer_utils, ) from transformers.models.wavaveca.modeling_wavaveca import _compute_mask_indices if is_apex_available(): from apex import amp if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"): _a : Any= True from torch.cuda.amp import autocast _a : int= logging.getLogger(__name__) @dataclass class UpperCamelCase : UpperCAmelCase : str = field( metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} ) UpperCAmelCase : Optional[str] = field( default=lowercase , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , ) UpperCAmelCase : Optional[bool] = field( default=lowercase , metadata={"""help""": """Whether to freeze the feature extractor layers of the model."""} ) UpperCAmelCase : Optional[bool] = field( default=lowercase , metadata={"""help""": """Whether to log verbose messages or not."""} , ) UpperCAmelCase : Optional[float] = field( default=2.0 , metadata={"""help""": """Maximum temperature for gumbel softmax."""} ) UpperCAmelCase : Optional[float] = field( default=0.5 , metadata={"""help""": """Minimum temperature for gumbel softmax."""} ) UpperCAmelCase : Optional[float] = field( default=0.999995 , metadata={"""help""": """Decay of gumbel temperature during training."""} ) def __UpperCAmelCase ( UpperCAmelCase_ : ModelArguments , UpperCAmelCase_ : TrainingArguments ) -> int: '''simple docstring''' logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , ) __snake_case : Union[str, Any] = logging.WARNING if model_args.verbose_logging: __snake_case : Optional[int] = logging.DEBUG elif trainer_utils.is_main_process(training_args.local_rank ): __snake_case : List[Any] = logging.INFO logger.setLevel(UpperCAmelCase_ ) @dataclass class UpperCamelCase : UpperCAmelCase : str = field( default=lowercase , metadata={"""help""": """The name of the dataset to use (via the datasets library)."""} ) UpperCAmelCase : Optional[str] = field( default=lowercase , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} ) UpperCAmelCase : Optional[str] = field( default="""train""" , metadata={ """help""": """The name of the training data set split to use (via the datasets library). Defaults to 'train'""" } , ) UpperCAmelCase : Optional[str] = field( default="""validation""" , metadata={ """help""": ( """The name of the validation data set split to use (via the datasets library). Defaults to 'validation'""" ) } , ) UpperCAmelCase : Optional[str] = field( default="""file""" , metadata={"""help""": """Column in the dataset that contains speech file path. 
Defaults to 'file'"""} , ) UpperCAmelCase : bool = field( default=lowercase , metadata={"""help""": """Overwrite the cached preprocessed datasets or not."""} ) UpperCAmelCase : Optional[int] = field( default=1 , metadata={ """help""": """The percentage of the train set used as validation set in case there's no validation split""" } , ) UpperCAmelCase : Optional[int] = field( default=lowercase , metadata={"""help""": """The number of processes to use for the preprocessing."""} , ) UpperCAmelCase : Optional[float] = field( default=20.0 , metadata={"""help""": """Filter audio files that are longer than `max_duration_in_seconds` seconds"""} ) @dataclass class UpperCamelCase : UpperCAmelCase : WavaVecaForPreTraining UpperCAmelCase : WavaVecaFeatureExtractor UpperCAmelCase : Union[bool, str] = "longest" UpperCAmelCase : Optional[int] = None UpperCAmelCase : Optional[int] = None def __call__(self : int , _A : List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]: # reformat list to dict and set to pytorch format __snake_case : Dict = self.feature_extractor.pad( _A , max_length=self.max_length , padding=self.padding , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='pt' , ) __snake_case : Tuple = self.model._get_feat_extract_output_lengths(batch['input_values'].shape[-1]) __snake_case : List[str] = batch['input_values'].shape[0] # make sure that no loss is computed on padded inputs if batch["attention_mask"] is not None: # compute real output lengths according to convolution formula __snake_case : str = self.model._get_feat_extract_output_lengths(batch['attention_mask'].sum(-1)).to( torch.long) __snake_case : Optional[Any] = torch.zeros( (batch_size, mask_indices_seq_length) , dtype=torch.long , device=batch['input_values'].device) # these two operations makes sure that all values # before the output lengths indices are attended to __snake_case : Union[str, Any] = 1 __snake_case : Tuple = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool() # sample randomly masked indices __snake_case : Tuple = _compute_mask_indices( (batch_size, mask_indices_seq_length) , self.model.config.mask_time_prob , self.model.config.mask_time_length , attention_mask=_A , min_masks=2 , ) return batch class UpperCamelCase ( lowercase ): def __init__(self : Union[str, Any] , *_A : Dict , _A : Any=1 , _A : int=0 , _A : Optional[int]=1.0 , **_A : Union[str, Any]) -> int: super().__init__(*_A , **_A) __snake_case : Tuple = 0 __snake_case : Dict = max_gumbel_temp __snake_case : List[Any] = min_gumbel_temp __snake_case : List[Any] = gumbel_temp_decay def _lowercase (self : Optional[int] , _A : nn.Module , _A : Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor: model.train() __snake_case : Dict = self._prepare_inputs(_A) if self.use_amp: with autocast(): __snake_case : Union[str, Any] = self.compute_loss(_A , _A) else: __snake_case : Dict = self.compute_loss(_A , _A) if self.args.n_gpu > 1 or self.deepspeed: if model.module.config.ctc_loss_reduction == "mean": __snake_case : List[str] = loss.mean() elif model.module.config.ctc_loss_reduction == "sum": __snake_case : int = loss.sum() / (inputs['mask_time_indices']).sum() else: raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. 
Choose one of ['mean', 'sum']") if self.args.gradient_accumulation_steps > 1: __snake_case : Union[str, Any] = loss / self.args.gradient_accumulation_steps if self.use_amp: self.scaler.scale(_A).backward() elif self.use_apex: with amp.scale_loss(_A , self.optimizer) as scaled_loss: scaled_loss.backward() elif self.deepspeed: self.deepspeed.backward(_A) else: loss.backward() self.num_update_step += 1 # make sure gumbel softmax temperature is decayed if self.args.n_gpu > 1 or self.deepspeed: model.module.set_gumbel_temperature( max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step , self.min_gumbel_temp)) else: model.set_gumbel_temperature( max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step , self.min_gumbel_temp)) return loss.detach() def __UpperCAmelCase ( ) -> Optional[Any]: '''simple docstring''' __snake_case : List[str] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) __snake_case : int = parser.parse_args_into_dataclasses() configure_logger(UpperCAmelCase_ , UpperCAmelCase_ ) # Downloading and loading a dataset from the hub. __snake_case : Optional[Any] = load_dataset(data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir ) if "validation" not in datasets.keys(): # make sure only "validation" and "train" keys remain" __snake_case : str = DatasetDict() __snake_case : Any = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=F"{data_args.train_split_name}[:{data_args.validation_split_percentage}%]" , cache_dir=model_args.cache_dir , ) __snake_case : int = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=F"{data_args.train_split_name}[{data_args.validation_split_percentage}%:]" , cache_dir=model_args.cache_dir , ) else: # make sure only "validation" and "train" keys remain" __snake_case : Union[str, Any] = DatasetDict() __snake_case : int = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split='validation' , cache_dir=model_args.cache_dir , ) __snake_case : Optional[int] = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=F"{data_args.train_split_name}" , cache_dir=model_args.cache_dir , ) # only normalized-inputs-training is supported __snake_case : Dict = WavaVecaFeatureExtractor.from_pretrained( model_args.model_name_or_path , cache_dir=model_args.cache_dir , do_normalize=UpperCAmelCase_ ) def prepare_dataset(UpperCAmelCase_ : str ): # check that all files have the correct sampling rate __snake_case : Optional[int] = librosa.load(batch[data_args.speech_file_column] , sr=feature_extractor.sampling_rate ) return batch # load audio files into numpy arrays __snake_case : List[Any] = datasets.map( UpperCAmelCase_ , num_proc=data_args.preprocessing_num_workers , remove_columns=datasets['train'].column_names ) # filter audio files that are too long __snake_case : Union[str, Any] = vectorized_datasets.filter( lambda UpperCAmelCase_ : len(data['speech'] ) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate ) ) def normalize(UpperCAmelCase_ : Any ): return feature_extractor(batch['speech'] , sampling_rate=feature_extractor.sampling_rate ) # normalize and transform to `BatchFeatures` __snake_case : int = vectorized_datasets.map( UpperCAmelCase_ , batched=UpperCAmelCase_ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , remove_columns=vectorized_datasets['train'].column_names , ) # pretraining is only supported for "newer" 
stable layer norm architecture # apply_spec_augment has to be True, mask_feature_prob has to be 0.0 __snake_case : Dict = WavaVecaConfig.from_pretrained( model_args.model_name_or_path , cache_dir=model_args.cache_dir , gradient_checkpointing=training_args.gradient_checkpointing , ) if not config.do_stable_layer_norm or config.feat_extract_norm != "layer": raise ValueError( 'PreTraining is only supported for ``config.do_stable_layer_norm=True`` and' ' ``config.feat_extract_norm=\'layer\'' ) __snake_case : Optional[Any] = WavaVecaForPreTraining(UpperCAmelCase_ ) __snake_case : int = DataCollatorForWavaVecaPretraining(model=UpperCAmelCase_ , feature_extractor=UpperCAmelCase_ ) __snake_case : int = WavaVecaPreTrainer( model=UpperCAmelCase_ , data_collator=UpperCAmelCase_ , args=UpperCAmelCase_ , train_dataset=vectorized_datasets['train'] , eval_dataset=vectorized_datasets['validation'] , tokenizer=UpperCAmelCase_ , max_gumbel_temp=model_args.max_gumbel_temperature , min_gumbel_temp=model_args.min_gumbel_temperature , gumbel_temp_decay=model_args.gumbel_temperature_decay , ) trainer.train() if __name__ == "__main__": main()
709
"""simple docstring""" import math import tensorflow as tf from packaging import version def __UpperCAmelCase ( UpperCAmelCase_ : Union[str, Any] ) -> Any: '''simple docstring''' __snake_case : List[str] = tf.convert_to_tensor(UpperCAmelCase_ ) __snake_case : List[str] = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0 ) , x.dtype ) )) return x * cdf def __UpperCAmelCase ( UpperCAmelCase_ : Tuple ) -> Optional[Any]: '''simple docstring''' __snake_case : Optional[int] = tf.convert_to_tensor(UpperCAmelCase_ ) __snake_case : Any = tf.cast(math.pi , x.dtype ) __snake_case : List[Any] = tf.cast(0.044_715 , x.dtype ) __snake_case : Union[str, Any] = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi ) * (x + coeff * tf.pow(UpperCAmelCase_ , 3 )) )) return x * cdf def __UpperCAmelCase ( UpperCAmelCase_ : Tuple ) -> List[Any]: '''simple docstring''' __snake_case : Optional[int] = tf.convert_to_tensor(UpperCAmelCase_ ) return x * tf.tanh(tf.math.softplus(UpperCAmelCase_ ) ) def __UpperCAmelCase ( UpperCAmelCase_ : Tuple ) -> List[str]: '''simple docstring''' __snake_case : Union[str, Any] = tf.convert_to_tensor(UpperCAmelCase_ ) __snake_case : Dict = tf.cast(0.044_715 , x.dtype ) __snake_case : int = tf.cast(0.7_978_845_608 , x.dtype ) return 0.5 * x * (1.0 + tf.tanh(x * coeffa * (1.0 + coeffa * x * x) )) def __UpperCAmelCase ( UpperCAmelCase_ : int ) -> Dict: '''simple docstring''' __snake_case : Any = tf.convert_to_tensor(UpperCAmelCase_ ) __snake_case : Dict = tf.cast(1.702 , x.dtype ) return x * tf.math.sigmoid(coeff * x ) def __UpperCAmelCase ( UpperCAmelCase_ : Union[str, Any] ) -> List[Any]: '''simple docstring''' return tf.clip_by_value(_gelu(UpperCAmelCase_ ) , -10 , 10 ) def __UpperCAmelCase ( UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Any=-1 ) -> int: '''simple docstring''' __snake_case , __snake_case : str = tf.split(UpperCAmelCase_ , 2 , axis=UpperCAmelCase_ ) return a * tf.math.sigmoid(UpperCAmelCase_ ) if version.parse(tf.version.VERSION) >= version.parse("2.4"): def __UpperCAmelCase ( UpperCAmelCase_ : Tuple ) -> int: '''simple docstring''' return tf.keras.activations.gelu(UpperCAmelCase_ , approximate=UpperCAmelCase_ ) _a : Tuple= tf.keras.activations.gelu _a : Tuple= approximate_gelu_wrap else: _a : Any= _gelu _a : str= _gelu_new _a : List[str]= { "gelu": gelu, "gelu_10": gelu_aa, "gelu_fast": gelu_fast, "gelu_new": gelu_new, "glu": glu, "mish": mish, "quick_gelu": quick_gelu, "relu": tf.keras.activations.relu, "sigmoid": tf.keras.activations.sigmoid, "silu": tf.keras.activations.swish, "swish": tf.keras.activations.swish, "tanh": tf.keras.activations.tanh, } def __UpperCAmelCase ( UpperCAmelCase_ : Optional[Any] ) -> List[Any]: '''simple docstring''' if activation_string in ACTaFN: return ACTaFN[activation_string] else: raise KeyError(F"function {activation_string} not found in ACT2FN mapping {list(ACTaFN.keys() )}" )
192
0
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging _A = logging.get_logger(__name__) _A = { """google/canine-s""": """https://huggingface.co/google/canine-s/resolve/main/config.json""", # See all CANINE models at https://huggingface.co/models?filter=canine } class lowerCamelCase ( lowerCAmelCase__ ): '''simple docstring''' SCREAMING_SNAKE_CASE = 'canine' def __init__(self , _lowerCamelCase=768 , _lowerCamelCase=12 , _lowerCamelCase=12 , _lowerCamelCase=3072 , _lowerCamelCase="gelu" , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=16384 , _lowerCamelCase=16 , _lowerCamelCase=0.02 , _lowerCamelCase=1e-12 , _lowerCamelCase=0 , _lowerCamelCase=0xE000 , _lowerCamelCase=0xE001 , _lowerCamelCase=4 , _lowerCamelCase=4 , _lowerCamelCase=8 , _lowerCamelCase=16384 , _lowerCamelCase=128 , **_lowerCamelCase , ): """simple docstring""" super().__init__(pad_token_id=_lowerCamelCase , bos_token_id=_lowerCamelCase , eos_token_id=_lowerCamelCase , **_lowerCamelCase ) UpperCAmelCase__ : Dict = max_position_embeddings UpperCAmelCase__ : Tuple = hidden_size UpperCAmelCase__ : Optional[int] = num_hidden_layers UpperCAmelCase__ : List[str] = num_attention_heads UpperCAmelCase__ : Any = intermediate_size UpperCAmelCase__ : Dict = hidden_act UpperCAmelCase__ : int = hidden_dropout_prob UpperCAmelCase__ : List[Any] = attention_probs_dropout_prob UpperCAmelCase__ : Union[str, Any] = initializer_range UpperCAmelCase__ : str = type_vocab_size UpperCAmelCase__ : Dict = layer_norm_eps # Character config: UpperCAmelCase__ : Tuple = downsampling_rate UpperCAmelCase__ : Any = upsampling_kernel_size UpperCAmelCase__ : str = num_hash_functions UpperCAmelCase__ : List[str] = num_hash_buckets UpperCAmelCase__ : List[Any] = local_transformer_stride
182
"""simple docstring""" # Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available _A = { """configuration_cpmant""": ["""CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CpmAntConfig"""], """tokenization_cpmant""": ["""CpmAntTokenizer"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _A = [ """CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST""", """CpmAntForCausalLM""", """CpmAntModel""", """CpmAntPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig from .tokenization_cpmant import CpmAntTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_cpmant import ( CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST, CpmAntForCausalLM, CpmAntModel, CpmAntPreTrainedModel, ) else: import sys _A = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
182
1
'''simple docstring'''


def excel_title_to_column(column_title: str) -> int:
    # Convert an Excel-style column title (base 26, "A" == 1) to its column number.
    assert column_title.isupper()
    answer = 0
    index = len(column_title) - 1
    power = 0
    while index >= 0:
        value = (ord(column_title[index]) - 64) * pow(26, power)
        answer += value
        power += 1
        index -= 1
    return answer


if __name__ == "__main__":
    from doctest import testmod

    testmod()
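A few spot checks of the conversion (the expected values follow directly from base-26 positional arithmetic):

print(excel_title_to_column("A"))   # 1
print(excel_title_to_column("AB"))  # 1 * 26 + 2 == 28
print(excel_title_to_column("ZY"))  # 26 * 26 + 25 == 701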
204
'''simple docstring''' from collections import OrderedDict from typing import Any, List, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast, PatchingSpec from ...utils import logging lowerCAmelCase_ : int = logging.get_logger(__name__) lowerCAmelCase_ : Any = { """EleutherAI/gpt-j-6B""": """https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json""", # See all GPT-J models at https://huggingface.co/models?filter=gpt_j } class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE ): '''simple docstring''' UpperCAmelCase__ = '''gptj''' UpperCAmelCase__ = { '''max_position_embeddings''': '''n_positions''', '''hidden_size''': '''n_embd''', '''num_attention_heads''': '''n_head''', '''num_hidden_layers''': '''n_layer''', } def __init__( self : Optional[Any] , lowercase__ : Union[str, Any]=50_400 , lowercase__ : Union[str, Any]=2_048 , lowercase__ : Tuple=4_096 , lowercase__ : List[str]=28 , lowercase__ : Optional[int]=16 , lowercase__ : str=64 , lowercase__ : Any=None , lowercase__ : Any="gelu_new" , lowercase__ : Union[str, Any]=0.0 , lowercase__ : Optional[Any]=0.0 , lowercase__ : Any=0.0 , lowercase__ : Tuple=1e-5 , lowercase__ : Any=0.0_2 , lowercase__ : int=True , lowercase__ : int=50_256 , lowercase__ : Any=50_256 , lowercase__ : Tuple=False , **lowercase__ : str , ) ->Optional[Any]: '''simple docstring''' _UpperCamelCase : Dict = vocab_size _UpperCamelCase : List[str] = n_positions _UpperCamelCase : Union[str, Any] = n_embd _UpperCamelCase : Union[str, Any] = n_layer _UpperCamelCase : Optional[Any] = n_head _UpperCamelCase : Dict = n_inner _UpperCamelCase : Optional[Any] = rotary_dim _UpperCamelCase : Tuple = activation_function _UpperCamelCase : List[Any] = resid_pdrop _UpperCamelCase : Any = embd_pdrop _UpperCamelCase : Optional[Any] = attn_pdrop _UpperCamelCase : Optional[Any] = layer_norm_epsilon _UpperCamelCase : Union[str, Any] = initializer_range _UpperCamelCase : Optional[int] = use_cache _UpperCamelCase : str = bos_token_id _UpperCamelCase : Any = eos_token_id super().__init__( bos_token_id=lowercase__ , eos_token_id=lowercase__ , tie_word_embeddings=lowercase__ , **lowercase__ ) class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE ): '''simple docstring''' def __init__( self : Optional[int] , lowercase__ : PretrainedConfig , lowercase__ : str = "default" , lowercase__ : List[PatchingSpec] = None , lowercase__ : bool = False , ) ->Union[str, Any]: '''simple docstring''' super().__init__(lowercase__ , task=lowercase__ , patching_specs=lowercase__ , use_past=lowercase__ ) if not getattr(self._config , "pad_token_id" , lowercase__ ): # TODO: how to do that better? 
_UpperCamelCase : Optional[int] = 0 @property def snake_case__ ( self : List[str] ) ->Mapping[str, Mapping[int, str]]: '''simple docstring''' _UpperCamelCase : List[str] = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}} ) if self.use_past: self.fill_with_past_key_values_(lowercase__ , direction="inputs" ) _UpperCamelCase : str = {0: "batch", 1: "past_sequence + sequence"} else: _UpperCamelCase : Optional[Any] = {0: "batch", 1: "sequence"} return common_inputs @property def snake_case__ ( self : int ) ->int: '''simple docstring''' return self._config.n_layer @property def snake_case__ ( self : Dict ) ->int: '''simple docstring''' return self._config.n_head def snake_case__ ( self : int , lowercase__ : PreTrainedTokenizer , lowercase__ : int = -1 , lowercase__ : int = -1 , lowercase__ : bool = False , lowercase__ : Optional[TensorType] = None , ) ->Mapping[str, Any]: '''simple docstring''' _UpperCamelCase : int = super(lowercase__ , self ).generate_dummy_inputs( lowercase__ , batch_size=lowercase__ , seq_length=lowercase__ , is_pair=lowercase__ , framework=lowercase__ ) # We need to order the input in the way they appears in the forward() _UpperCamelCase : Tuple = OrderedDict({"input_ids": common_inputs["input_ids"]} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." ) else: import torch _UpperCamelCase , _UpperCamelCase : Optional[Any] = common_inputs["input_ids"].shape # Not using the same length for past_key_values _UpperCamelCase : Optional[int] = seqlen + 2 _UpperCamelCase : str = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) _UpperCamelCase : Dict = [ (torch.zeros(lowercase__ ), torch.zeros(lowercase__ )) for _ in range(self.num_layers ) ] _UpperCamelCase : str = common_inputs["attention_mask"] if self.use_past: _UpperCamelCase : int = ordered_inputs["attention_mask"].dtype _UpperCamelCase : Optional[int] = torch.cat( [ordered_inputs["attention_mask"], torch.ones(lowercase__ , lowercase__ , dtype=lowercase__ )] , dim=1 ) return ordered_inputs @property def snake_case__ ( self : Tuple ) ->int: '''simple docstring''' return 13
204
1
'''simple docstring'''


def partition(m: int) -> int:
    # Count the integer partitions of m with bottom-up dynamic programming:
    # memo[n][k] accumulates partitions of n using parts bounded by k.
    memo: list[list[int]] = [[0 for _ in range(m)] for _ in range(m + 1)]
    for i in range(m + 1):
        memo[i][0] = 1

    for n in range(m + 1):
        for k in range(1, m):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]

    return memo[m][m - 1]


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        try:
            n = int(input("Enter a number: ").strip())
            print(partition(n))
        except ValueError:
            print("Please enter a number.")
    else:
        try:
            n = int(sys.argv[1])
            print(partition(n))
        except ValueError:
            print("Please pass a number.")
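Quick checks against the known partition numbers p(3)=3, p(4)=5, p(7)=15:

for m, expected in [(3, 3), (4, 5), (7, 15)]:
    assert partition(m) == expected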
414
from typing import Callable, Optional, Union from ...configuration_utils import PretrainedConfig from ...utils import logging _lowercase = logging.get_logger(__name__) _lowercase = { 'microsoft/xprophetnet-large-wiki100-cased': ( 'https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json' ), } class lowerCamelCase__ ( A__ ): __lowerCamelCase = """xlm-prophetnet""" __lowerCamelCase = ["""past_key_values"""] __lowerCamelCase = { """num_attention_heads""": """num_encoder_attention_heads""", } def __init__( self : int , __a : Optional[float] = 0.1 , __a : Optional[Union[str, Callable]] = "gelu" , __a : Optional[int] = 30522 , __a : Optional[int] = 1024 , __a : Optional[int] = 4096 , __a : Optional[int] = 12 , __a : Optional[int] = 16 , __a : Optional[int] = 4096 , __a : Optional[int] = 12 , __a : Optional[int] = 16 , __a : Optional[float] = 0.1 , __a : Optional[float] = 0.1 , __a : Optional[int] = 512 , __a : Optional[float] = 0.02 , __a : Optional[bool] = True , __a : Optional[bool] = True , __a : Optional[int] = 0 , __a : Optional[int] = 2 , __a : Optional[int] = 32 , __a : Optional[int] = 128 , __a : Optional[bool] = False , __a : Optional[float] = 0.0 , __a : Optional[bool] = True , __a : Optional[int] = 0 , __a : Optional[int] = 1 , __a : Optional[int] = 2 , **__a : Union[str, Any] , ): '''simple docstring''' lowerCamelCase__: Union[str, Any] = vocab_size lowerCamelCase__: Union[str, Any] = hidden_size lowerCamelCase__: Dict = encoder_ffn_dim lowerCamelCase__: Union[str, Any] = num_encoder_layers lowerCamelCase__: List[str] = num_encoder_attention_heads lowerCamelCase__: Optional[Any] = decoder_ffn_dim lowerCamelCase__: Any = num_decoder_layers lowerCamelCase__: List[str] = num_decoder_attention_heads lowerCamelCase__: str = max_position_embeddings lowerCamelCase__: Dict = init_std # Normal(0, this parameter) lowerCamelCase__: Optional[int] = activation_function # parameters for xlmprophetnet lowerCamelCase__: Union[str, Any] = ngram lowerCamelCase__: Any = num_buckets lowerCamelCase__: int = relative_max_distance lowerCamelCase__: int = disable_ngram_loss lowerCamelCase__: Union[str, Any] = eps # 3 Types of Dropout lowerCamelCase__: Dict = attention_dropout lowerCamelCase__: List[Any] = activation_dropout lowerCamelCase__: Tuple = dropout lowerCamelCase__: Union[str, Any] = use_cache super().__init__( pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , is_encoder_decoder=__a , add_cross_attention=__a , decoder_start_token_id=__a , **__a , ) @property def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' return self.num_encoder_layers + self.num_decoder_layers @num_hidden_layers.setter def lowerCamelCase_ ( self : str , __a : str ): '''simple docstring''' raise NotImplementedError( """This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and""" """ `num_decoder_layers`.""" )
306
0
from typing import Optional
from urllib.parse import quote

import huggingface_hub as hfh
from packaging import version


def hf_hub_url(repo_id: str, path: str, revision: Optional[str] = None) -> str:
    if version.parse(hfh.__version__).release < version.parse("0.11.0").release:
        # old versions of hfh don't url-encode the file path
        path = quote(path)
    return hfh.hf_hub_url(repo_id, path, repo_type="dataset", revision=revision)
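A hedged usage sketch of the helper above (the repo id, file path, and revision are illustrative only):

url = hf_hub_url(
    "squad",
    "plain_text/train-00000-of-00001.parquet",
    revision="refs/convert/parquet",
)
print(url)  # a huggingface.co /datasets/.../resolve/... URL; old hfh versions get the path percent-encoded here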
715
import collections import inspect import unittest from transformers import SwinvaConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class __snake_case : def __init__( self ,a_ ,a_=13 ,a_=32 ,a_=2 ,a_=3 ,a_=16 ,a_=[1, 2, 1] ,a_=[2, 2, 4] ,a_=2 ,a_=2.0 ,a_=True ,a_=0.0 ,a_=0.0 ,a_=0.1 ,a_="gelu" ,a_=False ,a_=True ,a_=0.02 ,a_=1e-5 ,a_=True ,a_=None ,a_=True ,a_=10 ,a_=8 ,): """simple docstring""" lowerCAmelCase__ = parent lowerCAmelCase__ = batch_size lowerCAmelCase__ = image_size lowerCAmelCase__ = patch_size lowerCAmelCase__ = num_channels lowerCAmelCase__ = embed_dim lowerCAmelCase__ = depths lowerCAmelCase__ = num_heads lowerCAmelCase__ = window_size lowerCAmelCase__ = mlp_ratio lowerCAmelCase__ = qkv_bias lowerCAmelCase__ = hidden_dropout_prob lowerCAmelCase__ = attention_probs_dropout_prob lowerCAmelCase__ = drop_path_rate lowerCAmelCase__ = hidden_act lowerCAmelCase__ = use_absolute_embeddings lowerCAmelCase__ = patch_norm lowerCAmelCase__ = layer_norm_eps lowerCAmelCase__ = initializer_range lowerCAmelCase__ = is_training lowerCAmelCase__ = scope lowerCAmelCase__ = use_labels lowerCAmelCase__ = type_sequence_label_size lowerCAmelCase__ = encoder_stride def SCREAMING_SNAKE_CASE_ ( self ): """simple docstring""" lowerCAmelCase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCAmelCase__ = None if self.use_labels: lowerCAmelCase__ = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) lowerCAmelCase__ = self.get_config() return config, pixel_values, labels def SCREAMING_SNAKE_CASE_ ( self ): """simple docstring""" return SwinvaConfig( image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,embed_dim=self.embed_dim ,depths=self.depths ,num_heads=self.num_heads ,window_size=self.window_size ,mlp_ratio=self.mlp_ratio ,qkv_bias=self.qkv_bias ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,drop_path_rate=self.drop_path_rate ,hidden_act=self.hidden_act ,use_absolute_embeddings=self.use_absolute_embeddings ,path_norm=self.patch_norm ,layer_norm_eps=self.layer_norm_eps ,initializer_range=self.initializer_range ,encoder_stride=self.encoder_stride ,) def SCREAMING_SNAKE_CASE_ ( self ,a_ ,a_ ,a_ ): """simple docstring""" lowerCAmelCase__ = SwinvaModel(config=a_ ) model.to(a_ ) model.eval() lowerCAmelCase__ = model(a_ ) lowerCAmelCase__ = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) lowerCAmelCase__ = int(config.embed_dim * 2 ** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, expected_seq_len, expected_dim) ) def SCREAMING_SNAKE_CASE_ ( self ,a_ ,a_ ,a_ ): """simple docstring""" lowerCAmelCase__ = SwinvaForMaskedImageModeling(config=a_ ) model.to(a_ ) model.eval() lowerCAmelCase__ = 
model(a_ ) self.parent.assertEqual( result.logits.shape ,(self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images lowerCAmelCase__ = 1 lowerCAmelCase__ = SwinvaForMaskedImageModeling(a_ ) model.to(a_ ) model.eval() lowerCAmelCase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowerCAmelCase__ = model(a_ ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, 1, self.image_size, self.image_size) ) def SCREAMING_SNAKE_CASE_ ( self ,a_ ,a_ ,a_ ): """simple docstring""" lowerCAmelCase__ = self.type_sequence_label_size lowerCAmelCase__ = SwinvaForImageClassification(a_ ) model.to(a_ ) model.eval() lowerCAmelCase__ = model(a_ ,labels=a_ ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) ) def SCREAMING_SNAKE_CASE_ ( self ): """simple docstring""" lowerCAmelCase__ = self.prepare_config_and_inputs() lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = config_and_inputs lowerCAmelCase__ = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class __snake_case ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ): SCREAMING_SNAKE_CASE__ = ( (SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else () ) SCREAMING_SNAKE_CASE__ = ( {'feature-extraction': SwinvaModel, 'image-classification': SwinvaForImageClassification} if is_torch_available() else {} ) SCREAMING_SNAKE_CASE__ = False SCREAMING_SNAKE_CASE__ = False SCREAMING_SNAKE_CASE__ = False SCREAMING_SNAKE_CASE__ = False def SCREAMING_SNAKE_CASE_ ( self ): """simple docstring""" lowerCAmelCase__ = SwinvaModelTester(self ) lowerCAmelCase__ = ConfigTester(self ,config_class=a_ ,embed_dim=37 ) def SCREAMING_SNAKE_CASE_ ( self ): """simple docstring""" self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def SCREAMING_SNAKE_CASE_ ( self ): """simple docstring""" lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*a_ ) @unittest.skip(reason='Got `CUDA error: misaligned address` with PyTorch 2.0.0.' 
) def SCREAMING_SNAKE_CASE_ ( self ): """simple docstring""" pass @unittest.skip(reason='Swinv2 does not use inputs_embeds' ) def SCREAMING_SNAKE_CASE_ ( self ): """simple docstring""" pass def SCREAMING_SNAKE_CASE_ ( self ): """simple docstring""" lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCAmelCase__ = model_class(a_ ) self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) ) lowerCAmelCase__ = model.get_output_embeddings() self.assertTrue(x is None or isinstance(a_ ,nn.Linear ) ) def SCREAMING_SNAKE_CASE_ ( self ): """simple docstring""" lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCAmelCase__ = model_class(a_ ) lowerCAmelCase__ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCAmelCase__ = [*signature.parameters.keys()] lowerCAmelCase__ = ['pixel_values'] self.assertListEqual(arg_names[:1] ,a_ ) def SCREAMING_SNAKE_CASE_ ( self ): """simple docstring""" lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common() lowerCAmelCase__ = True for model_class in self.all_model_classes: lowerCAmelCase__ = True lowerCAmelCase__ = False lowerCAmelCase__ = True lowerCAmelCase__ = model_class(a_ ) model.to(a_ ) model.eval() with torch.no_grad(): lowerCAmelCase__ = model(**self._prepare_for_class(a_ ,a_ ) ) lowerCAmelCase__ = outputs.attentions lowerCAmelCase__ = len(self.model_tester.depths ) self.assertEqual(len(a_ ) ,a_ ) # check that output_attentions also work using config del inputs_dict["output_attentions"] lowerCAmelCase__ = True lowerCAmelCase__ = config.window_size**2 lowerCAmelCase__ = model_class(a_ ) model.to(a_ ) model.eval() with torch.no_grad(): lowerCAmelCase__ = model(**self._prepare_for_class(a_ ,a_ ) ) lowerCAmelCase__ = outputs.attentions self.assertEqual(len(a_ ) ,a_ ) self.assertListEqual( list(attentions[0].shape[-3:] ) ,[self.model_tester.num_heads[0], window_size_squared, window_size_squared] ,) lowerCAmelCase__ = len(a_ ) # Check attention is always last and order is fine lowerCAmelCase__ = True lowerCAmelCase__ = True lowerCAmelCase__ = model_class(a_ ) model.to(a_ ) model.eval() with torch.no_grad(): lowerCAmelCase__ = model(**self._prepare_for_class(a_ ,a_ ) ) if hasattr(self.model_tester ,'num_hidden_states_types' ): lowerCAmelCase__ = self.model_tester.num_hidden_states_types else: # also another +1 for reshaped_hidden_states lowerCAmelCase__ = 2 self.assertEqual(out_len + added_hidden_states ,len(a_ ) ) lowerCAmelCase__ = outputs.attentions self.assertEqual(len(a_ ) ,a_ ) self.assertListEqual( list(self_attentions[0].shape[-3:] ) ,[self.model_tester.num_heads[0], window_size_squared, window_size_squared] ,) def SCREAMING_SNAKE_CASE_ ( self ,a_ ,a_ ,a_ ,a_ ): """simple docstring""" lowerCAmelCase__ = model_class(a_ ) model.to(a_ ) model.eval() with torch.no_grad(): lowerCAmelCase__ = model(**self._prepare_for_class(a_ ,a_ ) ) lowerCAmelCase__ = outputs.hidden_states lowerCAmelCase__ = getattr( self.model_tester ,'expected_num_hidden_layers' ,len(self.model_tester.depths ) + 1 ) self.assertEqual(len(a_ ) ,a_ ) # Swinv2 has a different seq_length lowerCAmelCase__ = ( config.patch_size if isinstance(config.patch_size ,collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) lowerCAmelCase__ = (image_size[1] // patch_size[1]) * (image_size[0] // 
patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,) lowerCAmelCase__ = outputs.reshaped_hidden_states self.assertEqual(len(a_ ) ,a_ ) lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = reshaped_hidden_states[0].shape lowerCAmelCase__ = ( reshaped_hidden_states[0].view(a_ ,a_ ,height * width ).permute(0 ,2 ,1 ) ) self.assertListEqual( list(reshaped_hidden_states.shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,) def SCREAMING_SNAKE_CASE_ ( self ): """simple docstring""" lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common() lowerCAmelCase__ = ( self.model_tester.image_size if isinstance(self.model_tester.image_size ,collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes: lowerCAmelCase__ = True self.check_hidden_states_output(a_ ,a_ ,a_ ,a_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowerCAmelCase__ = True self.check_hidden_states_output(a_ ,a_ ,a_ ,a_ ) def SCREAMING_SNAKE_CASE_ ( self ): """simple docstring""" lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common() lowerCAmelCase__ = 3 lowerCAmelCase__ = ( self.model_tester.image_size if isinstance(self.model_tester.image_size ,collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) lowerCAmelCase__ = ( config.patch_size if isinstance(config.patch_size ,collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) lowerCAmelCase__ = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) lowerCAmelCase__ = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes: lowerCAmelCase__ = True self.check_hidden_states_output(a_ ,a_ ,a_ ,(padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowerCAmelCase__ = True self.check_hidden_states_output(a_ ,a_ ,a_ ,(padded_height, padded_width) ) def SCREAMING_SNAKE_CASE_ ( self ): """simple docstring""" lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*a_ ) def SCREAMING_SNAKE_CASE_ ( self ): """simple docstring""" lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*a_ ) @slow def SCREAMING_SNAKE_CASE_ ( self ): """simple docstring""" for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCAmelCase__ = SwinvaModel.from_pretrained(a_ ) self.assertIsNotNone(a_ ) def SCREAMING_SNAKE_CASE_ ( self ): """simple docstring""" lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common() lowerCAmelCase__ = _config_zero_init(a_ ) for model_class in self.all_model_classes: lowerCAmelCase__ = model_class(config=a_ ) for name, param in model.named_parameters(): if "embeddings" not in name and "logit_scale" not in name and param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item() ,[0.0, 1.0] ,msg=f'Parameter {name} of model {model_class} seems not properly initialized' ,) @require_vision @require_torch class __snake_case ( unittest.TestCase ): @cached_property def SCREAMING_SNAKE_CASE_ ( self ): """simple docstring""" return ( AutoImageProcessor.from_pretrained('microsoft/swinv2-tiny-patch4-window8-256' ) if 
is_vision_available() else None ) @slow def SCREAMING_SNAKE_CASE_ ( self ): """simple docstring""" lowerCAmelCase__ = SwinvaForImageClassification.from_pretrained('microsoft/swinv2-tiny-patch4-window8-256' ).to( a_ ) lowerCAmelCase__ = self.default_image_processor lowerCAmelCase__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) lowerCAmelCase__ = image_processor(images=a_ ,return_tensors='pt' ).to(a_ ) # forward pass with torch.no_grad(): lowerCAmelCase__ = model(**a_ ) # verify the logits lowerCAmelCase__ = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape ,a_ ) lowerCAmelCase__ = torch.tensor([-0.3947, -0.4306, 0.0026] ).to(a_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] ,a_ ,atol=1e-4 ) )
604
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _lowercase : Optional[Any] = { 'configuration_upernet': ['UperNetConfig'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : int = [ 'UperNetForSemanticSegmentation', 'UperNetPreTrainedModel', ] if TYPE_CHECKING: from .configuration_upernet import UperNetConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel else: import sys _lowercase : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
49
'''simple docstring''' import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( HubertConfig, HubertForCTC, HubertModel, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() a = logging.get_logger(__name__) a = { "post_extract_proj": "feature_projection.projection", "encoder.pos_conv.0": "encoder.pos_conv_embed.conv", "self_attn.k_proj": "encoder.layers.*.attention.k_proj", "self_attn.v_proj": "encoder.layers.*.attention.v_proj", "self_attn.q_proj": "encoder.layers.*.attention.q_proj", "self_attn.out_proj": "encoder.layers.*.attention.out_proj", "self_attn_layer_norm": "encoder.layers.*.layer_norm", "fc1": "encoder.layers.*.feed_forward.intermediate_dense", "fc2": "encoder.layers.*.feed_forward.output_dense", "final_layer_norm": "encoder.layers.*.final_layer_norm", "encoder.layer_norm": "encoder.layer_norm", "w2v_model.layer_norm": "feature_projection.layer_norm", "w2v_encoder.proj": "lm_head", "mask_emb": "masked_spec_embed", } def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Dict: '''simple docstring''' for attribute in key.split(""".""" ): __SCREAMING_SNAKE_CASE = getattr(__UpperCAmelCase , __UpperCAmelCase ) if weight_type is not None: __SCREAMING_SNAKE_CASE = getattr(__UpperCAmelCase , __UpperCAmelCase ).shape else: __SCREAMING_SNAKE_CASE = hf_pointer.shape assert hf_shape == value.shape, ( f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be""" f""" {value.shape} for {full_name}""" ) if weight_type == "weight": __SCREAMING_SNAKE_CASE = value elif weight_type == "weight_g": __SCREAMING_SNAKE_CASE = value elif weight_type == "weight_v": __SCREAMING_SNAKE_CASE = value elif weight_type == "bias": __SCREAMING_SNAKE_CASE = value else: __SCREAMING_SNAKE_CASE = value logger.info(f"""{key + '.' 
+ weight_type if weight_type is not None else ''} was initialized from {full_name}.""" ) def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Union[str, Any]: '''simple docstring''' __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = fairseq_model.state_dict() __SCREAMING_SNAKE_CASE = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor for name, value in fairseq_dict.items(): __SCREAMING_SNAKE_CASE = False if "conv_layers" in name: load_conv_layer( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , hf_model.config.feat_extract_norm == """group""" , ) __SCREAMING_SNAKE_CASE = True else: for key, mapped_key in MAPPING.items(): __SCREAMING_SNAKE_CASE = """hubert.""" + mapped_key if (is_finetuned and mapped_key != """lm_head""") else mapped_key if key in name or (key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0] and not is_finetuned): __SCREAMING_SNAKE_CASE = True if "*" in mapped_key: __SCREAMING_SNAKE_CASE = name.split(__UpperCAmelCase )[0].split(""".""" )[-2] __SCREAMING_SNAKE_CASE = mapped_key.replace("""*""" , __UpperCAmelCase ) if "weight_g" in name: __SCREAMING_SNAKE_CASE = """weight_g""" elif "weight_v" in name: __SCREAMING_SNAKE_CASE = """weight_v""" elif "weight" in name: __SCREAMING_SNAKE_CASE = """weight""" elif "bias" in name: __SCREAMING_SNAKE_CASE = """bias""" else: __SCREAMING_SNAKE_CASE = None set_recursively(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) continue if not is_used: unused_weights.append(__UpperCAmelCase ) logger.warning(f"""Unused weights: {unused_weights}""" ) def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Union[str, Any]: '''simple docstring''' __SCREAMING_SNAKE_CASE = full_name.split("""conv_layers.""" )[-1] __SCREAMING_SNAKE_CASE = name.split(""".""" ) __SCREAMING_SNAKE_CASE = int(items[0] ) __SCREAMING_SNAKE_CASE = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) __SCREAMING_SNAKE_CASE = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) __SCREAMING_SNAKE_CASE = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was""" " found." 
) __SCREAMING_SNAKE_CASE = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" ) __SCREAMING_SNAKE_CASE = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(__UpperCAmelCase ) @torch.no_grad() def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=True ) -> Tuple: '''simple docstring''' if config_path is not None: __SCREAMING_SNAKE_CASE = HubertConfig.from_pretrained(__UpperCAmelCase ) else: __SCREAMING_SNAKE_CASE = HubertConfig() if is_finetuned: if dict_path: __SCREAMING_SNAKE_CASE = Dictionary.load(__UpperCAmelCase ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq __SCREAMING_SNAKE_CASE = target_dict.pad_index __SCREAMING_SNAKE_CASE = target_dict.bos_index __SCREAMING_SNAKE_CASE = target_dict.eos_index __SCREAMING_SNAKE_CASE = len(target_dict.symbols ) __SCREAMING_SNAKE_CASE = os.path.join(__UpperCAmelCase , """vocab.json""" ) if not os.path.isdir(__UpperCAmelCase ): logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(__UpperCAmelCase ) ) return os.makedirs(__UpperCAmelCase , exist_ok=__UpperCAmelCase ) with open(__UpperCAmelCase , """w""" , encoding="""utf-8""" ) as vocab_handle: json.dump(target_dict.indices , __UpperCAmelCase ) __SCREAMING_SNAKE_CASE = WavaVecaCTCTokenizer( __UpperCAmelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=__UpperCAmelCase , ) __SCREAMING_SNAKE_CASE = True if config.feat_extract_norm == """layer""" else False __SCREAMING_SNAKE_CASE = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=__UpperCAmelCase , return_attention_mask=__UpperCAmelCase , ) __SCREAMING_SNAKE_CASE = WavaVecaProcessor(feature_extractor=__UpperCAmelCase , tokenizer=__UpperCAmelCase ) processor.save_pretrained(__UpperCAmelCase ) __SCREAMING_SNAKE_CASE = HubertForCTC(__UpperCAmelCase ) else: __SCREAMING_SNAKE_CASE = HubertModel(__UpperCAmelCase ) if is_finetuned: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} ) else: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] ) __SCREAMING_SNAKE_CASE = model[0].eval() recursively_load_weights(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) hf_wavavec.save_pretrained(__UpperCAmelCase ) if __name__ == "__main__": a = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint") parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") 
parser.add_argument( "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not" ) a = parser.parse_args() convert_hubert_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
109
0
"""simple docstring""" from __future__ import annotations import inspect import unittest import numpy as np from transformers import DeiTConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, TFDeiTModel, ) from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DeiTImageProcessor class snake_case_ : def __init__( self , a_ , a_=1_3 , a_=3_0 , a_=2 , a_=3 , a_=True , a_=True , a_=3_2 , a_=2 , a_=4 , a_=3_7 , a_="gelu" , a_=0.1 , a_=0.1 , a_=1_0 , a_=0.02 , a_=3 , a_=None , a_=2 , ): a_ : Any = parent a_ : List[Any] = batch_size a_ : Optional[int] = image_size a_ : Optional[int] = patch_size a_ : Tuple = num_channels a_ : Union[str, Any] = is_training a_ : Optional[int] = use_labels a_ : List[Any] = hidden_size a_ : Optional[Any] = num_hidden_layers a_ : Dict = num_attention_heads a_ : Any = intermediate_size a_ : int = hidden_act a_ : Tuple = hidden_dropout_prob a_ : Tuple = attention_probs_dropout_prob a_ : Tuple = type_sequence_label_size a_ : List[Any] = initializer_range a_ : Tuple = scope a_ : List[str] = encoder_stride # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens) a_ : List[str] = (image_size // patch_size) ** 2 a_ : int = num_patches + 2 def snake_case_ ( self ): a_ : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) a_ : Dict = None if self.use_labels: a_ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size ) a_ : int = self.get_config() return config, pixel_values, labels def snake_case_ ( self ): return DeiTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_A , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def snake_case_ ( self , a_ , a_ , a_ ): a_ : List[Any] = TFDeiTModel(config=_A ) a_ : Dict = model(_A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def snake_case_ ( self , a_ , a_ , a_ ): a_ : List[Any] = TFDeiTForMaskedImageModeling(config=_A ) a_ : Tuple = model(_A ) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images a_ : Optional[int] = 1 a_ : List[Any] = TFDeiTForMaskedImageModeling(_A ) a_ : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) a_ : Tuple = model(_A ) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def snake_case_ ( self , a_ , a_ , a_ ): a_ : Any = self.type_sequence_label_size a_ : str = 
TFDeiTForImageClassification(_A ) a_ : Dict = model(_A , labels=_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images a_ : Optional[Any] = 1 a_ : Any = TFDeiTForImageClassification(_A ) a_ : Tuple = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) a_ : Union[str, Any] = model(_A , labels=_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def snake_case_ ( self ): a_ : List[str] = self.prepare_config_and_inputs() a_ , a_ , a_ : Optional[int] = config_and_inputs a_ : Tuple = {"pixel_values": pixel_values} return config, inputs_dict @require_tf class snake_case_ ( __lowercase ,__lowercase ,unittest.TestCase ): __lowerCAmelCase = ( ( TFDeiTModel, TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, ) if is_tf_available() else () ) __lowerCAmelCase = ( { "feature-extraction": TFDeiTModel, "image-classification": (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher), } if is_tf_available() else {} ) __lowerCAmelCase = False __lowerCAmelCase = False __lowerCAmelCase = False __lowerCAmelCase = False def snake_case_ ( self ): a_ : Any = TFDeiTModelTester(self ) a_ : Tuple = ConfigTester(self , config_class=_A , has_text_modality=_A , hidden_size=3_7 ) def snake_case_ ( self ): self.config_tester.run_common_tests() @unittest.skip(reason="DeiT does not use inputs_embeds" ) def snake_case_ ( self ): pass def snake_case_ ( self ): a_ , a_ : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a_ : Optional[int] = model_class(_A ) self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) ) a_ : List[str] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_A , tf.keras.layers.Dense ) ) def snake_case_ ( self ): a_ , a_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a_ : List[str] = model_class(_A ) a_ : List[str] = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic a_ : Tuple = [*signature.parameters.keys()] a_ : List[Any] = ["pixel_values"] self.assertListEqual(arg_names[:1] , _A ) def snake_case_ ( self ): a_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_A ) def snake_case_ ( self ): a_ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*_A ) def snake_case_ ( self ): a_ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_A ) def snake_case_ ( self , a_ , a_ , a_=False ): a_ : List[Any] = super()._prepare_for_class(_A , _A , return_labels=_A ) if return_labels: if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters: del inputs_dict["labels"] return inputs_dict @slow def snake_case_ ( self ): for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: a_ : Optional[Any] = TFDeiTModel.from_pretrained(_A ) self.assertIsNotNone(_A ) def lowerCAmelCase_ ( ) -> Optional[int]: a_ : Tuple = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_tf @require_vision class snake_case_ ( unittest.TestCase ): @cached_property def snake_case_ ( self ): return ( 
DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224" ) if is_vision_available() else None ) @slow def snake_case_ ( self ): a_ : Any = TFDeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224" ) a_ : Optional[int] = self.default_image_processor a_ : Any = prepare_img() a_ : Any = image_processor(images=_A , return_tensors="tf" ) # forward pass a_ : int = model(**_A ) # verify the logits a_ : Tuple = tf.TensorShape((1, 1_0_0_0) ) self.assertEqual(outputs.logits.shape , _A ) a_ : Optional[int] = tf.constant([-1.0_266, 0.1_912, -1.2_861] ) self.assertTrue(np.allclose(outputs.logits[0, :3] , _A , atol=1e-4 ) )
705
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available SCREAMING_SNAKE_CASE_ = { """configuration_bigbird_pegasus""": [ """BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BigBirdPegasusConfig""", """BigBirdPegasusOnnxConfig""", ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE_ = [ """BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST""", """BigBirdPegasusForCausalLM""", """BigBirdPegasusForConditionalGeneration""", """BigBirdPegasusForQuestionAnswering""", """BigBirdPegasusForSequenceClassification""", """BigBirdPegasusModel""", """BigBirdPegasusPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_bigbird_pegasus import ( BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP, BigBirdPegasusConfig, BigBirdPegasusOnnxConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_bigbird_pegasus import ( BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST, BigBirdPegasusForCausalLM, BigBirdPegasusForConditionalGeneration, BigBirdPegasusForQuestionAnswering, BigBirdPegasusForSequenceClassification, BigBirdPegasusModel, BigBirdPegasusPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
370
0
def solution(length: int = 50) -> int:
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1)]

    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                different_colour_ways_number[row_length][tile_length - 2] += (
                    different_colour_ways_number[row_length - tile_start - tile_length][
                        tile_length - 2
                    ]
                    + 1
                )

    return sum(different_colour_ways_number[length])


if __name__ == "__main__":
    print(f"{solution() = }")
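# Added sanity check (not part of the original solution): the Project Euler 116
# statement works the length-5 example by hand -- 7 red, 3 green and 2 blue
# tilings -- so the function should satisfy:
#
#   >>> solution(5)
#   12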
669
import warnings

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class AltCLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
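# Minimal usage sketch (an added example; the checkpoint name below is an
# assumption -- substitute any Hub checkpoint that ships this processor):
#
#   from transformers import AltCLIPProcessor
#   from PIL import Image
#
#   processor = AltCLIPProcessor.from_pretrained("BAAI/AltCLIP")
#   inputs = processor(
#       text=["a photo of a cat"], images=Image.open("cat.png"), return_tensors="pt", padding=True
#   )
#   # -> tokenizer fields (input_ids, attention_mask) plus pixel_values from the image processor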
669
1
class Node:
    def __init__(self, name, val):
        self.name = name
        self.val = val

    def __str__(self):
        return f"{self.__class__.__name__}({self.name}, {self.val})"

    def __lt__(self, other):
        return self.val < other.val


class MinHeap:
    def __init__(self, array):
        self.idx_of_element = {}
        self.heap_dict = {}
        self.heap = self.build_heap(array)

    def __getitem__(self, key):
        return self.get_value(key)

    def get_parent_idx(self, idx):
        return (idx - 1) // 2

    def get_left_child_idx(self, idx):
        return idx * 2 + 1

    def get_right_child_idx(self, idx):
        return idx * 2 + 2

    def get_value(self, key):
        return self.heap_dict[key]

    def build_heap(self, array):
        last_idx = len(array) - 1
        start_from = self.get_parent_idx(last_idx)

        for idx, i in enumerate(array):
            self.idx_of_element[i] = idx
            self.heap_dict[i.name] = i.val

        for i in range(start_from, -1, -1):
            self.sift_down(i, array)
        return array

    def sift_down(self, idx, array):
        while True:
            l = self.get_left_child_idx(idx)  # noqa: E741
            r = self.get_right_child_idx(idx)

            smallest = idx
            if l < len(array) and array[l] < array[idx]:
                smallest = l
            if r < len(array) and array[r] < array[smallest]:
                smallest = r

            if smallest != idx:
                array[idx], array[smallest] = array[smallest], array[idx]
                (
                    self.idx_of_element[array[idx]],
                    self.idx_of_element[array[smallest]],
                ) = (
                    self.idx_of_element[array[smallest]],
                    self.idx_of_element[array[idx]],
                )
                idx = smallest
            else:
                break

    def sift_up(self, idx):
        p = self.get_parent_idx(idx)
        while p >= 0 and self.heap[p] > self.heap[idx]:
            self.heap[p], self.heap[idx] = self.heap[idx], self.heap[p]
            self.idx_of_element[self.heap[p]], self.idx_of_element[self.heap[idx]] = (
                self.idx_of_element[self.heap[idx]],
                self.idx_of_element[self.heap[p]],
            )
            idx = p
            p = self.get_parent_idx(idx)

    def peek(self):
        return self.heap[0]

    def remove(self):
        self.heap[0], self.heap[-1] = self.heap[-1], self.heap[0]
        self.idx_of_element[self.heap[0]], self.idx_of_element[self.heap[-1]] = (
            self.idx_of_element[self.heap[-1]],
            self.idx_of_element[self.heap[0]],
        )

        x = self.heap.pop()
        del self.idx_of_element[x]
        self.sift_down(0, self.heap)
        return x

    def insert(self, node):
        self.heap.append(node)
        self.idx_of_element[node] = len(self.heap) - 1
        self.heap_dict[node.name] = node.val
        self.sift_up(len(self.heap) - 1)

    def is_empty(self):
        return len(self.heap) == 0

    def decrease_key(self, node, new_value):
        assert (
            self.heap[self.idx_of_element[node]].val > new_value
        ), "newValue must be less than current value"
        node.val = new_value
        self.heap_dict[node.name] = new_value
        self.sift_up(self.idx_of_element[node])


r = Node("R", -1)
b = Node("B", 6)
a = Node("A", 3)
x = Node("X", 1)
e = Node("E", 4)

# Use one of these two ways to generate Min-Heap

# Generating Min-Heap from array
my_min_heap = MinHeap([r, b, a, x, e])

# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)

# Before
print("Min Heap - before decrease key")
for i in my_min_heap.heap:
    print(i)

print("Min Heap - After decrease key of node [B -> -17]")
my_min_heap.decrease_key(b, -17)

# After
for i in my_min_heap.heap:
    print(i)

if __name__ == "__main__":
    import doctest

    doctest.testmod()
80
from transformers import BertTokenizer, EncoderDecoderModel, Seq2SeqTrainer, Seq2SeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available


if is_datasets_available():
    import datasets


class Seq2seqTrainerTester(TestCasePlus):
    @slow
    @require_torch
    def test_finetune_bert2bert(self):
        bert2bert = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny", "prajjwal1/bert-tiny")
        tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

        bert2bert.config.vocab_size = bert2bert.config.encoder.vocab_size
        bert2bert.config.eos_token_id = tokenizer.sep_token_id
        bert2bert.config.decoder_start_token_id = tokenizer.cls_token_id
        bert2bert.config.max_length = 128

        train_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="train[:1%]")
        val_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="validation[:1%]")

        train_dataset = train_dataset.select(range(32))
        val_dataset = val_dataset.select(range(16))

        batch_size = 4

        def _map_to_encoder_decoder_inputs(batch):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch["article"], padding="max_length", truncation=True, max_length=512)
            outputs = tokenizer(batch["highlights"], padding="max_length", truncation=True, max_length=128)
            batch["input_ids"] = inputs.input_ids
            batch["attention_mask"] = inputs.attention_mask

            batch["decoder_input_ids"] = outputs.input_ids
            batch["labels"] = outputs.input_ids.copy()
            batch["labels"] = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
            ]
            batch["decoder_attention_mask"] = outputs.attention_mask

            assert all(len(x) == 512 for x in inputs.input_ids)
            assert all(len(x) == 128 for x in outputs.input_ids)

            return batch

        def _compute_metrics(pred):
            labels_ids = pred.label_ids
            pred_ids = pred.predictions

            # all unnecessary tokens are removed
            pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True)
            label_str = tokenizer.batch_decode(labels_ids, skip_special_tokens=True)

            accuracy = sum([int(pred_str[i] == label_str[i]) for i in range(len(pred_str))]) / len(pred_str)

            return {"accuracy": accuracy}

        # map train dataset
        train_dataset = train_dataset.map(
            _map_to_encoder_decoder_inputs,
            batched=True,
            batch_size=batch_size,
            remove_columns=["article", "highlights"],
        )
        train_dataset.set_format(
            type="torch",
            columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )

        # same for validation dataset
        val_dataset = val_dataset.map(
            _map_to_encoder_decoder_inputs,
            batched=True,
            batch_size=batch_size,
            remove_columns=["article", "highlights"],
        )
        val_dataset.set_format(
            type="torch",
            columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )

        output_dir = self.get_auto_remove_tmp_dir()

        training_args = Seq2SeqTrainingArguments(
            output_dir=output_dir,
            per_device_train_batch_size=batch_size,
            per_device_eval_batch_size=batch_size,
            predict_with_generate=True,
            evaluation_strategy="steps",
            do_train=True,
            do_eval=True,
            warmup_steps=0,
            eval_steps=2,
            logging_steps=2,
        )

        # instantiate trainer
        trainer = Seq2SeqTrainer(
            model=bert2bert,
            args=training_args,
            compute_metrics=_compute_metrics,
            train_dataset=train_dataset,
            eval_dataset=val_dataset,
            tokenizer=tokenizer,
        )

        # start training
        trainer.train()
80
1
'''simple docstring'''

from math import asin, atan, cos, radians, sin, sqrt, tan

# CONSTANTS per WGS84 https://en.wikipedia.org/wiki/World_Geodetic_System
# Distance in metres (m)
AXIS_A = 6378137.0
AXIS_B = 6356752.314245
RADIUS = 6378137


def haversine_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    """
    Calculate the great circle distance in metres between two points on the
    Earth, given their latitudes and longitudes in degrees.
    """
    # Equation parameters
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    phi_1 = atan((1 - flattening) * tan(radians(lat1)))
    phi_2 = atan((1 - flattening) * tan(radians(lat2)))
    lambda_1 = radians(lon1)
    lambda_2 = radians(lon2)
    # Equation
    sin_sq_phi = sin((phi_2 - phi_1) / 2)
    sin_sq_lambda = sin((lambda_2 - lambda_1) / 2)
    # Square both values
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda
    h_value = sqrt(sin_sq_phi + (cos(phi_1) * cos(phi_2) * sin_sq_lambda))
    return 2 * RADIUS * asin(h_value)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
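# Added usage example (coordinates are San Francisco and Yosemite Valley; the
# ~254 km figure is approximate, not an asserted doctest):
#
#   >>> SAN_FRANCISCO = (37.774856, -122.424227)
#   >>> YOSEMITE = (37.864742, -119.537521)
#   >>> haversine_distance(*SAN_FRANCISCO, *YOSEMITE) / 1000  # doctest: +SKIP
#   254.3...  # roughly 254 km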
185
'''simple docstring'''


def prefix_function(input_string: str) -> list:
    """
    For each prefix of the input string, compute the length of the longest
    proper prefix that is also a suffix (the KMP failure function).
    """
    prefix_result = [0] * len(input_string)

    for i in range(1, len(input_string)):
        # use last results for better performance - dynamic programming
        j = prefix_result[i - 1]
        while j > 0 and input_string[i] != input_string[j]:
            j = prefix_result[j - 1]

        if input_string[i] == input_string[j]:
            j += 1
        prefix_result[i] = j

    return prefix_result


def longest_prefix(input_string: str) -> int:
    """
    Return the length of the longest border (prefix that is also a suffix)
    over all prefixes of the string.
    """
    return max(prefix_function(input_string))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
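# Added worked example: for "aabcdaabc" the prefix function is
# [0, 1, 0, 0, 0, 1, 2, 3, 4] -- the trailing 4 records that the border "aabc"
# is both a proper prefix and a suffix -- so longest_prefix returns 4:
#
#   >>> prefix_function("aabcdaabc")
#   [0, 1, 0, 0, 0, 1, 2, 3, 4]
#   >>> longest_prefix("aabcdaabc")
#   4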
404
0
import string

import numpy


def greatest_common_divisor(a: int, b: int) -> int:
    return b if a == 0 else greatest_common_divisor(b % a, a)


class HillCipher:
    key_string = string.ascii_uppercase + string.digits
    # This cipher takes alphanumerics into account
    # i.e. a total of 36 characters

    # take x and return x % len(key_string)
    modulus = numpy.vectorize(lambda x: x % 36)

    to_int = numpy.vectorize(lambda x: round(x))

    def __init__(self, encrypt_key: numpy.ndarray) -> None:
        self.encrypt_key = self.modulus(encrypt_key)  # mod36 calc's on the encrypt key
        self.check_determinant()  # validate the determinant of the encryption key
        self.break_key = encrypt_key.shape[0]

    def replace_letters(self, letter: str) -> int:
        return self.key_string.index(letter)

    def replace_digits(self, num: int) -> str:
        return self.key_string[round(num)]

    def check_determinant(self) -> None:
        det = round(numpy.linalg.det(self.encrypt_key))

        if det < 0:
            det = det % len(self.key_string)

        req_l = len(self.key_string)
        if greatest_common_divisor(det, len(self.key_string)) != 1:
            msg = (
                f"determinant modular {req_l} of encryption key({det}) "
                f"is not co prime w.r.t {req_l}.\nTry another key."
            )
            raise ValueError(msg)

    def process_text(self, text: str) -> str:
        chars = [char for char in text.upper() if char in self.key_string]

        last = chars[-1]
        while len(chars) % self.break_key != 0:
            chars.append(last)

        return "".join(chars)

    def encrypt(self, text: str) -> str:
        text = self.process_text(text.upper())
        encrypted = ""

        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_encrypted = self.modulus(self.encrypt_key.dot(batch_vec)).T.tolist()[
                0
            ]
            encrypted_batch = "".join(
                self.replace_digits(num) for num in batch_encrypted
            )
            encrypted += encrypted_batch

        return encrypted

    def make_decrypt_key(self) -> numpy.ndarray:
        det = round(numpy.linalg.det(self.encrypt_key))

        if det < 0:
            det = det % len(self.key_string)
        det_inv = None
        for i in range(len(self.key_string)):
            if (det * i) % len(self.key_string) == 1:
                det_inv = i
                break

        inv_key = (
            det_inv * numpy.linalg.det(self.encrypt_key) * numpy.linalg.inv(self.encrypt_key)
        )

        return self.to_int(self.modulus(inv_key))

    def decrypt(self, text: str) -> str:
        decrypt_key = self.make_decrypt_key()
        text = self.process_text(text.upper())
        decrypted = ""

        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_decrypted = self.modulus(decrypt_key.dot(batch_vec)).T.tolist()[0]
            decrypted_batch = "".join(
                self.replace_digits(num) for num in batch_decrypted
            )
            decrypted += decrypted_batch

        return decrypted


def main() -> None:
    n = int(input("Enter the order of the encryption key: "))
    hill_matrix = []

    print("Enter each row of the encryption key with space separated integers")
    for _ in range(n):
        row = [int(x) for x in input().split()]
        hill_matrix.append(row)

    hc = HillCipher(numpy.array(hill_matrix))

    print("Would you like to encrypt or decrypt some text? (1 or 2)")
    option = input("\n1. Encrypt\n2. Decrypt\n")
    if option == "1":
        text_e = input("What text would you like to encrypt?: ")
        print("Your encrypted text is:")
        print(hc.encrypt(text_e))
    elif option == "2":
        text_d = input("What text would you like to decrypt?: ")
        print("Your decrypted text is:")
        print(hc.decrypt(text_d))


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    main()
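# Added non-interactive sketch (the 2x2 key is illustrative; the exact
# ciphertext is deliberately not asserted, and note that process_text pads the
# plaintext to a multiple of the block size):
#
#   >>> hc = HillCipher(numpy.array([[2, 5], [1, 6]]))
#   >>> cipher = hc.encrypt("testing hill cipher")
#   >>> hc.decrypt(cipher)  # round-trips the padded plaintext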
719
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple

from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available

from ...test_tokenization_common import TokenizerTesterMixin


if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"


class PerceiverTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PerceiverTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = PerceiverTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def perceiver_tokenizer(self):
        return PerceiverTokenizer.from_pretrained("deepmind/language-perceiver")

    def get_tokenizer(self, **kwargs) -> PerceiverTokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        # XXX The default common tokenizer tests assume that every ID is decodable on its own.
        # This assumption is invalid for Perceiver because single bytes might not be
        # valid utf-8 (byte 128 for instance).
        # Here we're overriding the smallest possible method to provide
        # a clean sequence without making the same assumption.
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                pass
            toks.append((i, tok))

        toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids

    def test_multibytes_char(self):
        tokenizer = self.perceiver_tokenizer
        src_text = "Unicode €."
        encoded = tokenizer(src_text)
        encoded_ids = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
        self.assertEqual(encoded["input_ids"], encoded_ids)

        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "[CLS]Unicode €.[SEP]")

        encoded = tokenizer("e è é ê ë")
        encoded_ids = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
        self.assertEqual(encoded["input_ids"], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "[CLS]e è é ê ë[SEP]")

        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë")), "[CLS]e è é ê ë[SEP]")

    def test_prepare_batch_integration(self):
        tokenizer = self.perceiver_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        # fmt: off
        expected_src_tokens = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)

        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0])
        else:
            result = list(batch.input_ids.tolist()[0])

        self.assertListEqual(expected_src_tokens, result)

        self.assertEqual((2, 38), batch.input_ids.shape)
        self.assertEqual((2, 38), batch.attention_mask.shape)

    def test_empty_target_text(self):
        tokenizer = self.perceiver_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertNotIn("decoder_input_ids", batch)
        self.assertNotIn("decoder_attention_mask", batch)

    def test_max_length_integration(self):
        tokenizer = self.perceiver_tokenizer
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors=FRAMEWORK
        )
        self.assertEqual(32, targets["input_ids"].shape[1])

    def test_save_and_load_tokenizer(self):
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)

        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)

                shutil.rmtree(tmpdirname)

        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                tokenizer.add_tokens(["bim", "bambam"])
                additional_special_tokens = tokenizer.additional_special_tokens
                additional_special_tokens.append("new_additional_special_token")
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)

                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)

                shutil.rmtree(tmpdirname)

    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))

        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                    special_tokens_map = json.load(json_file)

                with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                    tokenizer_config = json.load(json_file)

                added_tokens_extra_ids = [f"<extra_id_{i}>" for i in range(125)]

                special_tokens_map["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                tokenizer_config["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                    json.dump(tokenizer_config, outfile)

                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(
                    tmp_dir,
                )
                self.assertIn(
                    "an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens
                )
                self.assertEqual(
                    ["an_additional_special_token"],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"])
                    ),
                )

                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir,
                    additional_special_tokens=new_added_tokens,
                )

                self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens)
                self.assertEqual(
                    ["a_new_additional_special_token"],
                    tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"])
                    ),
                )

    def test_decode_invalid_byte_id(self):
        tokenizer = self.perceiver_tokenizer
        self.assertEqual(tokenizer.decode([178]), "�")

    # tokenizer can be instantiated without any pretrained files, so no need for pretrained tokenizer list
    def test_pretrained_model_lists(self):
        pass

    # tokenizer does not have vocabulary
    def test_get_vocab(self):
        pass

    # inputs cannot be pretokenized since ids depend on whole input string and not just on single characters
    def test_pretokenized_inputs(self):
        pass

    # tests all ids in vocab => vocab doesn't exist so unnecessary to test
    def test_conversion_reversible(self):
        pass

    def test_convert_tokens_to_string_format(self):
        # The default common tokenizer tests uses invalid tokens for Perceiver that can only accept one-character
        # strings and special added tokens as tokens
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                tokens = ["[CLS]", "t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "s", "t", "[SEP]"]
                string = tokenizer.convert_tokens_to_string(tokens)

                self.assertIsInstance(string, str)
677
0