Dataset schema (one row per example):

    code                        string   81 to 54k characters
    code_codestyle              int64    0 to 721
    style_context               string   91 to 41.9k characters
    style_context_codestyle     int64    0 to 699
    label                       int64    0 to 1
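A minimal sketch of how rows with this schema could be loaded and inspected with the Hugging Face `datasets` library; the dataset path `user/code-style-pairs` is a placeholder for illustration, not the real identifier:

from datasets import load_dataset

# Placeholder identifier -- substitute the dataset's actual path on the Hub.
ds = load_dataset("user/code-style-pairs", split="train")

row = ds[0]
print(row["code"][:200])               # first code snippet (obfuscated style)
print(row["code_codestyle"])           # integer style id of `code`, 0-721
print(row["style_context"][:200])      # second snippet providing style context
print(row["style_context_codestyle"])  # integer style id of `style_context`, 0-699
print(row["label"])                    # binary label, 0 or 1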
Row 1

code:

from __future__ import annotations


def depth_first_search(graph: dict, start: str) -> set:
    """Iterative depth-first search; returns the set of explored nodes."""
    explored, stack = set(start), [start]
    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored


G = {
    "A": ["B", "C", "D"],
    "B": ["A", "D", "E"],
    "C": ["A", "F"],
    "D": ["B", "D"],
    "E": ["B", "F"],
    "F": ["C", "E", "G"],
    "G": ["F"],
}

if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(depth_first_search(G, "A"))
code_codestyle: 86
style_context:

import inspect
from typing import Callable, List, Optional, Union

import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer

from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class SeedResizeStableDiffusionPipeline(DiffusionPipeline):
    """Stable Diffusion pipeline that keeps same-seed images similar across output sizes."""

    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()
        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents_reference: Optional[torch.FloatTensor] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        text_embeddings: Optional[torch.FloatTensor] = None,
        **kwargs,
    ):
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]

        if text_embeddings is None:
            text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed, seq_len, _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""]
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=max_length,
                truncation=True,
                return_tensors="pt",
            )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(batch_size, num_images_per_prompt, 1)
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_shape_reference = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                latents_reference = torch.randn(
                    latents_shape_reference, generator=generator, device="cpu", dtype=latents_dtype
                ).to(self.device)
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device
                )
            else:
                latents_reference = torch.randn(
                    latents_shape_reference, generator=generator, device=self.device, dtype=latents_dtype
                )
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents_reference.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents_reference = latents_reference.to(self.device)
            latents = latents.to(self.device)

        # This is the key part of the pipeline where we
        # try to ensure that the generated images w/ the same seed
        # but different sizes actually result in similar images
        dx = (latents_shape[3] - latents_shape_reference[3]) // 2
        dy = (latents_shape[2] - latents_shape_reference[2]) // 2
        w = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
        h = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
        tx = 0 if dx < 0 else dx
        ty = 0 if dy < 0 else dy
        dx = max(-dx, 0)
        dy = max(-dy, 0)
        # import pdb
        # pdb.set_trace()
        latents[:, :, ty : ty + h, tx : tx + w] = latents_reference[:, :, dy : dy + h, dx : dx + w]

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps)

        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            # predict the noise residual
            noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, latents)

        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if self.safety_checker is not None:
            safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(
                self.device
            )
            image, has_nsfw_concept = self.safety_checker(
                images=image, clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype)
            )
        else:
            has_nsfw_concept = None

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, has_nsfw_concept)

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
style_context_codestyle: 86
label: 1
Row 2

code:

EVEN_DIGITS = [0, 2, 4, 6, 8]
ODD_DIGITS = [1, 3, 5, 7, 9]


def reversible_numbers(remaining_length: int, remainder: int, digits: list[int], length: int) -> int:
    """Count reversible numbers of the given length via digit-pair recursion."""
    if remaining_length == 0:
        if digits[0] == 0 or digits[-1] == 0:
            return 0
        for i in range(length // 2 - 1, -1, -1):
            remainder += digits[i] + digits[length - i - 1]
            if remainder % 2 == 0:
                return 0
            remainder //= 10
        return 1

    if remaining_length == 1:
        if remainder % 2 == 0:
            return 0
        result = 0
        for digit in range(10):
            digits[length // 2] = digit
            result += reversible_numbers(0, (remainder + 2 * digit) // 10, digits, length)
        return result

    result = 0
    for digit1 in range(10):
        digits[(length + remaining_length) // 2 - 1] = digit1
        if (remainder + digit1) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS
        for digit2 in other_parity_digits:
            digits[(length - remaining_length) // 2] = digit2
            result += reversible_numbers(
                remaining_length - 2,
                (remainder + digit1 + digit2) // 10,
                digits,
                length,
            )
    return result


def solution(max_power: int = 9) -> int:
    """Sum the counts of reversible numbers for every length up to max_power."""
    result = 0
    for length in range(1, max_power + 1):
        result += reversible_numbers(length, 0, [0] * length, length)
    return result


if __name__ == "__main__":
    print(f"{solution() = }")
code_codestyle: 86
style_context:

from ..utils import DummyObject, requires_backends


class DPMSolverSDEScheduler(metaclass=DummyObject):
    _backends = ["torch", "torchsde"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "torchsde"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "torchsde"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "torchsde"])
style_context_codestyle: 86
label: 1
Row 3

code:

from __future__ import annotations

import numpy as np


def relu(vector: list[float]):
    """Rectified linear unit: element-wise max(0, x)."""
    return np.maximum(0, vector)


if __name__ == "__main__":
    print(np.array(relu([-1, 0, 5])))  # --> [0, 0, 5]
code_codestyle: 86
style_context:

from __future__ import annotations

from typing import TypedDict


class BWTTransformDict(TypedDict):
    bwt_string: str
    idx_original_string: int


def all_rotations(s: str) -> list[str]:
    """Return all rotations of the string s."""
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")
    return [s[i:] + s[:i] for i in range(len(s))]


def bwt_transform(s: str) -> BWTTransformDict:
    """Burrows-Wheeler transform of s, with the index of the original rotation."""
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")
    if not s:
        raise ValueError("The parameter s must not be empty.")

    rotations = all_rotations(s)
    rotations.sort()  # sort the list of rotations in alphabetically order
    # make a string composed of the last char of each rotation
    response: BWTTransformDict = {
        "bwt_string": "".join([word[-1] for word in rotations]),
        "idx_original_string": rotations.index(s),
    }
    return response


def reverse_bwt(bwt_string: str, idx_original_string: int) -> str:
    """Reverse the Burrows-Wheeler transform."""
    if not isinstance(bwt_string, str):
        raise TypeError("The parameter bwt_string type must be str.")
    if not bwt_string:
        raise ValueError("The parameter bwt_string must not be empty.")
    try:
        idx_original_string = int(idx_original_string)
    except ValueError:
        raise TypeError(
            "The parameter idx_original_string type must be int or passive"
            " of cast to int."
        )
    if idx_original_string < 0:
        raise ValueError("The parameter idx_original_string must not be lower than 0.")
    if idx_original_string >= len(bwt_string):
        raise ValueError(
            "The parameter idx_original_string must be lower than"
            " len(bwt_string)."
        )

    ordered_rotations = [""] * len(bwt_string)
    for _ in range(len(bwt_string)):
        for i in range(len(bwt_string)):
            ordered_rotations[i] = bwt_string[i] + ordered_rotations[i]
        ordered_rotations.sort()
    return ordered_rotations[idx_original_string]


if __name__ == "__main__":
    entry_msg = "Provide a string that I will generate its BWT transform: "
    s = input(entry_msg).strip()
    result = bwt_transform(s)
    print(
        f"Burrows Wheeler transform for string '{s}' results "
        f"in '{result['bwt_string']}'"
    )
    original_string = reverse_bwt(result["bwt_string"], result["idx_original_string"])
    print(
        f"Reversing Burrows Wheeler transform for entry '{result['bwt_string']}' "
        f"we get original string '{original_string}'"
    )
style_context_codestyle: 86
label: 1
Row 4

code:

import unittest

from transformers.testing_utils import require_bs4
from transformers.utils import is_bs4_available

from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin


if is_bs4_available():
    from transformers import MarkupLMFeatureExtractor


class MarkupLMFeatureExtractionTester(unittest.TestCase):
    def __init__(self, parent):
        self.parent = parent

    def prepare_feat_extract_dict(self):
        return {}


def get_html_strings():
    html_string_1 = """<HTML>
    <HEAD>
    <TITLE>sample document</TITLE>
    </HEAD>
    <BODY BGCOLOR="FFFFFF">
    <HR>
    <a href="http://google.com">Goog</a>
    <H1>This is one header</H1>
    <H2>This is a another Header</H2>
    <P>Travel from
    <P>
    <B>SFO to JFK</B>
    <BR>
    <B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>
    <HR>
    <div style="color:#0000FF">
    <h3>Traveler <b> name </b> is
    <p> John Doe </p>
    </div>"""

    html_string_2 = """
    <!DOCTYPE html>
    <html>
    <body>
    <h1>My First Heading</h1>
    <p>My first paragraph.</p>
    </body>
    </html>
    """

    return [html_string_1, html_string_2]


@require_bs4
class MarkupLMFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase):
    feature_extraction_class = MarkupLMFeatureExtractor if is_bs4_available() else None

    def setUp(self):
        self.feature_extract_tester = MarkupLMFeatureExtractionTester(self)

    @property
    def feat_extract_dict(self):
        return self.feature_extract_tester.prepare_feat_extract_dict()

    def test_call(self):
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class()

        # Test not batched input
        html_string = get_html_strings()[0]
        encoding = feature_extractor(html_string)

        # fmt: off
        expected_nodes = [["sample document", "Goog", "This is one header", "This is a another Header", "Travel from", "SFO to JFK", "on May 2, 2015 at 2:00 pm. For details go to confirm.com", "Traveler", "name", "is", "John Doe"]]
        expected_xpaths = [["/html/head/title", "/html/body/a", "/html/body/h1", "/html/body/h2", "/html/body/p", "/html/body/p/p/b[1]", "/html/body/p/p/b[2]/i", "/html/body/p/p/div/h3", "/html/body/p/p/div/h3/b", "/html/body/p/p/div/h3", "/html/body/p/p/div/h3/p"]]
        # fmt: on

        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths)

        # Test batched
        html_strings = get_html_strings()
        encoding = feature_extractor(html_strings)

        # fmt: off
        expected_nodes = expected_nodes + [["My First Heading", "My first paragraph."]]
        expected_xpaths = expected_xpaths + [["/html/body/h1", "/html/body/p"]]
        # fmt: on

        self.assertEqual(len(encoding.nodes), 2)
        self.assertEqual(len(encoding.xpaths), 2)

        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths)
code_codestyle: 86
style_context:

import os
from datetime import datetime as dt

from github import Github


LABELS_TO_EXEMPT = [
    "good first issue",
    "good second issue",
    "good difficult issue",
    "enhancement",
    "new pipeline/model",
    "new scheduler",
    "wip",
]


def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/diffusers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Closes the issue after 7 days of inactivity since the Stalebot notification.
            issue.edit(state="closed")
        elif (
            "stale" in issue.get_labels()
            and last_comment is not None
            and last_comment.user.login != "github-actions[bot]"
        ):
            # Opens the issue if someone other than Stalebot commented.
            issue.edit(state="open")
            issue.remove_from_labels("stale")
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Post a Stalebot notification after 23 days of inactivity.
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )
            issue.add_to_labels("stale")


if __name__ == "__main__":
    main()
style_context_codestyle: 86
label: 1
Row 5

code:

from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_blenderbot_small": [
        "BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BlenderbotSmallConfig",
        "BlenderbotSmallOnnxConfig",
    ],
    "tokenization_blenderbot_small": ["BlenderbotSmallTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_blenderbot_small_fast"] = ["BlenderbotSmallTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_blenderbot_small"] = [
        "BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BlenderbotSmallForCausalLM",
        "BlenderbotSmallForConditionalGeneration",
        "BlenderbotSmallModel",
        "BlenderbotSmallPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_blenderbot_small"] = [
        "TFBlenderbotSmallForConditionalGeneration",
        "TFBlenderbotSmallModel",
        "TFBlenderbotSmallPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_blenderbot_small"] = [
        "FlaxBlenderbotSmallForConditionalGeneration",
        "FlaxBlenderbotSmallModel",
        "FlaxBlenderbotSmallPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_blenderbot_small import (
        BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BlenderbotSmallConfig,
        BlenderbotSmallOnnxConfig,
    )
    from .tokenization_blenderbot_small import BlenderbotSmallTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blenderbot_small import (
            BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
            BlenderbotSmallForCausalLM,
            BlenderbotSmallForConditionalGeneration,
            BlenderbotSmallModel,
            BlenderbotSmallPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_blenderbot_small import (
            TFBlenderbotSmallForConditionalGeneration,
            TFBlenderbotSmallModel,
            TFBlenderbotSmallPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_blenderbot_small import (
            FlaxBlenderbotSmallForConditionalGeneration,
            FlaxBlenderbotSmallModel,
            FlaxBlenderbotSmallPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
code_codestyle: 86
style_context:

import inspect
from typing import List, Optional, Tuple, Union

import numpy as np
import PIL
import torch
import torch.utils.checkpoint

from ...models import UNet2DModel, VQModel
from ...schedulers import (
    DDIMScheduler,
    DPMSolverMultistepScheduler,
    EulerAncestralDiscreteScheduler,
    EulerDiscreteScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


def preprocess(image):
    w, h = image.size
    w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
    image = np.array(image).astype(np.float32) / 255.0
    image = image[None].transpose(0, 3, 1, 2)
    image = torch.from_numpy(image)
    return 2.0 * image - 1.0


class LDMSuperResolutionPipeline(DiffusionPipeline):
    """Latent-diffusion super-resolution pipeline built from a VQ-VAE, a UNet, and a scheduler."""

    def __init__(
        self,
        vqvae: VQModel,
        unet: UNet2DModel,
        scheduler: Union[
            DDIMScheduler,
            PNDMScheduler,
            LMSDiscreteScheduler,
            EulerDiscreteScheduler,
            EulerAncestralDiscreteScheduler,
            DPMSolverMultistepScheduler,
        ],
    ):
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.Tensor, PIL.Image.Image] = None,
        batch_size: Optional[int] = 1,
        num_inference_steps: Optional[int] = 100,
        eta: Optional[float] = 0.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[Tuple, ImagePipelineOutput]:
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        else:
            raise ValueError(f"`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image)}")

        if isinstance(image, PIL.Image.Image):
            image = preprocess(image)

        height, width = image.shape[-2:]

        # in_channels should be 6: 3 for latents, 3 for low resolution image
        latents_shape = (batch_size, self.unet.config.in_channels // 2, height, width)
        latents_dtype = next(self.unet.parameters()).dtype

        latents = randn_tensor(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)

        image = image.to(device=self.device, dtype=latents_dtype)

        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps_tensor = self.scheduler.timesteps

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(timesteps_tensor):
            # concat latents and low resolution image in the channel dimension.
            latents_input = torch.cat([latents, image], dim=1)
            latents_input = self.scheduler.scale_model_input(latents_input, t)
            # predict the noise residual
            noise_pred = self.unet(latents_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VQVAE
        image = self.vqvae.decode(latents).sample
        image = torch.clamp(image, -1.0, 1.0)
        image = image / 2 + 0.5
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
style_context_codestyle: 86
label: 1
Row 6

code:

import argparse
import hashlib
import os
import urllib.request
import warnings

import torch
from torch import nn
from tqdm import tqdm

from transformers import WhisperConfig, WhisperForConditionalGeneration


_MODELS = {
    "tiny.en": "https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt",
    "tiny": "https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt",
    "base.en": "https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt",
    "base": "https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt",
    "small.en": "https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt",
    "small": "https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt",
    "medium.en": "https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt",
    "medium": "https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt",
    "large": "https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt",
    "large-v2": "https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt",
}


def remove_ignore_keys_(state_dict):
    ignore_keys = ["layers", "blocks"]
    for k in ignore_keys:
        state_dict.pop(k, None)


WHISPER_MAPPING = {
    "blocks": "layers",
    "mlp.0": "fc1",
    "mlp.2": "fc2",
    "mlp_ln": "final_layer_norm",
    ".attn.query": ".self_attn.q_proj",
    ".attn.key": ".self_attn.k_proj",
    ".attn.value": ".self_attn.v_proj",
    ".attn_ln": ".self_attn_layer_norm",
    ".attn.out": ".self_attn.out_proj",
    ".cross_attn.query": ".encoder_attn.q_proj",
    ".cross_attn.key": ".encoder_attn.k_proj",
    ".cross_attn.value": ".encoder_attn.v_proj",
    ".cross_attn_ln": ".encoder_attn_layer_norm",
    ".cross_attn.out": ".encoder_attn.out_proj",
    "decoder.ln.": "decoder.layer_norm.",
    "encoder.ln.": "encoder.layer_norm.",
    "token_embedding": "embed_tokens",
    "encoder.positional_embedding": "encoder.embed_positions.weight",
    "decoder.positional_embedding": "decoder.embed_positions.weight",
    "ln_post": "layer_norm",
}


def rename_keys(s_dict):
    keys = list(s_dict.keys())
    for key in keys:
        new_key = key
        for k, v in WHISPER_MAPPING.items():
            if k in key:
                new_key = new_key.replace(k, v)

        print(f"{key} -> {new_key}")

        s_dict[new_key] = s_dict.pop(key)
    return s_dict


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def _download(url: str, root: str = ".") -> bytes:
    os.makedirs(root, exist_ok=True)
    filename = os.path.basename(url)

    expected_sha256 = url.split("/")[-2]
    download_target = os.path.join(root, filename)

    if os.path.exists(download_target) and not os.path.isfile(download_target):
        raise RuntimeError(f"{download_target} exists and is not a regular file")

    if os.path.isfile(download_target):
        model_bytes = open(download_target, "rb").read()
        if hashlib.sha256(model_bytes).hexdigest() == expected_sha256:
            return model_bytes
        else:
            warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file")

    with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
        with tqdm(
            total=int(source.info().get("Content-Length")), ncols=80, unit="iB", unit_scale=True, unit_divisor=1024
        ) as loop:
            while True:
                buffer = source.read(8192)
                if not buffer:
                    break

                output.write(buffer)
                loop.update(len(buffer))

    model_bytes = open(download_target, "rb").read()
    if hashlib.sha256(model_bytes).hexdigest() != expected_sha256:
        raise RuntimeError(
            "Model has been downloaded but the SHA256 checksum does not not match. Please retry loading the model."
        )

    return model_bytes


def convert_openai_whisper_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    if ".pt" not in checkpoint_path:
        original_checkpoint = _download(_MODELS[checkpoint_path])
    else:
        original_checkpoint = torch.load(checkpoint_path, map_location="cpu")
    dimensions = original_checkpoint["dims"]
    state_dict = original_checkpoint["model_state_dict"]
    proj_out_weights = state_dict["decoder.token_embedding.weight"]
    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)
    tie_embeds = True
    ffn_dim = state_dict["decoder.layers.0.fc1.weight"].shape[0]

    config = WhisperConfig(
        vocab_size=dimensions["n_vocab"],
        encoder_ffn_dim=ffn_dim,
        decoder_ffn_dim=ffn_dim,
        num_mel_bins=dimensions["n_mels"],
        d_model=dimensions["n_audio_state"],
        max_target_positions=dimensions["n_text_ctx"],
        encoder_layers=dimensions["n_audio_layer"],
        encoder_attention_heads=dimensions["n_audio_head"],
        decoder_layers=dimensions["n_text_layer"],
        decoder_attention_heads=dimensions["n_text_state"],
        max_source_positions=dimensions["n_audio_ctx"],
    )

    model = WhisperForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            f" but all the following weights are missing {missing}"
        )

    if tie_embeds:
        model.proj_out = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.proj_out.weight.data = proj_out_weights

    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # # Required parameters
    parser.add_argument("--checkpoint_path", type=str, help="Path to the downloaded checkpoints")
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()

    convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
code_codestyle: 86
style_context:

def abbr(a: str, b: str) -> bool:
    """Check whether string a can be abbreviated to string b (uppercase target)."""
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
style_context_codestyle: 86
label: 1
Row 7

code:

import unittest

from diffusers.models.unet_2d_blocks import *  # noqa F403
from diffusers.utils import torch_device

from .test_unet_blocks_common import UNetBlockTesterMixin


class DownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = DownBlock2D  # noqa F405
    block_type = "down"

    def test_output(self):
        expected_slice = [-0.0232, -0.9869, 0.8054, -0.0637, -0.1688, -1.4264, 0.4470, -1.3394, 0.0904]
        super().test_output(expected_slice)


class ResnetDownsampleBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = ResnetDownsampleBlock2D  # noqa F405
    block_type = "down"

    def test_output(self):
        expected_slice = [0.0710, 0.2410, -0.7320, -1.0757, -1.1343, 0.3540, -0.0133, -0.2576, 0.0948]
        super().test_output(expected_slice)


class AttnDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnDownBlock2D  # noqa F405
    block_type = "down"

    def test_output(self):
        expected_slice = [0.0636, 0.8964, -0.6234, -1.0131, 0.0844, 0.4935, 0.3437, 0.0911, -0.2957]
        super().test_output(expected_slice)


class CrossAttnDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = CrossAttnDownBlock2D  # noqa F405
    block_type = "down"

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.2238, -0.7396, -0.2255, -0.3829, 0.1925, 1.1665, 0.0603, -0.7295, 0.1983]
        super().test_output(expected_slice)


class SimpleCrossAttnDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SimpleCrossAttnDownBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_encoder_hidden_states=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    @unittest.skipIf(torch_device == "mps", "MPS result is not consistent")
    def test_output(self):
        expected_slice = [0.7921, -0.0992, -0.1962, -0.7695, -0.4242, 0.7804, 0.4737, 0.2765, 0.3338]
        super().test_output(expected_slice)


class SkipDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SkipDownBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_skip_sample=True)

    def test_output(self):
        expected_slice = [-0.0845, -0.2087, -0.2465, 0.0971, 0.1900, -0.0484, 0.2664, 0.4179, 0.5069]
        super().test_output(expected_slice)


class AttnSkipDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnSkipDownBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_skip_sample=True)

    def test_output(self):
        expected_slice = [0.5539, 0.1609, 0.4924, 0.0537, -0.1995, 0.4050, 0.0979, -0.2721, -0.0642]
        super().test_output(expected_slice)


class DownEncoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = DownEncoderBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [1.1102, 0.5302, 0.4872, -0.0023, -0.8042, 0.0483, -0.3489, -0.5632, 0.7626]
        super().test_output(expected_slice)


class AttnDownEncoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnDownEncoderBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.8966, -0.1486, 0.8568, 0.8141, -0.9046, -0.1342, -0.0972, -0.7417, 0.1538]
        super().test_output(expected_slice)


class UNetMidBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UNetMidBlock2D  # noqa F405
    block_type = "mid"

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "temb_channels": 128,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [-0.1062, 1.7248, 0.3494, 1.4569, -0.0910, -1.2421, -0.9984, 0.6736, 1.0028]
        super().test_output(expected_slice)


class UNetMidBlock2DCrossAttnTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UNetMidBlock2DCrossAttn  # noqa F405
    block_type = "mid"

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.0187, 2.4220, 0.4484, 1.1203, -0.6121, -1.5122, -0.8270, 0.7851, 1.8335]
        super().test_output(expected_slice)


class UNetMidBlock2DSimpleCrossAttnTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UNetMidBlock2DSimpleCrossAttn  # noqa F405
    block_type = "mid"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_encoder_hidden_states=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.7143, 1.9974, 0.5448, 1.3977, 0.1282, -1.1237, -1.4238, 0.5530, 0.8880]
        super().test_output(expected_slice)


class UpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [-0.2041, -0.4165, -0.3022, 0.0041, -0.6628, -0.7053, 0.1928, -0.0325, 0.0523]
        super().test_output(expected_slice)


class ResnetUpsampleBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = ResnetUpsampleBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [0.2287, 0.3549, -0.1346, 0.4797, -0.1715, -0.9649, 0.7305, -0.5864, -0.6244]
        super().test_output(expected_slice)


class CrossAttnUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = CrossAttnUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [-0.1403, -0.3515, -0.0420, -0.1425, 0.3167, 0.5094, -0.2181, 0.5931, 0.5582]
        super().test_output(expected_slice)


class SimpleCrossAttnUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SimpleCrossAttnUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True, include_encoder_hidden_states=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.2645, 0.1480, 0.0909, 0.8044, -0.9758, -0.9083, 0.0994, -1.1453, -0.7402]
        super().test_output(expected_slice)


class AttnUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    @unittest.skipIf(torch_device == "mps", "MPS result is not consistent")
    def test_output(self):
        expected_slice = [0.0979, 0.1326, 0.0021, 0.0659, 0.2249, 0.0059, 0.1132, 0.5952, 0.1033]
        super().test_output(expected_slice)


class SkipUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SkipUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [-0.0893, -0.1234, -0.1506, -0.0332, 0.0123, -0.0211, 0.0566, 0.0143, 0.0362]
        super().test_output(expected_slice)


class AttnSkipUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnSkipUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [0.0361, 0.0617, 0.2787, -0.0350, 0.0342, 0.3421, -0.0843, 0.0913, 0.3015]
        super().test_output(expected_slice)


class UpDecoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UpDecoderBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {"in_channels": 32, "out_channels": 32}
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.4404, 0.1998, -0.9886, -0.3320, -0.3128, -0.7034, -0.6955, -0.2338, -0.3137]
        super().test_output(expected_slice)


class AttnUpDecoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnUpDecoderBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {"in_channels": 32, "out_channels": 32}
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.6738, 0.4491, 0.1055, 1.0710, 0.7316, 0.3339, 0.3352, 0.1023, 0.3568]
        super().test_output(expected_slice)
code_codestyle: 86
style_context:

from __future__ import annotations

END = "#"


class Trie:
    def __init__(self):
        self._trie = {}

    def insert_word(self, text):
        trie = self._trie
        for char in text:
            if char not in trie:
                trie[char] = {}
            trie = trie[char]
        trie[END] = True

    def find_word(self, prefix):
        trie = self._trie
        for char in prefix:
            if char in trie:
                trie = trie[char]
            else:
                return []
        return self._elements(trie)

    def _elements(self, d):
        result = []
        for c, v in d.items():
            sub_result = [" "] if c == END else [(c + s) for s in self._elements(v)]
            result.extend(sub_result)
        return tuple(result)


trie = Trie()
words = ("depart", "detergent", "daring", "dog", "deer", "deal")
for word in words:
    trie.insert_word(word)


def autocomplete_using_trie(string: str) -> tuple:
    suffixes = trie.find_word(string)
    return tuple(string + word for word in suffixes)


def main() -> None:
    print(autocomplete_using_trie("de"))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
style_context_codestyle: 86
label: 1
Row 8

code:

from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors


def mobius(number: int) -> int:
    """Mobius function: 0 if number is not square-free, else (-1)**k for k prime factors."""
    factors = prime_factors(number)
    if is_square_free(factors):
        return -1 if len(factors) % 2 else 1
    return 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
code_codestyle: 86
style_context:

import math
import unittest

from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        BioGptForCausalLM,
        BioGptForSequenceClassification,
        BioGptForTokenClassification,
        BioGptModel,
        BioGptTokenizer,
    )
    from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST


class BioGptModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return BioGptConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = BioGptModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        model = BioGptForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_biogpt_model_attention_mask_past(
        self, config, input_ids, input_mask, head_mask, token_type_ids, *args
    ):
        model = BioGptModel(config=config)
        model.to(torch_device)
        model.eval()

        # create attention mask
        attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)
        half_seq_length = self.seq_length // 2
        attn_mask[:, half_seq_length:] = 0

        # first forward pass
        output, past_key_values = model(input_ids, attention_mask=attn_mask).to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)

        # change a random masked slice from input_ids
        random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1
        random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1)
        input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens

        # append to next input_ids and attn_mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        attn_mask = torch.cat(
            [attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)],
            dim=1,
        )

        # get two different outputs
        output_from_no_past = model(next_input_ids, attention_mask=attn_mask)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values, attention_mask=attn_mask)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def create_and_check_biogpt_model_past_large_inputs(
        self, config, input_ids, input_mask, head_mask, token_type_ids, *args
    ):
        model = BioGptModel(config=config).to(torch_device).eval()

        attention_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = ids_tensor((self.batch_size, 3), 2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[
            "last_hidden_state"
        ]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def create_and_check_forward_and_backwards(
        self, config, input_ids, input_mask, head_mask, token_type_ids, *args, gradient_checkpointing=False
    ):
        model = BioGptForCausalLM(config)
        model.to(torch_device)
        if gradient_checkpointing:
            model.gradient_checkpointing_enable()

        result = model(input_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        result.loss.backward()

    def create_and_check_biogpt_weight_initialization(self, config, *args):
        model = BioGptModel(config)
        model_std = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers)
        for key in model.state_dict().keys():
            if "c_proj" in key and "weight" in key:
                self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key]) - model_std), 0.001)
                self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key]) - 0.0), 0.01)

    def create_and_check_biogpt_for_token_classification(
        self, config, input_ids, input_mask, head_mask, token_type_ids, *args
    ):
        config.num_labels = self.num_labels
        model = BioGptForTokenClassification(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class BioGptModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (BioGptForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": BioGptModel,
            "text-classification": BioGptForSequenceClassification,
            "text-generation": BioGptForCausalLM,
            "token-classification": BioGptForTokenClassification,
            "zero-shot": BioGptForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False

    def setUp(self):
        self.model_tester = BioGptModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BioGptConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_biogpt_model_att_mask_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_attention_mask_past(*config_and_inputs)

    def test_biogpt_gradient_checkpointing(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_forward_and_backwards(*config_and_inputs, gradient_checkpointing=True)

    def test_biogpt_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_past_large_inputs(*config_and_inputs)

    def test_biogpt_weight_initialization(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_weight_initialization(*config_and_inputs)

    def test_biogpt_token_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_for_token_classification(*config_and_inputs)

    @slow
    def test_batch_generation(self):
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        model.to(torch_device)
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")

        tokenizer.padding_side = "left"

        # Define PAD Token = EOS Token = 50256
        tokenizer.pad_token = tokenizer.eos_token
        model.config.pad_token_id = model.config.eos_token_id

        # use different length sentences to test batching
        sentences = [
            "Hello, my dog is a little",
            "Today, I",
        ]

        inputs = tokenizer(sentences, return_tensors="pt", padding=True)
        input_ids = inputs["input_ids"].to(torch_device)

        outputs = model.generate(
            input_ids=input_ids,
            attention_mask=inputs["attention_mask"].to(torch_device),
        )

        inputs_non_padded = tokenizer(sentences[0], return_tensors="pt").input_ids.to(torch_device)
        output_non_padded = model.generate(input_ids=inputs_non_padded)

        num_paddings = inputs_non_padded.shape[-1] - inputs["attention_mask"][-1].long().sum().cpu().item()
        inputs_padded = tokenizer(sentences[1], return_tensors="pt").input_ids.to(torch_device)
        output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "Hello, my dog is a little bit bigger than a little bit.",
            "Today, I have a good idea of how to use the information",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])

    @slow
    def test_model_from_pretrained(self):
        for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BioGptModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_biogpt_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = BioGptForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_biogpt_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = BioGptForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))


@require_torch
class BioGptModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_biogpt(self):
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        input_ids = torch.tensor([[2, 4805, 9, 656, 21]])
        output = model(input_ids)[0]

        vocab_size = 42384

        expected_shape = torch.Size((1, 5, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-9.5236, -9.8918, 10.4557], [-11.0469, -9.6423, 8.1022], [-8.8664, -7.8826, 5.5325]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_biogpt_generation(self):
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        model.to(torch_device)

        torch.manual_seed(0)
        tokenized = tokenizer("COVID-19 is", return_tensors="pt").to(torch_device)

        output_ids = model.generate(
            **tokenized,
            min_length=100,
            max_length=1024,
            num_beams=5,
            early_stopping=True,
        )
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)

        expected_output_str = (
            "COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the"
            " causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than
200 countries and""" """ territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),""" """ and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and""" """ more than 800,000 deaths.""" ) self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
86
1
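A note on the batched-generation test in the record above: decoder-only models such as BioGPT must be padded on the left, with EOS reused as the padding token, so that freshly generated tokens line up at the end of every row. Below is a minimal stand-alone sketch of that pattern; it uses GPT-2 purely as an illustrative stand-in, and the model name, prompts, and max_new_tokens value are assumptions rather than values taken from the test.

from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")
tokenizer.padding_side = "left"             # decoder-only models pad on the left
tokenizer.pad_token = tokenizer.eos_token   # reuse EOS as PAD, as the test does
inputs = tokenizer(["Hello, my dog is a little", "Today, I"], return_tensors="pt", padding=True)
outputs = model.generate(
    input_ids=inputs["input_ids"],
    attention_mask=inputs["attention_mask"],
    max_new_tokens=10,
    pad_token_id=tokenizer.eos_token_id,
)
print(tokenizer.batch_decode(outputs, skip_special_tokens=True))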
import os def __magic_name__ ( ): '''simple docstring''' with open(os.path.dirname(__a ) + """/grid.txt""" ) as f: UpperCamelCase__ = [] # noqa: E741 for _ in range(20 ): l.append([int(__a ) for x in f.readline().split()] ) UpperCamelCase__ = 0 # right for i in range(20 ): for j in range(17 ): UpperCamelCase__ = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3] if temp > maximum: UpperCamelCase__ = temp # down for i in range(17 ): for j in range(20 ): UpperCamelCase__ = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j] if temp > maximum: UpperCamelCase__ = temp # diagonal 1 for i in range(17 ): for j in range(17 ): UpperCamelCase__ = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3] if temp > maximum: UpperCamelCase__ = temp # diagonal 2 for i in range(17 ): for j in range(3 , 20 ): UpperCamelCase__ = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3] if temp > maximum: UpperCamelCase__ = temp return maximum if __name__ == "__main__": print(solution())
86
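The grid scan above hard-codes the 20x20 bounds and the window length 4 into four separate direction loops. A generalized sketch of the same idea follows, with the grid size and window length as parameters; the function name and the small demo grid are illustrative assumptions.

def max_adjacent_product(grid: list[list[int]], k: int = 4) -> int:
    # Largest product of k adjacent values along any row, column, or diagonal.
    n = len(grid)
    best = 0
    for i in range(n):
        for j in range(n):
            for di, dj in ((0, 1), (1, 0), (1, 1), (1, -1)):  # right, down, diag, anti-diag
                ei, ej = i + (k - 1) * di, j + (k - 1) * dj
                if 0 <= ei < n and 0 <= ej < n:
                    product = 1
                    for step in range(k):
                        product *= grid[i + step * di][j + step * dj]
                    best = max(best, product)
    return best

print(max_adjacent_product([[1, 2, 3], [4, 5, 6], [7, 8, 9]], k=2))  # 72 (8 * 9)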
from PIL import Image def __magic_name__ ( __a : Image , __a : float ): '''simple docstring''' def brightness(__a : int ) -> float: return 128 + level + (c - 128) if not -255.0 <= level <= 255.0: raise ValueError("""level must be between -255.0 (black) and 255.0 (white)""" ) return img.point(__a ) if __name__ == "__main__": # Load image with Image.open('''image_data/lena.jpg''') as img: # Change brightness to 100 lowerCamelCase_ = change_brightness(img, 1_00) brigt_img.save('''image_data/lena_brightness.png''', format='''png''')
86
1
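Note that the offset formula in the brightness filter above, 128 + level + (c - 128), simplifies algebraically to c + level. A self-contained sketch with an explicit clamp to the 8-bit range follows; the clamp and the standalone function are my own additions, not part of the snippet above.

from PIL import Image

def change_brightness(img: Image.Image, level: float) -> Image.Image:
    # Shift every channel value by `level`; 128 + level + (c - 128) == c + level.
    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")
    return img.point(lambda c: max(0, min(255, int(c + level))))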
import math from collections.abc import Iterator from itertools import takewhile def __magic_name__ ( __a : int ): '''simple docstring''' if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(__a ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def __magic_name__ ( ): '''simple docstring''' UpperCamelCase__ = 2 while True: if is_prime(__a ): yield num num += 1 def __magic_name__ ( __a : int = 2_000_000 ): '''simple docstring''' return sum(takewhile(lambda __a : x < n , prime_generator() ) ) if __name__ == "__main__": print(f'{solution() = }')
86
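The trial division behind the takewhile generator above costs roughly O(n * sqrt(n)); for the two-million bound, a sieve of Eratosthenes is the usual faster alternative. A sketch follows (the function name is illustrative; the printed value is the well-known answer to Project Euler 10).

def sum_of_primes_below(n: int = 2_000_000) -> int:
    # Sieve of Eratosthenes: sieve[i] stays 1 while i is still presumed prime.
    sieve = bytearray([1]) * n
    sieve[0] = sieve[1] = 0
    for i in range(2, int(n**0.5) + 1):
        if sieve[i]:
            sieve[i * i :: i] = bytearray(len(range(i * i, n, i)))
    return sum(i for i, is_prime in enumerate(sieve) if is_prime)

print(sum_of_primes_below())  # 142913828922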
lowerCamelCase_ = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(10_00_00)] def __magic_name__ ( __a : int ): '''simple docstring''' UpperCamelCase__ = 0 while number: # Increased Speed Slightly by checking every 5 digits together. sum_of_digits_squared += DIGITS_SQUARED[number % 100_000] number //= 100_000 return sum_of_digits_squared # There are 2 Chains made, # One ends with 89 with the chain member 58 being the one which when declared first, # there will be the least number of iterations for all the members to be checked. # The other one ends with 1 and has only one element 1. # So 58 and 1 are chosen to be declared at the starting. # Changed dictionary to an array to quicken the solution lowerCamelCase_ = [None] * 10_00_00_00 lowerCamelCase_ = True lowerCamelCase_ = False def __magic_name__ ( __a : int ): '''simple docstring''' if CHAINS[number - 1] is not None: return CHAINS[number - 1] # type: ignore UpperCamelCase__ = chain(next_number(__a ) ) UpperCamelCase__ = number_chain while number < 10_000_000: UpperCamelCase__ = number_chain number *= 10 return number_chain def __magic_name__ ( __a : int = 10_000_000 ): '''simple docstring''' for i in range(1 , __a ): if CHAINS[i] is None: chain(i + 1 ) return CHAINS[:number].count(__a ) if __name__ == "__main__": import doctest doctest.testmod() print(f'{solution() = }')
86
1
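The array-based solution above precomputes squared digit sums and chain endpoints. An equivalent dictionary-memoized sketch of the same Project Euler 92 idea may be easier to follow; the names are illustrative, and 8581146 is the known answer for the ten-million limit.

def next_number(n: int) -> int:
    total = 0
    while n:
        n, digit = divmod(n, 10)
        total += digit * digit
    return total

def count_chains_ending_in_89(limit: int = 10_000_000) -> int:
    cache = {1: False, 89: True}  # every chain eventually reaches 1 or 89

    def ends_in_89(n: int) -> bool:
        seen = []
        while n not in cache:
            seen.append(n)
            n = next_number(n)
        for m in seen:  # memoize the whole walked chain
            cache[m] = cache[n]
        return cache[n]

    return sum(ends_in_89(i) for i in range(1, limit))

print(count_chains_ending_in_89())  # 8581146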
from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional, Union from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging if TYPE_CHECKING: from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType lowerCamelCase_ = logging.get_logger(__name__) lowerCamelCase_ = { '''microsoft/deberta-v2-xlarge''': '''https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json''', '''microsoft/deberta-v2-xxlarge''': '''https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json''', '''microsoft/deberta-v2-xlarge-mnli''': ( '''https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json''' ), '''microsoft/deberta-v2-xxlarge-mnli''': ( '''https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json''' ), } class __A( __lowerCamelCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = """deberta-v2""" def __init__(self , SCREAMING_SNAKE_CASE_=12_81_00 , SCREAMING_SNAKE_CASE_=15_36 , SCREAMING_SNAKE_CASE_=24 , SCREAMING_SNAKE_CASE_=24 , SCREAMING_SNAKE_CASE_=61_44 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=5_12 , SCREAMING_SNAKE_CASE_=0 , SCREAMING_SNAKE_CASE_=0.02 , SCREAMING_SNAKE_CASE_=1E-7 , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=-1 , SCREAMING_SNAKE_CASE_=0 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=0 , SCREAMING_SNAKE_CASE_="gelu" , **SCREAMING_SNAKE_CASE_ , ): super().__init__(**SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = hidden_size UpperCamelCase__ = num_hidden_layers UpperCamelCase__ = num_attention_heads UpperCamelCase__ = intermediate_size UpperCamelCase__ = hidden_act UpperCamelCase__ = hidden_dropout_prob UpperCamelCase__ = attention_probs_dropout_prob UpperCamelCase__ = max_position_embeddings UpperCamelCase__ = type_vocab_size UpperCamelCase__ = initializer_range UpperCamelCase__ = relative_attention UpperCamelCase__ = max_relative_positions UpperCamelCase__ = pad_token_id UpperCamelCase__ = position_biased_input # Backwards compatibility if type(SCREAMING_SNAKE_CASE_ ) == str: UpperCamelCase__ = [x.strip() for x in pos_att_type.lower().split("""|""" )] UpperCamelCase__ = pos_att_type UpperCamelCase__ = vocab_size UpperCamelCase__ = layer_norm_eps UpperCamelCase__ = kwargs.get("""pooler_hidden_size""" , SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = pooler_dropout UpperCamelCase__ = pooler_hidden_act class __A( __lowerCamelCase ): """simple docstring""" @property def UpperCAmelCase_ (self ): if self.task == "multiple-choice": UpperCamelCase__ = {0: """batch""", 1: """choice""", 2: """sequence"""} else: UpperCamelCase__ = {0: """batch""", 1: """sequence"""} if self._config.type_vocab_size > 0: return OrderedDict( [("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ("""token_type_ids""", dynamic_axis)] ) else: return OrderedDict([("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis)] ) @property def UpperCAmelCase_ (self ): return 12 def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = -1 , SCREAMING_SNAKE_CASE_ = -1 , SCREAMING_SNAKE_CASE_ = -1 , SCREAMING_SNAKE_CASE_ = False , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 3 , SCREAMING_SNAKE_CASE_ = 40 , SCREAMING_SNAKE_CASE_ = 40 , SCREAMING_SNAKE_CASE_ = None , ): UpperCamelCase__ = super().generate_dummy_inputs(preprocessor=SCREAMING_SNAKE_CASE_ , framework=SCREAMING_SNAKE_CASE_ ) if 
self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs: del dummy_inputs["token_type_ids"] return dummy_inputs
86
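As the constructor in the record above shows, pos_att_type may arrive as a "|"-separated string for backwards compatibility and is split into a list. A short usage sketch follows, assuming the transformers package is installed; the chosen pos_att_type value is the conventional one for DeBERTa, used here only for illustration.

from transformers import DebertaV2Config

config = DebertaV2Config(pos_att_type="p2c|c2p")
print(config.pos_att_type)  # ['p2c', 'c2p'] -- the string form is split on '|'
print(config.hidden_size)   # 1536, the default visible in the signature above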
import argparse import hashlib import os import urllib import warnings import torch from torch import nn from tqdm import tqdm from transformers import WhisperConfig, WhisperForConditionalGeneration lowerCamelCase_ = { '''tiny.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt''', '''tiny''': '''https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt''', '''base.en''': '''https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt''', '''base''': '''https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt''', '''small.en''': '''https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt''', '''small''': '''https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt''', '''medium.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt''', '''medium''': '''https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt''', '''large''': '''https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt''', '''large-v2''': '''https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt''', } def __magic_name__ ( __a : List[str] ): '''simple docstring''' UpperCamelCase__ = ["""layers""", """blocks"""] for k in ignore_keys: state_dict.pop(__a , __a ) lowerCamelCase_ = { '''blocks''': '''layers''', '''mlp.0''': '''fc1''', '''mlp.2''': '''fc2''', '''mlp_ln''': '''final_layer_norm''', '''.attn.query''': '''.self_attn.q_proj''', '''.attn.key''': '''.self_attn.k_proj''', '''.attn.value''': '''.self_attn.v_proj''', '''.attn_ln''': '''.self_attn_layer_norm''', '''.attn.out''': '''.self_attn.out_proj''', '''.cross_attn.query''': '''.encoder_attn.q_proj''', '''.cross_attn.key''': '''.encoder_attn.k_proj''', '''.cross_attn.value''': '''.encoder_attn.v_proj''', '''.cross_attn_ln''': '''.encoder_attn_layer_norm''', '''.cross_attn.out''': '''.encoder_attn.out_proj''', '''decoder.ln.''': '''decoder.layer_norm.''', '''encoder.ln.''': '''encoder.layer_norm.''', '''token_embedding''': '''embed_tokens''', '''encoder.positional_embedding''': '''encoder.embed_positions.weight''', '''decoder.positional_embedding''': '''decoder.embed_positions.weight''', '''ln_post''': '''layer_norm''', } def __magic_name__ ( __a : Dict ): '''simple docstring''' UpperCamelCase__ = list(s_dict.keys() ) for key in keys: UpperCamelCase__ = key for k, v in WHISPER_MAPPING.items(): if k in key: UpperCamelCase__ = new_key.replace(__a , __a ) print(f"{key} -> {new_key}" ) UpperCamelCase__ = s_dict.pop(__a ) return s_dict def __magic_name__ ( __a : Optional[Any] ): '''simple docstring''' UpperCamelCase__ , UpperCamelCase__ = emb.weight.shape UpperCamelCase__ = nn.Linear(__a , __a , bias=__a ) UpperCamelCase__ = emb.weight.data return lin_layer def __magic_name__ ( __a : str , __a : str ): '''simple docstring''' os.makedirs(__a , exist_ok=__a ) UpperCamelCase__ = os.path.basename(__a ) UpperCamelCase__ = url.split("""/""" 
)[-2] UpperCamelCase__ = os.path.join(__a , __a ) if os.path.exists(__a ) and not os.path.isfile(__a ): raise RuntimeError(f"{download_target} exists and is not a regular file" ) if os.path.isfile(__a ): UpperCamelCase__ = open(__a , """rb""" ).read() if hashlib.shaaaa(__a ).hexdigest() == expected_shaaaa: return model_bytes else: warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file" ) with urllib.request.urlopen(__a ) as source, open(__a , """wb""" ) as output: with tqdm( total=int(source.info().get("""Content-Length""" ) ) , ncols=80 , unit="""iB""" , unit_scale=__a , unit_divisor=1_024 ) as loop: while True: UpperCamelCase__ = source.read(8_192 ) if not buffer: break output.write(__a ) loop.update(len(__a ) ) UpperCamelCase__ = open(__a , """rb""" ).read() if hashlib.shaaaa(__a ).hexdigest() != expected_shaaaa: raise RuntimeError( """Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model.""" ) return model_bytes def __magic_name__ ( __a : Union[str, Any] , __a : Optional[int] ): '''simple docstring''' if ".pt" not in checkpoint_path: UpperCamelCase__ = _download(_MODELS[checkpoint_path] ) else: UpperCamelCase__ = torch.load(__a , map_location="""cpu""" ) UpperCamelCase__ = original_checkpoint["""dims"""] UpperCamelCase__ = original_checkpoint["""model_state_dict"""] UpperCamelCase__ = state_dict["""decoder.token_embedding.weight"""] remove_ignore_keys_(__a ) rename_keys(__a ) UpperCamelCase__ = True UpperCamelCase__ = state_dict["""decoder.layers.0.fc1.weight"""].shape[0] UpperCamelCase__ = WhisperConfig( vocab_size=dimensions["""n_vocab"""] , encoder_ffn_dim=__a , decoder_ffn_dim=__a , num_mel_bins=dimensions["""n_mels"""] , d_model=dimensions["""n_audio_state"""] , max_target_positions=dimensions["""n_text_ctx"""] , encoder_layers=dimensions["""n_audio_layer"""] , encoder_attention_heads=dimensions["""n_audio_head"""] , decoder_layers=dimensions["""n_text_layer"""] , decoder_attention_heads=dimensions["""n_text_head"""] , max_source_positions=dimensions["""n_audio_ctx"""] , ) UpperCamelCase__ = WhisperForConditionalGeneration(__a ) UpperCamelCase__ , UpperCamelCase__ = model.model.load_state_dict(__a , strict=__a ) if len(__a ) > 0 and not set(__a ) <= { "encoder.embed_positions.weights", "decoder.embed_positions.weights", }: raise ValueError( """Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,""" f" but all the following weights are missing {missing}" ) if tie_embeds: UpperCamelCase__ = make_linear_from_emb(model.model.decoder.embed_tokens ) else: UpperCamelCase__ = proj_out_weights model.save_pretrained(__a ) if __name__ == "__main__": lowerCamelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument('''--checkpoint_path''', type=str, help='''Path to the downloaded checkpoints''') parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') lowerCamelCase_ = parser.parse_args() convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
86
1
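The _download helper in the record above streams the checkpoint to disk and verifies a SHA-256 digest taken from the URL path. A compact stdlib-only sketch of the same download-and-verify pattern follows; the names and the missing progress bar are deliberate simplifications.

import hashlib
import urllib.request

def download_with_checksum(url: str, target: str, expected_sha256: str) -> bytes:
    # Stream the file to disk in 8 KiB chunks, then verify its SHA-256 digest.
    with urllib.request.urlopen(url) as source, open(target, "wb") as output:
        while True:
            buffer = source.read(8_192)
            if not buffer:
                break
            output.write(buffer)
    with open(target, "rb") as f:
        model_bytes = f.read()
    if hashlib.sha256(model_bytes).hexdigest() != expected_sha256:
        raise RuntimeError("downloaded file failed the SHA-256 checksum; please retry")
    return model_bytes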
from collections.abc import Iterable from typing import Generic, TypeVar lowerCamelCase_ = TypeVar('''_T''') class __A( Generic[_T] ): """simple docstring""" def __init__(self , SCREAMING_SNAKE_CASE_ = None ): UpperCamelCase__ = list(iterable or [] ) UpperCamelCase__ = [] def __len__(self ): return len(self._stacka ) + len(self._stacka ) def __repr__(self ): return F"Queue({tuple(self._stacka[::-1] + self._stacka )})" def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ ): self._stacka.append(SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self ): UpperCamelCase__ = self._stacka.pop UpperCamelCase__ = self._stacka.append if not self._stacka: while self._stacka: stacka_append(stacka_pop() ) if not self._stacka: raise IndexError("""Queue is empty""" ) return self._stacka.pop() if __name__ == "__main__": from doctest import testmod testmod()
86
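The two-stack queue above gives amortized O(1) operations: each element is pushed and popped at most twice overall. A compact, self-contained restatement of the trick follows; the class and method names are illustrative.

class TwoStackQueue:
    # FIFO queue built from two LIFO stacks.
    def __init__(self):
        self._inbox: list = []
        self._outbox: list = []

    def put(self, item) -> None:
        self._inbox.append(item)

    def get(self):
        if not self._outbox:
            while self._inbox:  # reverse lazily, once per element
                self._outbox.append(self._inbox.pop())
        if not self._outbox:
            raise IndexError("Queue is empty")
        return self._outbox.pop()

q = TwoStackQueue()
for x in (1, 2, 3):
    q.put(x)
print(q.get(), q.get(), q.get())  # 1 2 3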
def __magic_name__ ( __a : int ): '''simple docstring''' UpperCamelCase__ = [[0 for _ in range(__a )] for _ in range(m + 1 )] for i in range(m + 1 ): UpperCamelCase__ = 1 for n in range(m + 1 ): for k in range(1 , __a ): memo[n][k] += memo[n][k - 1] if n - k > 0: memo[n][k] += memo[n - k - 1][k] return memo[m][m - 1] if __name__ == "__main__": import sys if len(sys.argv) == 1: try: lowerCamelCase_ = int(input('''Enter a number: ''').strip()) print(partition(n)) except ValueError: print('''Please enter a number.''') else: try: lowerCamelCase_ = int(sys.argv[1]) print(partition(n)) except ValueError: print('''Please pass a number.''')
86
1
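The memo table above counts the ways to write m as a sum of at least two positive integers (Project Euler 76), i.e. the partition number p(m) minus one. An equivalent one-dimensional coin-style DP sketch follows; the names are illustrative and the printed values are the standard answers.

def count_summations(m: int) -> int:
    # Partitions of m into parts no larger than m - 1, excluding the trivial "m = m".
    ways = [1] + [0] * m
    for part in range(1, m):
        for total in range(part, m + 1):
            ways[total] += ways[total - part]
    return ways[m]

print(count_summations(5))    # 6
print(count_summations(100))  # 190569291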
import argparse import json import os import torch from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer from transformers.tokenization_utils_base import AddedToken @torch.no_grad() def __magic_name__ ( __a : Union[str, Any] , __a : str , __a : Optional[Any] , __a : Tuple , __a : Any ): '''simple docstring''' with open(__a ) as metadata_file: UpperCamelCase__ = json.load(__a ) UpperCamelCase__ = LukeConfig(use_entity_aware_attention=__a , **metadata["""model_config"""] ) # Load in the weights from the checkpoint_path UpperCamelCase__ = torch.load(__a , map_location="""cpu""" ) # Load the entity vocab file UpperCamelCase__ = load_entity_vocab(__a ) UpperCamelCase__ = RobertaTokenizer.from_pretrained(metadata["""model_config"""]["""bert_model_name"""] ) # Add special tokens to the token vocabulary for downstream tasks UpperCamelCase__ = AddedToken("""<ent>""" , lstrip=__a , rstrip=__a ) UpperCamelCase__ = AddedToken("""<ent2>""" , lstrip=__a , rstrip=__a ) tokenizer.add_special_tokens({"""additional_special_tokens""": [entity_token_a, entity_token_a]} ) config.vocab_size += 2 print(f"Saving tokenizer to {pytorch_dump_folder_path}" ) tokenizer.save_pretrained(__a ) with open(os.path.join(__a , LukeTokenizer.vocab_files_names["""entity_vocab_file"""] ) , """w""" ) as f: json.dump(__a , __a ) UpperCamelCase__ = LukeTokenizer.from_pretrained(__a ) # Initialize the embeddings of the special tokens UpperCamelCase__ = state_dict["""embeddings.word_embeddings.weight"""] UpperCamelCase__ = word_emb[tokenizer.convert_tokens_to_ids(["""@"""] )[0]].unsqueeze(0 ) UpperCamelCase__ = word_emb[tokenizer.convert_tokens_to_ids(["""#"""] )[0]].unsqueeze(0 ) UpperCamelCase__ = torch.cat([word_emb, ent_emb, enta_emb] ) # Initialize the query layers of the entity-aware self-attention mechanism for layer_index in range(config.num_hidden_layers ): for matrix_name in ["query.weight", "query.bias"]: UpperCamelCase__ = f"encoder.layer.{layer_index}.attention.self." UpperCamelCase__ = state_dict[prefix + matrix_name] UpperCamelCase__ = state_dict[prefix + matrix_name] UpperCamelCase__ = state_dict[prefix + matrix_name] # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks UpperCamelCase__ = state_dict["""entity_embeddings.entity_embeddings.weight"""] UpperCamelCase__ = entity_emb[entity_vocab["""[MASK]"""]] UpperCamelCase__ = LukeModel(config=__a ).eval() UpperCamelCase__ , UpperCamelCase__ = model.load_state_dict(__a , strict=__a ) if not (len(__a ) == 1 and missing_keys[0] == "embeddings.position_ids"): raise ValueError(f"Missing keys {', '.join(__a )}. 
Expected only missing embeddings.position_ids" ) if not (all(key.startswith("""entity_predictions""" ) or key.startswith("""lm_head""" ) for key in unexpected_keys )): raise ValueError( """Unexpected keys""" f" {', '.join([key for key in unexpected_keys if not (key.startswith('entity_predictions' ) or key.startswith('lm_head' ))] )}" ) # Check outputs UpperCamelCase__ = LukeTokenizer.from_pretrained(__a , task="""entity_classification""" ) UpperCamelCase__ = ( """Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the""" """ new world number one avoid a humiliating second- round exit at Wimbledon .""" ) UpperCamelCase__ = (39, 42) UpperCamelCase__ = tokenizer(__a , entity_spans=[span] , add_prefix_space=__a , return_tensors="""pt""" ) UpperCamelCase__ = model(**__a ) # Verify word hidden states if model_size == "large": UpperCamelCase__ = torch.Size((1, 42, 1_024) ) UpperCamelCase__ = torch.tensor( [[0.0_133, 0.0_865, 0.0_095], [0.3_093, -0.2_576, -0.7_418], [-0.1_720, -0.2_117, -0.2_869]] ) else: # base UpperCamelCase__ = torch.Size((1, 42, 768) ) UpperCamelCase__ = torch.tensor([[0.0_037, 0.1_368, -0.0_091], [0.1_099, 0.3_329, -0.1_095], [0.0_765, 0.5_335, 0.1_179]] ) if not (outputs.last_hidden_state.shape == expected_shape): raise ValueError( f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}" ) if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , __a , atol=1E-4 ): raise ValueError # Verify entity hidden states if model_size == "large": UpperCamelCase__ = torch.Size((1, 1, 1_024) ) UpperCamelCase__ = torch.tensor([[0.0_466, -0.0_106, -0.0_179]] ) else: # base UpperCamelCase__ = torch.Size((1, 1, 768) ) UpperCamelCase__ = torch.tensor([[0.1_457, 0.1_044, 0.0_174]] ) if not (outputs.entity_last_hidden_state.shape == expected_shape): raise ValueError( f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is" f" {expected_shape}" ) if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , __a , atol=1E-4 ): raise ValueError # Finally, save our PyTorch model and tokenizer print("""Saving PyTorch model to {}""".format(__a ) ) model.save_pretrained(__a ) def __magic_name__ ( __a : List[Any] ): '''simple docstring''' UpperCamelCase__ = {} with open(__a , """r""" , encoding="""utf-8""" ) as f: for index, line in enumerate(__a ): UpperCamelCase__ , UpperCamelCase__ = line.rstrip().split("""\t""" ) UpperCamelCase__ = index return entity_vocab if __name__ == "__main__": lowerCamelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument('''--checkpoint_path''', type=str, help='''Path to a pytorch_model.bin file.''') parser.add_argument( '''--metadata_path''', default=None, type=str, help='''Path to a metadata.json file, defining the configuration.''' ) parser.add_argument( '''--entity_vocab_path''', default=None, type=str, help='''Path to an entity_vocab.tsv file, containing the entity vocabulary.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to where to dump the output PyTorch model.''' ) parser.add_argument( '''--model_size''', default='''base''', type=str, choices=['''base''', '''large'''], help='''Size of the model to be converted.''' ) lowerCamelCase_ = parser.parse_args() convert_luke_checkpoint( args.checkpoint_path, args.metadata_path, args.entity_vocab_path, args.pytorch_dump_folder_path, args.model_size, )
86
class __A: """simple docstring""" def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = None UpperCamelCase__ = None UpperCamelCase__ = graph self._normalize_graph(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = len(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = None def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): if isinstance(sources , int ): UpperCamelCase__ = [sources] if isinstance(sinks , int ): UpperCamelCase__ = [sinks] if len(SCREAMING_SNAKE_CASE_ ) == 0 or len(SCREAMING_SNAKE_CASE_ ) == 0: return UpperCamelCase__ = sources[0] UpperCamelCase__ = sinks[0] # make fake vertex if there are more # than one source or sink if len(SCREAMING_SNAKE_CASE_ ) > 1 or len(SCREAMING_SNAKE_CASE_ ) > 1: UpperCamelCase__ = 0 for i in sources: max_input_flow += sum(self.graph[i] ) UpperCamelCase__ = len(self.graph ) + 1 for room in self.graph: room.insert(0 , 0 ) self.graph.insert(0 , [0] * size ) for i in sources: UpperCamelCase__ = max_input_flow UpperCamelCase__ = 0 UpperCamelCase__ = len(self.graph ) + 1 for room in self.graph: room.append(0 ) self.graph.append([0] * size ) for i in sinks: UpperCamelCase__ = max_input_flow UpperCamelCase__ = size - 1 def UpperCAmelCase_ (self ): if self.maximum_flow_algorithm is None: raise Exception("""You need to set maximum flow algorithm before.""" ) if self.source_index is None or self.sink_index is None: return 0 self.maximum_flow_algorithm.execute() return self.maximum_flow_algorithm.getMaximumFlow() def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = algorithm(self ) class __A: """simple docstring""" def __init__(self , SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = flow_network UpperCamelCase__ = flow_network.verticesCount UpperCamelCase__ = flow_network.sourceIndex UpperCamelCase__ = flow_network.sinkIndex # it's just a reference, so you shouldn't change # it in your algorithms, use deep copy before doing that UpperCamelCase__ = flow_network.graph UpperCamelCase__ = False def UpperCAmelCase_ (self ): if not self.executed: self._algorithm() UpperCamelCase__ = True def UpperCAmelCase_ (self ): pass class __A( __lowerCamelCase ): """simple docstring""" def __init__(self , SCREAMING_SNAKE_CASE_ ): super().__init__(SCREAMING_SNAKE_CASE_ ) # use this to save your result UpperCamelCase__ = -1 def UpperCAmelCase_ (self ): if not self.executed: raise Exception("""You should execute algorithm before using its result!""" ) return self.maximum_flow class __A( __lowerCamelCase ): """simple docstring""" def __init__(self , SCREAMING_SNAKE_CASE_ ): super().__init__(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = [[0] * self.verticies_count for i in range(self.verticies_count )] UpperCamelCase__ = [0] * self.verticies_count UpperCamelCase__ = [0] * self.verticies_count def UpperCAmelCase_ (self ): UpperCamelCase__ = self.verticies_count # push some substance to graph for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ): self.preflow[self.source_index][nextvertex_index] += bandwidth self.preflow[nextvertex_index][self.source_index] -= bandwidth self.excesses[nextvertex_index] += bandwidth # Relabel-to-front selection rule UpperCamelCase__ = [ i for i in range(self.verticies_count ) if i != self.source_index and i != self.sink_index ] # move through list UpperCamelCase__ = 0 while i < len(SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = vertices_list[i] UpperCamelCase__ = self.heights[vertex_index] self.process_vertex(SCREAMING_SNAKE_CASE_ ) if 
self.heights[vertex_index] > previous_height: # if it was relabeled, swap elements # and start from 0 index vertices_list.insert(0 , vertices_list.pop(SCREAMING_SNAKE_CASE_ ) ) UpperCamelCase__ = 0 else: i += 1 UpperCamelCase__ = sum(self.preflow[self.source_index] ) def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ ): while self.excesses[vertex_index] > 0: for neighbour_index in range(self.verticies_count ): # if it's neighbour and current vertex is higher if ( self.graph[vertex_index][neighbour_index] - self.preflow[vertex_index][neighbour_index] > 0 and self.heights[vertex_index] > self.heights[neighbour_index] ): self.push(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) self.relabel(SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = min( self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , ) self.preflow[from_index][to_index] += preflow_delta self.preflow[to_index][from_index] -= preflow_delta self.excesses[from_index] -= preflow_delta self.excesses[to_index] += preflow_delta def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = None for to_index in range(self.verticies_count ): if ( self.graph[vertex_index][to_index] - self.preflow[vertex_index][to_index] > 0 ) and (min_height is None or self.heights[to_index] < min_height): UpperCamelCase__ = self.heights[to_index] if min_height is not None: UpperCamelCase__ = min_height + 1 if __name__ == "__main__": lowerCamelCase_ = [0] lowerCamelCase_ = [3] # graph = [ # [0, 0, 4, 6, 0, 0], # [0, 0, 5, 2, 0, 0], # [0, 0, 0, 0, 4, 4], # [0, 0, 0, 0, 6, 6], # [0, 0, 0, 0, 0, 0], # [0, 0, 0, 0, 0, 0], # ] lowerCamelCase_ = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]] # prepare our network lowerCamelCase_ = FlowNetwork(graph, entrances, exits) # set algorithm flow_network.set_maximum_flow_algorithm(PushRelabelExecutor) # and calculate lowerCamelCase_ = flow_network.find_maximum_flow() print(f'maximum flow is {maximum_flow}')
86
1
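The push-relabel executor above can be cross-checked with a BFS-based Edmonds-Karp implementation, which is shorter though asymptotically slower on dense graphs. A self-contained sketch follows, run on the same 4-node capacity matrix; the result 6 matches the bottleneck on the single path 0 -> 1 -> 2 -> 3.

from collections import deque

def edmonds_karp(capacity: list[list[int]], source: int, sink: int) -> int:
    # Repeatedly find a shortest augmenting path with BFS and saturate it.
    n = len(capacity)
    residual = [row[:] for row in capacity]
    max_flow = 0
    while True:
        parent = [-1] * n
        parent[source] = source
        queue = deque([source])
        while queue and parent[sink] == -1:
            u = queue.popleft()
            for v in range(n):
                if parent[v] == -1 and residual[u][v] > 0:
                    parent[v] = u
                    queue.append(v)
        if parent[sink] == -1:  # no augmenting path remains
            return max_flow
        # find the bottleneck along the path, then update residual capacities
        bottleneck = float("inf")
        v = sink
        while v != source:
            bottleneck = min(bottleneck, residual[parent[v]][v])
            v = parent[v]
        v = sink
        while v != source:
            residual[parent[v]][v] -= bottleneck
            residual[v][parent[v]] += bottleneck
            v = parent[v]
        max_flow += bottleneck

graph = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
print(edmonds_karp(graph, 0, 3))  # 6, matching the push-relabel result above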
import gc import tempfile import unittest import numpy as np import torch from diffusers import VersatileDiffusionTextToImagePipeline from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device lowerCamelCase_ = False class __A( unittest.TestCase ): """simple docstring""" pass @nightly @require_torch_gpu class __A( unittest.TestCase ): """simple docstring""" def UpperCAmelCase_ (self ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCAmelCase_ (self ): UpperCamelCase__ = VersatileDiffusionTextToImagePipeline.from_pretrained("""shi-labs/versatile-diffusion""" ) # remove text_unet pipe.remove_unused_weights() pipe.to(SCREAMING_SNAKE_CASE_ ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = """A painting of a squirrel eating a burger """ UpperCamelCase__ = torch.manual_seed(0 ) UpperCamelCase__ = pipe( prompt=SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , guidance_scale=7.5 , num_inference_steps=2 , output_type="""numpy""" ).images with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = VersatileDiffusionTextToImagePipeline.from_pretrained(SCREAMING_SNAKE_CASE_ ) pipe.to(SCREAMING_SNAKE_CASE_ ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = generator.manual_seed(0 ) UpperCamelCase__ = pipe( prompt=SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , guidance_scale=7.5 , num_inference_steps=2 , output_type="""numpy""" ).images assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass" def UpperCAmelCase_ (self ): UpperCamelCase__ = VersatileDiffusionTextToImagePipeline.from_pretrained( """shi-labs/versatile-diffusion""" , torch_dtype=torch.floataa ) pipe.to(SCREAMING_SNAKE_CASE_ ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = """A painting of a squirrel eating a burger """ UpperCamelCase__ = torch.manual_seed(0 ) UpperCamelCase__ = pipe( prompt=SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" ).images UpperCamelCase__ = image[0, 2_53:2_56, 2_53:2_56, -1] assert image.shape == (1, 5_12, 5_12, 3) UpperCamelCase__ = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
86
from timeit import timeit def __magic_name__ ( __a : int ): '''simple docstring''' if number < 0: raise ValueError("""the value of input must not be negative""" ) UpperCamelCase__ = 0 while number: number &= number - 1 result += 1 return result def __magic_name__ ( __a : int ): '''simple docstring''' if number < 0: raise ValueError("""the value of input must not be negative""" ) UpperCamelCase__ = 0 while number: if number % 2 == 1: result += 1 number >>= 1 return result def __magic_name__ ( ): '''simple docstring''' def do_benchmark(__a : int ) -> None: UpperCamelCase__ = """import __main__ as z""" print(f"Benchmark when {number = }:" ) print(f"{get_set_bits_count_using_modulo_operator(__a ) = }" ) UpperCamelCase__ = timeit("""z.get_set_bits_count_using_modulo_operator(25)""" , setup=__a ) print(f"timeit() runs in {timing} seconds" ) print(f"{get_set_bits_count_using_brian_kernighans_algorithm(__a ) = }" ) UpperCamelCase__ = timeit( """z.get_set_bits_count_using_brian_kernighans_algorithm(25)""" , setup=__a , ) print(f"timeit() runs in {timing} seconds" ) for number in (25, 37, 58, 0): do_benchmark(__a ) print() if __name__ == "__main__": import doctest doctest.testmod() benchmark()
86
1
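Since Python 3.10, the popcount benchmarked above is also available as the built-in int.bit_count(), implemented in C; bin(n).count("1") is the portable one-liner fallback. A quick comparison sketch:

import sys

number = 25  # 0b11001 -> three set bits
print(bin(number).count("1"))  # 3, works on any Python version
if sys.version_info >= (3, 10):
    print(number.bit_count())  # 3, C-speed built-in on 3.10+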
import itertools import json import os import unittest from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class __A( __lowerCamelCase , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = RobertaTokenizer SCREAMING_SNAKE_CASE__ = RobertaTokenizerFast SCREAMING_SNAKE_CASE__ = True SCREAMING_SNAKE_CASE__ = {"""cls_token""": """<s>"""} def UpperCAmelCase_ (self ): super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt UpperCamelCase__ = [ """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """\u0120""", """\u0120l""", """\u0120n""", """\u0120lo""", """\u0120low""", """er""", """\u0120lowest""", """\u0120newer""", """\u0120wider""", """<unk>""", ] UpperCamelCase__ = dict(zip(SCREAMING_SNAKE_CASE_ , range(len(SCREAMING_SNAKE_CASE_ ) ) ) ) UpperCamelCase__ = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""] UpperCamelCase__ = {"""unk_token""": """<unk>"""} UpperCamelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) UpperCamelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp: fp.write(json.dumps(SCREAMING_SNAKE_CASE_ ) + """\n""" ) with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp: fp.write("""\n""".join(SCREAMING_SNAKE_CASE_ ) ) def UpperCAmelCase_ (self , **SCREAMING_SNAKE_CASE_ ): kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self , **SCREAMING_SNAKE_CASE_ ): kwargs.update(self.special_tokens_map ) return RobertaTokenizerFast.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = """lower newer""" UpperCamelCase__ = """lower newer""" return input_text, output_text def UpperCAmelCase_ (self ): UpperCamelCase__ = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map ) UpperCamelCase__ = """lower newer""" UpperCamelCase__ = ["""l""", """o""", """w""", """er""", """\u0120""", """n""", """e""", """w""", """er"""] UpperCamelCase__ = tokenizer.tokenize(SCREAMING_SNAKE_CASE_ ) # , add_prefix_space=True) self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = tokens + [tokenizer.unk_token] UpperCamelCase__ = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self ): UpperCamelCase__ = self.get_tokenizer() self.assertListEqual(tokenizer.encode("""Hello world!""" , add_special_tokens=SCREAMING_SNAKE_CASE_ ) , [0, 3_14_14, 2_32, 3_28, 2] ) self.assertListEqual( tokenizer.encode("""Hello world! 
cécé herlolip 418""" , add_special_tokens=SCREAMING_SNAKE_CASE_ ) , [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2] , ) @slow def UpperCAmelCase_ (self ): UpperCamelCase__ = self.tokenizer_class.from_pretrained("""roberta-base""" ) UpperCamelCase__ = tokenizer.encode("""sequence builders""" , add_special_tokens=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = tokenizer.encode("""multi-sequence build""" , add_special_tokens=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = tokenizer.encode( """sequence builders""" , add_special_tokens=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = tokenizer.encode( """sequence builders""" , """multi-sequence build""" , add_special_tokens=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) assert encoded_sentence == encoded_text_from_decode assert encoded_pair == encoded_pair_from_decode def UpperCAmelCase_ (self ): UpperCamelCase__ = self.get_tokenizer() UpperCamelCase__ = """Encode this sequence.""" UpperCamelCase__ = tokenizer.byte_encoder[""" """.encode("""utf-8""" )[0]] # Testing encoder arguments UpperCamelCase__ = tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertNotEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) tokenizer.add_special_tokens({"""bos_token""": """<s>"""} ) UpperCamelCase__ = tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = tokenizer.convert_ids_to_tokens(encoded[1] )[0] self.assertNotEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # Testing spaces after special tokens UpperCamelCase__ = """<mask>""" tokenizer.add_special_tokens( {"""mask_token""": AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ )} ) # mask token has a left space UpperCamelCase__ = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = """Encode <mask> sequence""" UpperCamelCase__ = """Encode <mask>sequence""" UpperCamelCase__ = tokenizer.encode(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = encoded.index(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = tokenizer.encode(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = encoded.index(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertNotEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self ): pass def UpperCAmelCase_ (self ): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ): UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = self.tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = """A, <mask> 
AllenNLP sentence.""" UpperCamelCase__ = tokenizer_r.encode_plus(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ , return_token_type_ids=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = tokenizer_p.encode_plus(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ , return_token_type_ids=SCREAMING_SNAKE_CASE_ ) # token_type_ids should put 0 everywhere self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , sum(tokens_p["""token_type_ids"""] ) ) # attention_mask should put 1 everywhere, so sum over length should be 1 self.assertEqual( sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , ) UpperCamelCase__ = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] ) UpperCamelCase__ = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] ) # Rust correctly handles the space before the mask while python doesn't self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] ) self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] ) self.assertSequenceEqual( SCREAMING_SNAKE_CASE_ , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] ) self.assertSequenceEqual( SCREAMING_SNAKE_CASE_ , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] ) def UpperCAmelCase_ (self ): for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ): UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained( self.tmpdirname , use_fast=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ , trim_offsets=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() ) UpperCamelCase__ = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() ) self.assertEqual(pre_tokenizer_state["""add_prefix_space"""] , SCREAMING_SNAKE_CASE_ ) self.assertEqual(post_processor_state["""add_prefix_space"""] , SCREAMING_SNAKE_CASE_ ) self.assertEqual(post_processor_state["""trim_offsets"""] , SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self ): # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and # `trim_offsets` for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ): UpperCamelCase__ = """hello""" # `hello` is a token in the vocabulary of `pretrained_name` UpperCamelCase__ = F"{text_of_1_token} {text_of_1_token}" UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained( SCREAMING_SNAKE_CASE_ , use_fast=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ , trim_offsets=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = tokenizer_r(SCREAMING_SNAKE_CASE_ , return_offsets_mapping=SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ ) self.assertEqual(encoding.offset_mapping[0] , (0, len(SCREAMING_SNAKE_CASE_ )) ) self.assertEqual( encoding.offset_mapping[1] , (len(SCREAMING_SNAKE_CASE_ ) + 1, len(SCREAMING_SNAKE_CASE_ ) + 1 + len(SCREAMING_SNAKE_CASE_ )) , ) UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained( SCREAMING_SNAKE_CASE_ , use_fast=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ , trim_offsets=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = tokenizer_r(SCREAMING_SNAKE_CASE_ , 
return_offsets_mapping=SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ ) self.assertEqual(encoding.offset_mapping[0] , (0, len(SCREAMING_SNAKE_CASE_ )) ) self.assertEqual( encoding.offset_mapping[1] , (len(SCREAMING_SNAKE_CASE_ ) + 1, len(SCREAMING_SNAKE_CASE_ ) + 1 + len(SCREAMING_SNAKE_CASE_ )) , ) UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained( SCREAMING_SNAKE_CASE_ , use_fast=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ , trim_offsets=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = tokenizer_r(SCREAMING_SNAKE_CASE_ , return_offsets_mapping=SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ ) self.assertEqual(encoding.offset_mapping[0] , (0, len(SCREAMING_SNAKE_CASE_ )) ) self.assertEqual( encoding.offset_mapping[1] , (len(SCREAMING_SNAKE_CASE_ ), len(SCREAMING_SNAKE_CASE_ ) + 1 + len(SCREAMING_SNAKE_CASE_ )) , ) UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained( SCREAMING_SNAKE_CASE_ , use_fast=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ , trim_offsets=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = tokenizer_r(SCREAMING_SNAKE_CASE_ , return_offsets_mapping=SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ ) self.assertEqual(encoding.offset_mapping[0] , (0, len(SCREAMING_SNAKE_CASE_ )) ) self.assertEqual( encoding.offset_mapping[1] , (len(SCREAMING_SNAKE_CASE_ ), len(SCREAMING_SNAKE_CASE_ ) + 1 + len(SCREAMING_SNAKE_CASE_ )) , ) UpperCamelCase__ = F" {text}" # tokenizer_r = self.rust_tokenizer_class.from_pretrained( # pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True # ) # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False) # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token))) # self.assertEqual( # encoding.offset_mapping[1], # (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), # ) UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained( SCREAMING_SNAKE_CASE_ , use_fast=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ , trim_offsets=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = tokenizer_r(SCREAMING_SNAKE_CASE_ , return_offsets_mapping=SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ ) self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(SCREAMING_SNAKE_CASE_ )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(SCREAMING_SNAKE_CASE_ ) + 1, 1 + len(SCREAMING_SNAKE_CASE_ ) + 1 + len(SCREAMING_SNAKE_CASE_ )) , ) UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained( SCREAMING_SNAKE_CASE_ , use_fast=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ , trim_offsets=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = tokenizer_r(SCREAMING_SNAKE_CASE_ , return_offsets_mapping=SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(SCREAMING_SNAKE_CASE_ )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(SCREAMING_SNAKE_CASE_ ), 1 + len(SCREAMING_SNAKE_CASE_ ) + 1 + len(SCREAMING_SNAKE_CASE_ )) , ) UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained( SCREAMING_SNAKE_CASE_ , use_fast=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ , trim_offsets=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = tokenizer_r(SCREAMING_SNAKE_CASE_ , return_offsets_mapping=SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(SCREAMING_SNAKE_CASE_ )) ) self.assertEqual( 
encoding.offset_mapping[1] , (1 + len(SCREAMING_SNAKE_CASE_ ), 1 + len(SCREAMING_SNAKE_CASE_ ) + 1 + len(SCREAMING_SNAKE_CASE_ )) , )
86
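The offset-mapping assertions in the record above hinge on two fast-tokenizer flags: add_prefix_space (prepend a space so the first word tokenizes like a mid-sentence word) and trim_offsets (exclude the leading space from each token's character span). A hedged sketch of inspecting that behaviour directly follows; it assumes network access to download roberta-base, and the comments describe the pattern the tests assert rather than guaranteed exact spans.

from transformers import RobertaTokenizerFast

for trim_offsets in (False, True):
    tok = RobertaTokenizerFast.from_pretrained(
        "roberta-base", add_prefix_space=True, trim_offsets=trim_offsets
    )
    enc = tok("hello world", return_offsets_mapping=True, add_special_tokens=False)
    # With trim_offsets=True the span of " world" starts after the space;
    # with trim_offsets=False the space is included in the span.
    print(trim_offsets, enc["offset_mapping"])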
import copy import os import tempfile from unittest import TestCase from unittest.mock import patch import numpy as np import pyarrow as pa import pyarrow.parquet as pq import pytest from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence from datasets.features import ArrayaD, ClassLabel, Features, Image, Value from datasets.features.features import ArrayaDExtensionType, cast_to_python_objects from datasets.keyhash import DuplicatedKeysError, InvalidKeyError from .utils import require_pil class __A( __lowerCamelCase ): """simple docstring""" def UpperCAmelCase_ (self ): UpperCamelCase__ = pa.array(TypedSequence([1, 2, 3] ) ) self.assertEqual(arr.type , pa.intaa() ) def UpperCAmelCase_ (self ): with self.assertRaises(SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = pa.array(TypedSequence([1, 2, 3] ) , type=pa.intaa() ) def UpperCAmelCase_ (self ): with self.assertRaises(SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = pa.array(TypedSequence([1, 2, 3] , try_type=Value("""bool""" ) , type=Value("""int64""" ) ) ) def UpperCAmelCase_ (self ): UpperCamelCase__ = pa.array(TypedSequence([1, 2, 3] , type=Value("""int32""" ) ) ) self.assertEqual(arr.type , pa.intaa() ) def UpperCAmelCase_ (self ): with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ): UpperCamelCase__ = pa.array(TypedSequence(["""foo""", """bar"""] , type=Value("""int64""" ) ) ) def UpperCAmelCase_ (self ): UpperCamelCase__ = pa.array(TypedSequence([1, 2, 3] , try_type=Value("""int32""" ) ) ) self.assertEqual(arr.type , pa.intaa() ) def UpperCAmelCase_ (self ): UpperCamelCase__ = pa.array(TypedSequence(["""foo""", """bar"""] , try_type=Value("""int64""" ) ) ) self.assertEqual(arr.type , pa.string() ) def UpperCAmelCase_ (self ): UpperCamelCase__ = pa.array(TypedSequence([[[1, 2, 3]]] , type=ArrayaD((1, 3) , """int64""" ) ) ) self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , """int64""" ) ) def UpperCAmelCase_ (self ): with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ): UpperCamelCase__ = pa.array(TypedSequence(["""foo""", """bar"""] , type=ArrayaD((1, 3) , """int64""" ) ) ) def UpperCAmelCase_ (self ): UpperCamelCase__ = pa.array(TypedSequence([[[1, 2, 3]]] , try_type=ArrayaD((1, 3) , """int64""" ) ) ) self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , """int64""" ) ) def UpperCAmelCase_ (self ): UpperCamelCase__ = pa.array(TypedSequence(["""foo""", """bar"""] , try_type=ArrayaD((1, 3) , """int64""" ) ) ) self.assertEqual(arr.type , pa.string() ) @require_pil def UpperCAmelCase_ (self ): import PIL.Image UpperCamelCase__ = PIL.Image.fromarray(np.arange(10 , dtype=np.uinta ).reshape(2 , 5 ) ) with patch( """datasets.arrow_writer.cast_to_python_objects""" , side_effect=SCREAMING_SNAKE_CASE_ ) as mock_cast_to_python_objects: UpperCamelCase__ = pa.array(TypedSequence([{"""path""": None, """bytes""": b"""image_bytes"""}, pil_image] , type=Image() ) ) UpperCamelCase__ , UpperCamelCase__ = mock_cast_to_python_objects.call_args_list[-1] self.assertIn("""optimize_list_casting""" , SCREAMING_SNAKE_CASE_ ) self.assertFalse(kwargs["""optimize_list_casting"""] ) def __magic_name__ ( __a : List[Any] , __a : int ): '''simple docstring''' UpperCamelCase__ = pa.BufferReader(__a ) if isinstance(__a , pa.Buffer ) else pa.memory_map(__a ) UpperCamelCase__ = pa.ipc.open_stream(__a ) UpperCamelCase__ = f.read_all() assert len(pa_table.to_batches() ) == expected_num_chunks assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]} del pa_table 
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 10] ) @pytest.mark.parametrize( """fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] ) def __magic_name__ ( __a : Tuple , __a : int ): '''simple docstring''' UpperCamelCase__ = pa.BufferOutputStream() UpperCamelCase__ = pa.schema(__a ) if fields else None with ArrowWriter(stream=__a , schema=__a , writer_batch_size=__a ) as writer: writer.write({"""col_1""": """foo""", """col_2""": 1} ) writer.write({"""col_1""": """bar""", """col_2""": 2} ) UpperCamelCase__ , UpperCamelCase__ = writer.finalize() assert num_examples == 2 assert num_bytes > 0 if not fields: UpperCamelCase__ = {"""col_1""": pa.string(), """col_2""": pa.intaa()} assert writer._schema == pa.schema(__a , metadata=writer._schema.metadata ) _check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 ) def __magic_name__ ( ): '''simple docstring''' UpperCamelCase__ = pa.BufferOutputStream() UpperCamelCase__ = Features({"""labels""": ClassLabel(names=["""neg""", """pos"""] )} ) with ArrowWriter(stream=__a , features=__a ) as writer: writer.write({"""labels""": 0} ) writer.write({"""labels""": 1} ) UpperCamelCase__ , UpperCamelCase__ = writer.finalize() assert num_examples == 2 assert num_bytes > 0 assert writer._schema == features.arrow_schema assert writer._schema.metadata == features.arrow_schema.metadata UpperCamelCase__ = pa.BufferReader(output.getvalue() ) UpperCamelCase__ = pa.ipc.open_stream(__a ) UpperCamelCase__ = f.read_all() UpperCamelCase__ = pa_table.schema assert pa_table.num_rows == 2 assert schema == features.arrow_schema assert schema.metadata == features.arrow_schema.metadata assert features == Features.from_arrow_schema(__a ) @pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 10] ) def __magic_name__ ( __a : str ): '''simple docstring''' UpperCamelCase__ = pa.BufferOutputStream() with ArrowWriter( stream=__a , writer_batch_size=__a , hash_salt="""split_name""" , check_duplicates=__a , ) as writer: with pytest.raises(__a ): writer.write({"""col_1""": """foo""", """col_2""": 1} , key=[1, 2] ) UpperCamelCase__ , UpperCamelCase__ = writer.finalize() @pytest.mark.parametrize("""writer_batch_size""" , [None, 2, 10] ) def __magic_name__ ( __a : str ): '''simple docstring''' UpperCamelCase__ = pa.BufferOutputStream() with ArrowWriter( stream=__a , writer_batch_size=__a , hash_salt="""split_name""" , check_duplicates=__a , ) as writer: with pytest.raises(__a ): writer.write({"""col_1""": """foo""", """col_2""": 1} , key=10 ) writer.write({"""col_1""": """bar""", """col_2""": 2} , key=10 ) UpperCamelCase__ , UpperCamelCase__ = writer.finalize() @pytest.mark.parametrize("""writer_batch_size""" , [None, 2, 10] ) def __magic_name__ ( __a : Union[str, Any] ): '''simple docstring''' UpperCamelCase__ = pa.BufferOutputStream() with ArrowWriter( stream=__a , writer_batch_size=__a , hash_salt="""split_name""" , check_duplicates=__a , ) as writer: writer.write({"""col_1""": """foo""", """col_2""": 1} , key=1 ) writer.write({"""col_1""": """bar""", """col_2""": 2} , key=2 ) UpperCamelCase__ , UpperCamelCase__ = writer.finalize() assert num_examples == 2 assert num_bytes > 0 _check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 ) @pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 10] ) @pytest.mark.parametrize( """fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, 
{"""col_1""": pa.string(), """col_2""": pa.intaa()}] ) def __magic_name__ ( __a : List[Any] , __a : Optional[int] ): '''simple docstring''' UpperCamelCase__ = pa.BufferOutputStream() UpperCamelCase__ = pa.schema(__a ) if fields else None with ArrowWriter(stream=__a , schema=__a , writer_batch_size=__a ) as writer: writer.write_batch({"""col_1""": ["""foo""", """bar"""], """col_2""": [1, 2]} ) writer.write_batch({"""col_1""": [], """col_2""": []} ) UpperCamelCase__ , UpperCamelCase__ = writer.finalize() assert num_examples == 2 assert num_bytes > 0 if not fields: UpperCamelCase__ = {"""col_1""": pa.string(), """col_2""": pa.intaa()} assert writer._schema == pa.schema(__a , metadata=writer._schema.metadata ) _check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 ) @pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 10] ) @pytest.mark.parametrize( """fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] ) def __magic_name__ ( __a : Union[str, Any] , __a : Any ): '''simple docstring''' UpperCamelCase__ = pa.BufferOutputStream() UpperCamelCase__ = pa.schema(__a ) if fields else None with ArrowWriter(stream=__a , schema=__a , writer_batch_size=__a ) as writer: writer.write_table(pa.Table.from_pydict({"""col_1""": ["""foo""", """bar"""], """col_2""": [1, 2]} ) ) UpperCamelCase__ , UpperCamelCase__ = writer.finalize() assert num_examples == 2 assert num_bytes > 0 if not fields: UpperCamelCase__ = {"""col_1""": pa.string(), """col_2""": pa.intaa()} assert writer._schema == pa.schema(__a , metadata=writer._schema.metadata ) _check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 ) @pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 10] ) @pytest.mark.parametrize( """fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] ) def __magic_name__ ( __a : Optional[Any] , __a : int ): '''simple docstring''' UpperCamelCase__ = pa.BufferOutputStream() UpperCamelCase__ = pa.schema(__a ) if fields else None with ArrowWriter(stream=__a , schema=__a , writer_batch_size=__a ) as writer: writer.write_row(pa.Table.from_pydict({"""col_1""": ["""foo"""], """col_2""": [1]} ) ) writer.write_row(pa.Table.from_pydict({"""col_1""": ["""bar"""], """col_2""": [2]} ) ) UpperCamelCase__ , UpperCamelCase__ = writer.finalize() assert num_examples == 2 assert num_bytes > 0 if not fields: UpperCamelCase__ = {"""col_1""": pa.string(), """col_2""": pa.intaa()} assert writer._schema == pa.schema(__a , metadata=writer._schema.metadata ) _check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 ) def __magic_name__ ( ): '''simple docstring''' with tempfile.TemporaryDirectory() as tmp_dir: UpperCamelCase__ = {"""col_1""": pa.string(), """col_2""": pa.intaa()} UpperCamelCase__ = os.path.join(__a , """test.arrow""" ) with ArrowWriter(path=__a , schema=pa.schema(__a ) ) as writer: writer.write_batch({"""col_1""": ["""foo""", """bar"""], """col_2""": [1, 2]} ) UpperCamelCase__ , UpperCamelCase__ = writer.finalize() assert num_examples == 2 assert num_bytes > 0 assert writer._schema == pa.schema(__a , metadata=writer._schema.metadata ) _check_output(__a , 1 ) def __magic_name__ ( __a : Any ): '''simple docstring''' if pa.types.is_list(__a ): return get_base_dtype(arr_type.value_type ) else: return arr_type def __magic_name__ ( __a : Optional[int] , __a : 
Any ): '''simple docstring''' if isinstance(lst[0] , __a ): change_first_primitive_element_in_list(lst[0] , __a ) else: UpperCamelCase__ = value @pytest.mark.parametrize("""optimized_int_type, expected_dtype""" , [(None, pa.intaa()), (Value("""int32""" ), pa.intaa())] ) @pytest.mark.parametrize("""sequence""" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] ) def __magic_name__ ( __a : Union[str, Any] , __a : Optional[int] , __a : Tuple ): '''simple docstring''' UpperCamelCase__ = pa.array(TypedSequence(__a , optimized_int_type=__a ) ) assert get_base_dtype(arr.type ) == expected_dtype @pytest.mark.parametrize( """col, expected_dtype""" , [ ("""attention_mask""", pa.inta()), ("""special_tokens_mask""", pa.inta()), ("""token_type_ids""", pa.inta()), ("""input_ids""", pa.intaa()), ("""other""", pa.intaa()), ] , ) @pytest.mark.parametrize("""sequence""" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] ) def __magic_name__ ( __a : Optional[int] , __a : str , __a : Optional[Any] ): '''simple docstring''' UpperCamelCase__ = pa.array(OptimizedTypedSequence(__a , col=__a ) ) assert get_base_dtype(arr.type ) == expected_dtype # not in range if col != "other": # avoids errors due to in-place modifications UpperCamelCase__ = copy.deepcopy(__a ) UpperCamelCase__ = np.iinfo(expected_dtype.to_pandas_dtype() ).max + 1 change_first_primitive_element_in_list(__a , __a ) UpperCamelCase__ = pa.array(OptimizedTypedSequence(__a , col=__a ) ) assert get_base_dtype(arr.type ) == pa.intaa() @pytest.mark.parametrize("""raise_exception""" , [False, True] ) def __magic_name__ ( __a : List[str] , __a : List[str] ): '''simple docstring''' UpperCamelCase__ = str(tmp_path / """dataset-train.arrow""" ) try: with ArrowWriter(path=__a ) as writer: if raise_exception: raise pa.lib.ArrowInvalid() else: writer.stream.close() except pa.lib.ArrowInvalid: pass finally: assert writer.stream.closed def __magic_name__ ( __a : Tuple ): '''simple docstring''' UpperCamelCase__ = """mock://dataset-train.arrow""" with ArrowWriter(path=__a , storage_options=mockfs.storage_options ) as writer: assert isinstance(writer._fs , type(__a ) ) assert writer._fs.storage_options == mockfs.storage_options writer.write({"""col_1""": """foo""", """col_2""": 1} ) writer.write({"""col_1""": """bar""", """col_2""": 2} ) UpperCamelCase__ , UpperCamelCase__ = writer.finalize() assert num_examples == 2 assert num_bytes > 0 assert mockfs.exists(__a ) def __magic_name__ ( ): '''simple docstring''' UpperCamelCase__ = pa.BufferOutputStream() with ParquetWriter(stream=__a ) as writer: writer.write({"""col_1""": """foo""", """col_2""": 1} ) writer.write({"""col_1""": """bar""", """col_2""": 2} ) UpperCamelCase__ , UpperCamelCase__ = writer.finalize() assert num_examples == 2 assert num_bytes > 0 UpperCamelCase__ = pa.BufferReader(output.getvalue() ) UpperCamelCase__ = pq.read_table(__a ) assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]} @require_pil @pytest.mark.parametrize("""embed_local_files""" , [False, True] ) def __magic_name__ ( __a : str , __a : Any ): '''simple docstring''' import PIL.Image UpperCamelCase__ = str(tmp_path / """test_image_rgb.jpg""" ) PIL.Image.fromarray(np.zeros((5, 5) , dtype=np.uinta ) ).save(__a , format="""png""" ) UpperCamelCase__ = pa.BufferOutputStream() with ParquetWriter( stream=__a , features=Features({"""image""": Image()} ) , embed_local_files=__a ) as writer: writer.write({"""image""": image_path} ) writer.finalize() UpperCamelCase__ = pa.BufferReader(output.getvalue() ) UpperCamelCase__ = pq.read_table(__a ) 
UpperCamelCase__ = pa_table.to_pydict() if embed_local_files: assert isinstance(out["""image"""][0]["""path"""] , __a ) with open(__a , """rb""" ) as f: assert out["image"][0]["bytes"] == f.read() else: assert out["image"][0]["path"] == image_path assert out["image"][0]["bytes"] is None def __magic_name__ ( ): '''simple docstring''' UpperCamelCase__ = pa.schema([pa.field("""col_1""" , pa.string() , nullable=__a )] ) UpperCamelCase__ = pa.BufferOutputStream() with ArrowWriter(stream=__a ) as writer: writer._build_writer(inferred_schema=__a ) assert writer._schema == pa.schema([pa.field("""col_1""" , pa.string() )] )
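# The tests above exercise the core ArrowWriter API: write examples one at a
# time, per batch, or per table, then finalize(). A minimal usage sketch
# distilled from those tests (schema and values are illustrative):
import pyarrow as pa
from datasets.arrow_writer import ArrowWriter

stream = pa.BufferOutputStream()
fields = {"col_1": pa.string(), "col_2": pa.int64()}
with ArrowWriter(stream=stream, schema=pa.schema(fields)) as writer:
    writer.write({"col_1": "foo", "col_2": 1})
    writer.write({"col_1": "bar", "col_2": 2})
    num_examples, num_bytes = writer.finalize()

# The stream now holds an Arrow IPC payload that reads back as a table.
table = pa.ipc.open_stream(pa.BufferReader(stream.getvalue())).read_all()
assert table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}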
from __future__ import annotations


class BoyerMooreSearch:
    """Boyer-Moore string search using only the bad-character heuristic."""

    def __init__(self, text: str, pattern: str):
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)

    def match_in_pattern(self, char: str) -> int:
        """Return the rightmost index of `char` in the pattern, or -1."""
        for i in range(self.patLen - 1, -1, -1):
            if char == self.pattern[i]:
                return i
        return -1

    def mismatch_in_text(self, current_pos: int) -> int:
        """Return the text index of the rightmost mismatch when the pattern is
        aligned at `current_pos`, or -1 if the whole window matches."""
        for i in range(self.patLen - 1, -1, -1):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1

    def bad_character_heuristic(self) -> list[int]:
        # Searches the pattern in the text and returns the match positions.
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            mismatch_index = self.mismatch_in_text(i)
            if mismatch_index == -1:
                positions.append(i)
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index])
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions


text = "ABAABA"
pattern = "AB"
bms = BoyerMooreSearch(text, pattern)
positions = bms.bad_character_heuristic()

if len(positions) == 0:
    print("No match found")
else:
    print("Pattern found in following positions: ")
    print(positions)
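# A caveat on the class above: reassigning `i` inside a Python `for` loop has
# no effect on the iteration (hence the lgtm suppression), so every alignment
# is checked and the "shift" is cosmetic. The classic bad-character variant
# precomputes the rightmost-occurrence table once and really does skip
# alignments; a sketch (names are illustrative):
def boyer_moore_bad_char(text: str, pattern: str) -> list[int]:
    # Rightmost index of each pattern character; absent characters map to -1.
    table = {char: i for i, char in enumerate(pattern)}
    positions, i = [], 0
    while i <= len(text) - len(pattern):
        j = len(pattern) - 1
        while j >= 0 and pattern[j] == text[i + j]:
            j -= 1
        if j < 0:
            positions.append(i)
            i += 1
        else:
            # Align the mismatched text character with its rightmost
            # occurrence in the pattern, never shifting backwards.
            i += max(1, j - table.get(text[i + j], -1))
    return positions


assert boyer_moore_bad_char("ABAABA", "AB") == [0, 3]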
from sklearn.metrics import matthews_corrcoef

import datasets


_DESCRIPTION = """
Compute the Matthews correlation coefficient (MCC)

The Matthews correlation coefficient is used in machine learning as a
measure of the quality of binary and multiclass classifications. It takes
into account true and false positives and negatives and is generally
regarded as a balanced measure which can be used even if the classes are of
very different sizes. The MCC is in essence a correlation coefficient value
between -1 and +1. A coefficient of +1 represents a perfect prediction, 0
an average random prediction and -1 an inverse prediction. The statistic
is also known as the phi coefficient. [source: Wikipedia]
"""

_KWARGS_DESCRIPTION = """
Args:
    predictions (list of int): Predicted labels, as returned by a model.
    references (list of int): Ground truth labels.
    sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.
Returns:
    matthews_correlation (dict containing float): Matthews correlation.
Examples:
    Example 1, a basic example with only predictions and references as inputs:
        >>> matthews_metric = datasets.load_metric("matthews_correlation")
        >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
        ...                                   predictions=[1, 2, 2, 0, 3, 3])
        >>> print(round(results['matthews_correlation'], 2))
        0.54

    Example 2, the same example as above, but also including sample weights:
        >>> matthews_metric = datasets.load_metric("matthews_correlation")
        >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
        ...                                   predictions=[1, 2, 2, 0, 3, 3],
        ...                                   sample_weight=[0.5, 3, 1, 1, 1, 2])
        >>> print(round(results['matthews_correlation'], 2))
        0.1

    Example 3, the same example as above, but with sample weights that cause a negative correlation:
        >>> matthews_metric = datasets.load_metric("matthews_correlation")
        >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
        ...                                   predictions=[1, 2, 2, 0, 3, 3],
        ...                                   sample_weight=[0.5, 1, 0, 0, 0, 1])
        >>> print(round(results['matthews_correlation'], 2))
        -0.25
"""

_CITATION = """\
@article{scikit-learn,
  title={Scikit-learn: Machine Learning in {P}ython},
  author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
          and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
          and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
          Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
  journal={Journal of Machine Learning Research},
  volume={12},
  pages={2825--2830},
  year={2011}
}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class MatthewsCorrelation(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int32"),
                    "references": datasets.Value("int32"),
                }
            ),
            reference_urls=[
                "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html"
            ],
        )

    def _compute(self, predictions, references, sample_weight=None):
        return {
            "matthews_correlation": float(
                matthews_corrcoef(references, predictions, sample_weight=sample_weight)
            ),
        }
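# For the binary case, the MCC described above has a closed form over
# confusion-matrix counts. A dependency-free sketch of that formula (the
# metric class itself simply defers to sklearn's implementation):
import math


def binary_mcc(tp: int, tn: int, fp: int, fn: int) -> float:
    # MCC = (TP*TN - FP*FN) / sqrt((TP+FP)(TP+FN)(TN+FP)(TN+FN))
    denominator = math.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
    return (tp * tn - fp * fn) / denominator if denominator else 0.0


assert binary_mcc(tp=5, tn=5, fp=0, fn=0) == 1.0   # perfect prediction
assert binary_mcc(tp=0, tn=0, fp=5, fn=5) == -1.0  # inverse prediction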
def get_set_bits_count(number: int) -> int:
    """Count the set bits in a non-negative integer (Brian Kernighan's algorithm)."""
    if not isinstance(number, int) or number < 0:
        raise ValueError("Input must be a non-negative integer")
    count = 0
    while number:
        # Clearing the lowest set bit on each pass means the loop runs once
        # per set bit, not once per bit position (so never 32 times for a
        # 32-bit value unless all bits are set).
        number &= number - 1
        count += 1
    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
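# Why `number &= number - 1` clears exactly one set bit: subtracting 1 flips
# the lowest set bit and every zero below it, so the AND keeps all higher
# bits intact and zeroes just that bit. A quick trace:
n = 0b10100
n &= n - 1           # 0b10100 & 0b10011
assert n == 0b10000  # lowest set bit gone, nothing else touched
assert get_set_bits_count(0b10100) == 2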
def validate_initial_digits(credit_card_number: str) -> bool:
    """Return True if the number starts with a plausible issuer prefix."""
    return credit_card_number.startswith(("34", "35", "37", "4", "5", "6"))


def luhn_validation(credit_card_number: str) -> bool:
    """Return True if the number passes the Luhn checksum."""
    cc_number = credit_card_number
    total = 0
    half_len = len(cc_number) - 2
    for i in range(half_len, -1, -2):
        # Double the value of every second digit.
        digit = int(cc_number[i])
        digit *= 2
        # If doubling a digit yields a two-digit number, i.e. greater than 9
        # (e.g. 6 x 2 = 12), add the digits of the product
        # (12: 1 + 2 = 3, 15: 1 + 5 = 6) to get a single-digit number.
        if digit > 9:
            digit %= 10
            digit += 1
        cc_number = cc_number[:i] + str(digit) + cc_number[i + 1 :]
        total += digit
    # Sum up the remaining digits.
    for i in range(len(cc_number) - 1, -1, -2):
        total += int(cc_number[i])
    return total % 10 == 0


def validate_credit_card_number(credit_card_number: str) -> bool:
    error_message = f"{credit_card_number} is an invalid credit card number because"
    if not credit_card_number.isdigit():
        print(f"{error_message} it has nonnumerical characters.")
        return False
    if not 13 <= len(credit_card_number) <= 16:
        print(f"{error_message} of its length.")
        return False
    if not validate_initial_digits(credit_card_number):
        print(f"{error_message} of its first two digits.")
        return False
    if not luhn_validation(credit_card_number):
        print(f"{error_message} it fails the Luhn check.")
        return False
    print(f"{credit_card_number} is a valid credit card number.")
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    validate_credit_card_number("4111111111111111")
    validate_credit_card_number("32323")
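# Tracing the Luhn arithmetic on the classic test number 79927398713 makes
# the doubling rule concrete. An equivalent one-pass sketch (the `d - 9`
# trick replaces the digit-sum step used above):
def luhn_checksum(number: str) -> int:
    digits = [int(d) for d in number]
    for i in range(len(digits) - 2, -1, -2):
        doubled = digits[i] * 2
        digits[i] = doubled - 9 if doubled > 9 else doubled  # 16 -> 7, same as 1 + 6
    return sum(digits) % 10


assert luhn_checksum("79927398713") == 0  # valid: checksum totals 70
assert luhn_checksum("79927398710") != 0  # corrupted check digit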
def search(list_data: list, key: int, left: int = 0, right: int = 0) -> int:
    """Recursively probe the list from both ends for `key`.

    Returns the index of `key` if present, otherwise -1.
    """
    right = right or len(list_data) - 1
    if left > right:
        return -1
    elif list_data[left] == key:
        return left
    elif list_data[right] == key:
        return right
    else:
        return search(list_data, key, left + 1, right - 1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
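# Usage of the double-ended linear search above: both ends are probed on
# each call, recursing inwards, so this is O(n) and needs no sorted input.
data = [4, 8, 15, 16, 23, 42]
assert search(data, 15) == 2   # found scanning from the left
assert search(data, 42) == 5   # found at the right end on the first call
assert search(data, 7) == -1   # absent
# Caveat: `right = right or len(list_data) - 1` treats an explicit right=0
# as "unset", so the right bound silently becomes the last index.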
def solution(length: int = 50) -> int:
    """Count the ways to fill a row of `length` units with unit tiles and
    tiles of lengths 2, 3 and 4 (Project Euler-style tiling problem)."""
    ways_number = [1] * (length + 1)
    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                ways_number[row_length] += ways_number[
                    row_length - tile_start - tile_length
                ]
    return ways_number[length]


if __name__ == "__main__":
    print(f"{solution() = }")
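# The triple loop above conditions on the rightmost non-unit tile; it is
# equivalent to the linear recurrence f(n) = f(n-1) + f(n-2) + f(n-3) + f(n-4)
# (the row ends in a unit tile or a tile of length 2, 3 or 4). A cross-check:
def solution_recurrence(length: int = 50) -> int:
    ways = [1, 1, 2, 4]  # f(0)..f(3)
    for _ in range(4, length + 1):
        ways.append(sum(ways[-4:]))
    return ways[length]


assert [solution_recurrence(n) for n in range(6)] == [1, 1, 2, 4, 8, 15]
assert solution_recurrence(50) == solution(50)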
import torch from torch import nn from torch.nn import CrossEntropyLoss, MSELoss from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward from transformers.models.bert.modeling_bert import ( BERT_INPUTS_DOCSTRING, BERT_START_DOCSTRING, BertEmbeddings, BertLayer, BertPooler, BertPreTrainedModel, ) def __magic_name__ ( __a : List[str] ): '''simple docstring''' UpperCamelCase__ = torch.exp(__a ) UpperCamelCase__ = torch.sum(__a , dim=1 ) # sum of exp(x_i) UpperCamelCase__ = torch.sum(x * exp_x , dim=1 ) # sum of x_i * exp(x_i) return torch.log(__a ) - B / A class __A( nn.Module ): """simple docstring""" def __init__(self , SCREAMING_SNAKE_CASE_ ): super().__init__() UpperCamelCase__ = config.output_attentions UpperCamelCase__ = config.output_hidden_states UpperCamelCase__ = nn.ModuleList([BertLayer(SCREAMING_SNAKE_CASE_ ) for _ in range(config.num_hidden_layers )] ) UpperCamelCase__ = nn.ModuleList([BertHighway(SCREAMING_SNAKE_CASE_ ) for _ in range(config.num_hidden_layers )] ) UpperCamelCase__ = [-1 for _ in range(config.num_hidden_layers )] def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ ): if (type(SCREAMING_SNAKE_CASE_ ) is float) or (type(SCREAMING_SNAKE_CASE_ ) is int): for i in range(len(self.early_exit_entropy ) ): UpperCamelCase__ = x else: UpperCamelCase__ = x def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = pooler.state_dict() for highway in self.highway: for name, param in highway.pooler.state_dict().items(): param.copy_(loaded_model[name] ) def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , ): UpperCamelCase__ = () UpperCamelCase__ = () UpperCamelCase__ = () for i, layer_module in enumerate(self.layer ): if self.output_hidden_states: UpperCamelCase__ = all_hidden_states + (hidden_states,) UpperCamelCase__ = layer_module( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , head_mask[i] , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = layer_outputs[0] if self.output_attentions: UpperCamelCase__ = all_attentions + (layer_outputs[1],) UpperCamelCase__ = (hidden_states,) if self.output_hidden_states: UpperCamelCase__ = current_outputs + (all_hidden_states,) if self.output_attentions: UpperCamelCase__ = current_outputs + (all_attentions,) UpperCamelCase__ = self.highway[i](SCREAMING_SNAKE_CASE_ ) # logits, pooled_output if not self.training: UpperCamelCase__ = highway_exit[0] UpperCamelCase__ = entropy(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = highway_exit + (highway_entropy,) # logits, hidden_states(?), entropy UpperCamelCase__ = all_highway_exits + (highway_exit,) if highway_entropy < self.early_exit_entropy[i]: UpperCamelCase__ = (highway_logits,) + current_outputs[1:] + (all_highway_exits,) raise HighwayException(SCREAMING_SNAKE_CASE_ , i + 1 ) else: UpperCamelCase__ = all_highway_exits + (highway_exit,) # Add last layer if self.output_hidden_states: UpperCamelCase__ = all_hidden_states + (hidden_states,) UpperCamelCase__ = (hidden_states,) if self.output_hidden_states: UpperCamelCase__ = outputs + (all_hidden_states,) if self.output_attentions: UpperCamelCase__ = outputs + (all_attentions,) UpperCamelCase__ = outputs + (all_highway_exits,) return outputs # last-layer hidden state, (all hidden states), (all attentions), all highway exits @add_start_docstrings( """The Bert Model transformer with early exiting (DeeBERT). 
""" , __lowerCamelCase , ) class __A( __lowerCamelCase ): """simple docstring""" def __init__(self , SCREAMING_SNAKE_CASE_ ): super().__init__(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = config UpperCamelCase__ = BertEmbeddings(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = DeeBertEncoder(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = BertPooler(SCREAMING_SNAKE_CASE_ ) self.init_weights() def UpperCAmelCase_ (self ): self.encoder.init_highway_pooler(self.pooler ) def UpperCAmelCase_ (self ): return self.embeddings.word_embeddings def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = value def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ ): for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(SCREAMING_SNAKE_CASE_ ) @add_start_docstrings_to_model_forward(SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , ): if input_ids is not None and inputs_embeds is not None: raise ValueError("""You cannot specify both input_ids and inputs_embeds at the same time""" ) elif input_ids is not None: UpperCamelCase__ = input_ids.size() elif inputs_embeds is not None: UpperCamelCase__ = inputs_embeds.size()[:-1] else: raise ValueError("""You have to specify either input_ids or inputs_embeds""" ) UpperCamelCase__ = input_ids.device if input_ids is not None else inputs_embeds.device if attention_mask is None: UpperCamelCase__ = torch.ones(SCREAMING_SNAKE_CASE_ , device=SCREAMING_SNAKE_CASE_ ) if encoder_attention_mask is None: UpperCamelCase__ = torch.ones(SCREAMING_SNAKE_CASE_ , device=SCREAMING_SNAKE_CASE_ ) if token_type_ids is None: UpperCamelCase__ = torch.zeros(SCREAMING_SNAKE_CASE_ , dtype=torch.long , device=SCREAMING_SNAKE_CASE_ ) # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. 
UpperCamelCase__ = self.get_extended_attention_mask(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # If a 2D ou 3D attention mask is provided for the cross-attention # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] if encoder_attention_mask.dim() == 3: UpperCamelCase__ = encoder_attention_mask[:, None, :, :] if encoder_attention_mask.dim() == 2: UpperCamelCase__ = encoder_attention_mask[:, None, None, :] UpperCamelCase__ = encoder_extended_attention_mask.to( dtype=next(self.parameters() ).dtype ) # fp16 compatibility UpperCamelCase__ = (1.0 - encoder_extended_attention_mask) * -1_0000.0 # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] UpperCamelCase__ = self.get_head_mask(SCREAMING_SNAKE_CASE_ , self.config.num_hidden_layers ) UpperCamelCase__ = self.embeddings( input_ids=SCREAMING_SNAKE_CASE_ , position_ids=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , inputs_embeds=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = self.encoder( SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , head_mask=SCREAMING_SNAKE_CASE_ , encoder_hidden_states=SCREAMING_SNAKE_CASE_ , encoder_attention_mask=SCREAMING_SNAKE_CASE_ , ) UpperCamelCase__ = encoder_outputs[0] UpperCamelCase__ = self.pooler(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = ( sequence_output, pooled_output, ) + encoder_outputs[ 1: ] # add hidden_states and attentions if they are here return outputs # sequence_output, pooled_output, (hidden_states), (attentions), highway exits class __A( __lowerCamelCase ): """simple docstring""" def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = message UpperCamelCase__ = exit_layer # start from 1! class __A( nn.Module ): """simple docstring""" def __init__(self , SCREAMING_SNAKE_CASE_ ): super().__init__() UpperCamelCase__ = BertPooler(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = nn.Dropout(config.hidden_dropout_prob ) UpperCamelCase__ = nn.Linear(config.hidden_size , config.num_labels ) def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ ): # Pooler UpperCamelCase__ = encoder_outputs[0] UpperCamelCase__ = self.pooler(SCREAMING_SNAKE_CASE_ ) # "return" pooler_output # BertModel UpperCamelCase__ = (pooler_input, pooler_output) + encoder_outputs[1:] # "return" bmodel_output # Dropout and classification UpperCamelCase__ = bmodel_output[1] UpperCamelCase__ = self.dropout(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = self.classifier(SCREAMING_SNAKE_CASE_ ) return logits, pooled_output @add_start_docstrings( """Bert Model (with early exiting - DeeBERT) with a classifier on top, also takes care of multi-layer training. 
""" , __lowerCamelCase , ) class __A( __lowerCamelCase ): """simple docstring""" def __init__(self , SCREAMING_SNAKE_CASE_ ): super().__init__(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = config.num_labels UpperCamelCase__ = config.num_hidden_layers UpperCamelCase__ = DeeBertModel(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = nn.Dropout(config.hidden_dropout_prob ) UpperCamelCase__ = nn.Linear(config.hidden_size , self.config.num_labels ) self.init_weights() @add_start_docstrings_to_model_forward(SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=-1 , SCREAMING_SNAKE_CASE_=False , ): UpperCamelCase__ = self.num_layers try: UpperCamelCase__ = self.bert( SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , position_ids=SCREAMING_SNAKE_CASE_ , head_mask=SCREAMING_SNAKE_CASE_ , inputs_embeds=SCREAMING_SNAKE_CASE_ , ) # sequence_output, pooled_output, (hidden_states), (attentions), highway exits UpperCamelCase__ = outputs[1] UpperCamelCase__ = self.dropout(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = self.classifier(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = (logits,) + outputs[2:] # add hidden states and attention if they are here except HighwayException as e: UpperCamelCase__ = e.message UpperCamelCase__ = e.exit_layer UpperCamelCase__ = outputs[0] if not self.training: UpperCamelCase__ = entropy(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = [] UpperCamelCase__ = [] if labels is not None: if self.num_labels == 1: # We are doing regression UpperCamelCase__ = MSELoss() UpperCamelCase__ = loss_fct(logits.view(-1 ) , labels.view(-1 ) ) else: UpperCamelCase__ = CrossEntropyLoss() UpperCamelCase__ = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) # work with highway exits UpperCamelCase__ = [] for highway_exit in outputs[-1]: UpperCamelCase__ = highway_exit[0] if not self.training: highway_logits_all.append(SCREAMING_SNAKE_CASE_ ) highway_entropy.append(highway_exit[2] ) if self.num_labels == 1: # We are doing regression UpperCamelCase__ = MSELoss() UpperCamelCase__ = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) ) else: UpperCamelCase__ = CrossEntropyLoss() UpperCamelCase__ = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) highway_losses.append(SCREAMING_SNAKE_CASE_ ) if train_highway: UpperCamelCase__ = (sum(highway_losses[:-1] ),) + outputs # exclude the final highway, of course else: UpperCamelCase__ = (loss,) + outputs if not self.training: UpperCamelCase__ = outputs + ((original_entropy, highway_entropy), exit_layer) if output_layer >= 0: UpperCamelCase__ = ( (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:] ) # use the highway of the last layer return outputs # (loss), logits, (hidden_states), (attentions), (highway_exits)
import itertools import json import os import unittest from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class __A( __lowerCamelCase , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = RobertaTokenizer SCREAMING_SNAKE_CASE__ = RobertaTokenizerFast SCREAMING_SNAKE_CASE__ = True SCREAMING_SNAKE_CASE__ = {"""cls_token""": """<s>"""} def UpperCAmelCase_ (self ): super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt UpperCamelCase__ = [ """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """\u0120""", """\u0120l""", """\u0120n""", """\u0120lo""", """\u0120low""", """er""", """\u0120lowest""", """\u0120newer""", """\u0120wider""", """<unk>""", ] UpperCamelCase__ = dict(zip(SCREAMING_SNAKE_CASE_ , range(len(SCREAMING_SNAKE_CASE_ ) ) ) ) UpperCamelCase__ = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""] UpperCamelCase__ = {"""unk_token""": """<unk>"""} UpperCamelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) UpperCamelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp: fp.write(json.dumps(SCREAMING_SNAKE_CASE_ ) + """\n""" ) with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp: fp.write("""\n""".join(SCREAMING_SNAKE_CASE_ ) ) def UpperCAmelCase_ (self , **SCREAMING_SNAKE_CASE_ ): kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self , **SCREAMING_SNAKE_CASE_ ): kwargs.update(self.special_tokens_map ) return RobertaTokenizerFast.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = """lower newer""" UpperCamelCase__ = """lower newer""" return input_text, output_text def UpperCAmelCase_ (self ): UpperCamelCase__ = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map ) UpperCamelCase__ = """lower newer""" UpperCamelCase__ = ["""l""", """o""", """w""", """er""", """\u0120""", """n""", """e""", """w""", """er"""] UpperCamelCase__ = tokenizer.tokenize(SCREAMING_SNAKE_CASE_ ) # , add_prefix_space=True) self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = tokens + [tokenizer.unk_token] UpperCamelCase__ = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self ): UpperCamelCase__ = self.get_tokenizer() self.assertListEqual(tokenizer.encode("""Hello world!""" , add_special_tokens=SCREAMING_SNAKE_CASE_ ) , [0, 3_14_14, 2_32, 3_28, 2] ) self.assertListEqual( tokenizer.encode("""Hello world! 
cécé herlolip 418""" , add_special_tokens=SCREAMING_SNAKE_CASE_ ) , [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2] , ) @slow def UpperCAmelCase_ (self ): UpperCamelCase__ = self.tokenizer_class.from_pretrained("""roberta-base""" ) UpperCamelCase__ = tokenizer.encode("""sequence builders""" , add_special_tokens=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = tokenizer.encode("""multi-sequence build""" , add_special_tokens=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = tokenizer.encode( """sequence builders""" , add_special_tokens=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = tokenizer.encode( """sequence builders""" , """multi-sequence build""" , add_special_tokens=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) assert encoded_sentence == encoded_text_from_decode assert encoded_pair == encoded_pair_from_decode def UpperCAmelCase_ (self ): UpperCamelCase__ = self.get_tokenizer() UpperCamelCase__ = """Encode this sequence.""" UpperCamelCase__ = tokenizer.byte_encoder[""" """.encode("""utf-8""" )[0]] # Testing encoder arguments UpperCamelCase__ = tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertNotEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) tokenizer.add_special_tokens({"""bos_token""": """<s>"""} ) UpperCamelCase__ = tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = tokenizer.convert_ids_to_tokens(encoded[1] )[0] self.assertNotEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # Testing spaces after special tokens UpperCamelCase__ = """<mask>""" tokenizer.add_special_tokens( {"""mask_token""": AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ )} ) # mask token has a left space UpperCamelCase__ = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = """Encode <mask> sequence""" UpperCamelCase__ = """Encode <mask>sequence""" UpperCamelCase__ = tokenizer.encode(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = encoded.index(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = tokenizer.encode(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = encoded.index(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertNotEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self ): pass def UpperCAmelCase_ (self ): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ): UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = self.tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = """A, <mask> 
AllenNLP sentence.""" UpperCamelCase__ = tokenizer_r.encode_plus(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ , return_token_type_ids=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = tokenizer_p.encode_plus(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ , return_token_type_ids=SCREAMING_SNAKE_CASE_ ) # token_type_ids should put 0 everywhere self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , sum(tokens_p["""token_type_ids"""] ) ) # attention_mask should put 1 everywhere, so sum over length should be 1 self.assertEqual( sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , ) UpperCamelCase__ = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] ) UpperCamelCase__ = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] ) # Rust correctly handles the space before the mask while python doesnt self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] ) self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] ) self.assertSequenceEqual( SCREAMING_SNAKE_CASE_ , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] ) self.assertSequenceEqual( SCREAMING_SNAKE_CASE_ , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] ) def UpperCAmelCase_ (self ): for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ): UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained( self.tmpdirname , use_fast=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ , trim_offsets=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() ) UpperCamelCase__ = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() ) self.assertEqual(pre_tokenizer_state["""add_prefix_space"""] , SCREAMING_SNAKE_CASE_ ) self.assertEqual(post_processor_state["""add_prefix_space"""] , SCREAMING_SNAKE_CASE_ ) self.assertEqual(post_processor_state["""trim_offsets"""] , SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self ): # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and # `trim_offsets` for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ): UpperCamelCase__ = """hello""" # `hello` is a token in the vocabulary of `pretrained_name` UpperCamelCase__ = F"{text_of_1_token} {text_of_1_token}" UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained( SCREAMING_SNAKE_CASE_ , use_fast=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ , trim_offsets=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = tokenizer_r(SCREAMING_SNAKE_CASE_ , return_offsets_mapping=SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ ) self.assertEqual(encoding.offset_mapping[0] , (0, len(SCREAMING_SNAKE_CASE_ )) ) self.assertEqual( encoding.offset_mapping[1] , (len(SCREAMING_SNAKE_CASE_ ) + 1, len(SCREAMING_SNAKE_CASE_ ) + 1 + len(SCREAMING_SNAKE_CASE_ )) , ) UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained( SCREAMING_SNAKE_CASE_ , use_fast=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ , trim_offsets=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = tokenizer_r(SCREAMING_SNAKE_CASE_ , 
return_offsets_mapping=SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ ) self.assertEqual(encoding.offset_mapping[0] , (0, len(SCREAMING_SNAKE_CASE_ )) ) self.assertEqual( encoding.offset_mapping[1] , (len(SCREAMING_SNAKE_CASE_ ) + 1, len(SCREAMING_SNAKE_CASE_ ) + 1 + len(SCREAMING_SNAKE_CASE_ )) , ) UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained( SCREAMING_SNAKE_CASE_ , use_fast=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ , trim_offsets=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = tokenizer_r(SCREAMING_SNAKE_CASE_ , return_offsets_mapping=SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ ) self.assertEqual(encoding.offset_mapping[0] , (0, len(SCREAMING_SNAKE_CASE_ )) ) self.assertEqual( encoding.offset_mapping[1] , (len(SCREAMING_SNAKE_CASE_ ), len(SCREAMING_SNAKE_CASE_ ) + 1 + len(SCREAMING_SNAKE_CASE_ )) , ) UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained( SCREAMING_SNAKE_CASE_ , use_fast=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ , trim_offsets=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = tokenizer_r(SCREAMING_SNAKE_CASE_ , return_offsets_mapping=SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ ) self.assertEqual(encoding.offset_mapping[0] , (0, len(SCREAMING_SNAKE_CASE_ )) ) self.assertEqual( encoding.offset_mapping[1] , (len(SCREAMING_SNAKE_CASE_ ), len(SCREAMING_SNAKE_CASE_ ) + 1 + len(SCREAMING_SNAKE_CASE_ )) , ) UpperCamelCase__ = F" {text}" # tokenizer_r = self.rust_tokenizer_class.from_pretrained( # pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True # ) # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False) # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token))) # self.assertEqual( # encoding.offset_mapping[1], # (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), # ) UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained( SCREAMING_SNAKE_CASE_ , use_fast=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ , trim_offsets=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = tokenizer_r(SCREAMING_SNAKE_CASE_ , return_offsets_mapping=SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ ) self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(SCREAMING_SNAKE_CASE_ )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(SCREAMING_SNAKE_CASE_ ) + 1, 1 + len(SCREAMING_SNAKE_CASE_ ) + 1 + len(SCREAMING_SNAKE_CASE_ )) , ) UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained( SCREAMING_SNAKE_CASE_ , use_fast=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ , trim_offsets=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = tokenizer_r(SCREAMING_SNAKE_CASE_ , return_offsets_mapping=SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(SCREAMING_SNAKE_CASE_ )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(SCREAMING_SNAKE_CASE_ ), 1 + len(SCREAMING_SNAKE_CASE_ ) + 1 + len(SCREAMING_SNAKE_CASE_ )) , ) UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained( SCREAMING_SNAKE_CASE_ , use_fast=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ , trim_offsets=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = tokenizer_r(SCREAMING_SNAKE_CASE_ , return_offsets_mapping=SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(SCREAMING_SNAKE_CASE_ )) ) self.assertEqual( 
encoding.offset_mapping[1] , (1 + len(SCREAMING_SNAKE_CASE_ ), 1 + len(SCREAMING_SNAKE_CASE_ ) + 1 + len(SCREAMING_SNAKE_CASE_ )) , )
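# The offset-mapping assertions above all reduce to how `add_prefix_space`
# and `trim_offsets` shift character spans. A runnable distillation (this
# downloads the `roberta-base` tokenizer):
from transformers import RobertaTokenizerFast

tok = RobertaTokenizerFast.from_pretrained(
    "roberta-base", add_prefix_space=True, trim_offsets=True
)
enc = tok("hello hello", return_offsets_mapping=True, add_special_tokens=False)
# trim_offsets=True excludes the word-leading space from each span, so the
# second token starts at 6 rather than 5:
assert enc.offset_mapping == [(0, 5), (6, 11)]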
from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional, Union from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast from ...utils import logging if TYPE_CHECKING: from ...feature_extraction_utils import FeatureExtractionMixin from ...tokenization_utils_base import PreTrainedTokenizerBase from ...utils import TensorType lowerCamelCase_ = logging.get_logger(__name__) lowerCamelCase_ = { '''openai/whisper-base''': '''https://huggingface.co/openai/whisper-base/resolve/main/config.json''', } # fmt: off lowerCamelCase_ = [ 1, 2, 7, 8, 9, 10, 14, 25, 26, 27, 28, 29, 31, 58, 59, 60, 61, 62, 63, 90, 91, 92, 93, 3_57, 3_66, 4_38, 5_32, 6_85, 7_05, 7_96, 9_30, 10_58, 12_20, 12_67, 12_79, 13_03, 13_43, 13_77, 13_91, 16_35, 17_82, 18_75, 21_62, 23_61, 24_88, 34_67, 40_08, 42_11, 46_00, 48_08, 52_99, 58_55, 63_29, 72_03, 96_09, 99_59, 1_05_63, 1_07_86, 1_14_20, 1_17_09, 1_19_07, 1_31_63, 1_36_97, 1_37_00, 1_48_08, 1_53_06, 1_64_10, 1_67_91, 1_79_92, 1_92_03, 1_95_10, 2_07_24, 2_23_05, 2_29_35, 2_70_07, 3_01_09, 3_04_20, 3_34_09, 3_49_49, 4_02_83, 4_04_93, 4_05_49, 4_72_82, 4_91_46, 5_02_57, 5_03_59, 5_03_60, 5_03_61 ] lowerCamelCase_ = [ 1, 2, 7, 8, 9, 10, 14, 25, 26, 27, 28, 29, 31, 58, 59, 60, 61, 62, 63, 90, 91, 92, 93, 3_59, 5_03, 5_22, 5_42, 8_73, 8_93, 9_02, 9_18, 9_22, 9_31, 13_50, 18_53, 19_82, 24_60, 26_27, 32_46, 32_53, 32_68, 35_36, 38_46, 39_61, 41_83, 46_67, 65_85, 66_47, 72_73, 90_61, 93_83, 1_04_28, 1_09_29, 1_19_38, 1_20_33, 1_23_31, 1_25_62, 1_37_93, 1_41_57, 1_46_35, 1_52_65, 1_56_18, 1_65_53, 1_66_04, 1_83_62, 1_89_56, 2_00_75, 2_16_75, 2_25_20, 2_61_30, 2_61_61, 2_64_35, 2_82_79, 2_94_64, 3_16_50, 3_23_02, 3_24_70, 3_68_65, 4_28_63, 4_74_25, 4_98_70, 5_02_54, 5_02_58, 5_03_60, 5_03_61, 5_03_62 ] class __A( __lowerCamelCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = """whisper""" SCREAMING_SNAKE_CASE__ = ["""past_key_values"""] SCREAMING_SNAKE_CASE__ = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""} def __init__(self , SCREAMING_SNAKE_CASE_=5_18_65 , SCREAMING_SNAKE_CASE_=80 , SCREAMING_SNAKE_CASE_=6 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=6 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=15_36 , SCREAMING_SNAKE_CASE_=15_36 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=5_02_57 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=2_56 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.02 , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=15_00 , SCREAMING_SNAKE_CASE_=4_48 , SCREAMING_SNAKE_CASE_=5_02_56 , SCREAMING_SNAKE_CASE_=5_02_56 , SCREAMING_SNAKE_CASE_=5_02_56 , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=[2_20, 5_02_56] , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=2_56 , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=0.05 , SCREAMING_SNAKE_CASE_=10 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=10 , SCREAMING_SNAKE_CASE_=0 , SCREAMING_SNAKE_CASE_=7 , **SCREAMING_SNAKE_CASE_ , ): UpperCamelCase__ = vocab_size UpperCamelCase__ = num_mel_bins UpperCamelCase__ = d_model UpperCamelCase__ = encoder_layers UpperCamelCase__ = encoder_attention_heads UpperCamelCase__ = decoder_layers UpperCamelCase__ = decoder_attention_heads UpperCamelCase__ = decoder_ffn_dim UpperCamelCase__ = encoder_ffn_dim UpperCamelCase__ = dropout 
UpperCamelCase__ = attention_dropout UpperCamelCase__ = activation_dropout UpperCamelCase__ = activation_function UpperCamelCase__ = init_std UpperCamelCase__ = encoder_layerdrop UpperCamelCase__ = decoder_layerdrop UpperCamelCase__ = use_cache UpperCamelCase__ = encoder_layers UpperCamelCase__ = scale_embedding # scale factor will be sqrt(d_model) if True UpperCamelCase__ = max_source_positions UpperCamelCase__ = max_target_positions # Audio Classification-specific parameters. Feel free to ignore for other classes. UpperCamelCase__ = classifier_proj_size UpperCamelCase__ = use_weighted_layer_sum # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 UpperCamelCase__ = apply_spec_augment UpperCamelCase__ = mask_time_prob UpperCamelCase__ = mask_time_length UpperCamelCase__ = mask_time_min_masks UpperCamelCase__ = mask_feature_prob UpperCamelCase__ = mask_feature_length UpperCamelCase__ = mask_feature_min_masks UpperCamelCase__ = median_filter_width super().__init__( pad_token_id=SCREAMING_SNAKE_CASE_ , bos_token_id=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ , is_encoder_decoder=SCREAMING_SNAKE_CASE_ , decoder_start_token_id=SCREAMING_SNAKE_CASE_ , suppress_tokens=SCREAMING_SNAKE_CASE_ , begin_suppress_tokens=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , ) class __A( __lowerCamelCase ): """simple docstring""" @property def UpperCAmelCase_ (self ): UpperCamelCase__ = OrderedDict( [ ("""input_features""", {0: """batch""", 1: """feature_size""", 2: """encoder_sequence"""}), ] ) if self.use_past: UpperCamelCase__ = {0: """batch"""} else: UpperCamelCase__ = {0: """batch""", 1: """decoder_sequence"""} if self.use_past: self.fill_with_past_key_values_(SCREAMING_SNAKE_CASE_ , direction="""inputs""" ) return common_inputs def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = -1 , SCREAMING_SNAKE_CASE_ = -1 , SCREAMING_SNAKE_CASE_ = False , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 2_20_50 , SCREAMING_SNAKE_CASE_ = 5.0 , SCREAMING_SNAKE_CASE_ = 2_20 , ): UpperCamelCase__ = OrderedDict() UpperCamelCase__ = OnnxConfig.generate_dummy_inputs( self , preprocessor=preprocessor.feature_extractor , batch_size=SCREAMING_SNAKE_CASE_ , framework=SCREAMING_SNAKE_CASE_ , sampling_rate=SCREAMING_SNAKE_CASE_ , time_duration=SCREAMING_SNAKE_CASE_ , frequency=SCREAMING_SNAKE_CASE_ , ) UpperCamelCase__ = encoder_inputs["""input_features"""].shape[2] UpperCamelCase__ = encoder_sequence_length // 2 if self.use_past else seq_length UpperCamelCase__ = super().generate_dummy_inputs( preprocessor.tokenizer , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = encoder_inputs.pop("""input_features""" ) UpperCamelCase__ = decoder_inputs.pop("""decoder_input_ids""" ) if "past_key_values" in decoder_inputs: UpperCamelCase__ = decoder_inputs.pop("""past_key_values""" ) return dummy_inputs @property def UpperCAmelCase_ (self ): return 1E-3
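# The defaults above fully specify a model, so the config can be instantiated
# directly (no checkpoint needed) to build a randomly initialised Whisper for
# smoke tests:
from transformers import WhisperConfig, WhisperForConditionalGeneration

config = WhisperConfig()  # picks up every default defined above
model = WhisperForConditionalGeneration(config)
print(config.d_model, config.encoder_layers, config.max_source_positions)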
import argparse import json import os import pickle import shutil import numpy as np import torch from distiller import Distiller from lm_seqs_dataset import LmSeqsDataset from transformers import ( BertConfig, BertForMaskedLM, BertTokenizer, DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer, GPTaConfig, GPTaLMHeadModel, GPTaTokenizer, RobertaConfig, RobertaForMaskedLM, RobertaTokenizer, ) from utils import git_log, init_gpu_params, logger, set_seed lowerCamelCase_ = { '''distilbert''': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer), '''roberta''': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer), '''bert''': (BertConfig, BertForMaskedLM, BertTokenizer), '''gpt2''': (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer), } def __magic_name__ ( __a : Any ): '''simple docstring''' assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0) assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0) if args.mlm: assert os.path.isfile(args.token_counts ) assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"]) else: assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"]) assert args.teacher_type == args.student_type or ( args.student_type == "distilbert" and args.teacher_type == "bert" ) assert os.path.isfile(args.student_config ) if args.student_pretrained_weights is not None: assert os.path.isfile(args.student_pretrained_weights ) if args.freeze_token_type_embds: assert args.student_type in ["roberta"] assert args.alpha_ce >= 0.0 assert args.alpha_mlm >= 0.0 assert args.alpha_clm >= 0.0 assert args.alpha_mse >= 0.0 assert args.alpha_cos >= 0.0 assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0 def __magic_name__ ( __a : List[Any] , __a : Any ): '''simple docstring''' if args.student_type == "roberta": UpperCamelCase__ = False elif args.student_type == "gpt2": UpperCamelCase__ = False def __magic_name__ ( __a : int , __a : Dict ): '''simple docstring''' if args.student_type == "roberta": UpperCamelCase__ = False def __magic_name__ ( ): '''simple docstring''' UpperCamelCase__ = argparse.ArgumentParser(description="""Training""" ) parser.add_argument("""--force""" , action="""store_true""" , help="""Overwrite dump_path if it already exists.""" ) parser.add_argument( """--dump_path""" , type=__a , required=__a , help="""The output directory (log, checkpoints, parameters, etc.)""" ) parser.add_argument( """--data_file""" , type=__a , required=__a , help="""The binarized file (tokenized + tokens_to_ids) and grouped by sequence.""" , ) parser.add_argument( """--student_type""" , type=__a , choices=["""distilbert""", """roberta""", """gpt2"""] , required=__a , help="""The student type (DistilBERT, RoBERTa).""" , ) parser.add_argument("""--student_config""" , type=__a , required=__a , help="""Path to the student configuration.""" ) parser.add_argument( """--student_pretrained_weights""" , default=__a , type=__a , help="""Load student initialization checkpoint.""" ) parser.add_argument( """--teacher_type""" , choices=["""bert""", """roberta""", """gpt2"""] , required=__a , help="""Teacher type (BERT, RoBERTa).""" ) parser.add_argument("""--teacher_name""" , type=__a , required=__a , help="""The teacher model.""" ) parser.add_argument("""--temperature""" , default=2.0 , type=__a , help="""Temperature for the softmax temperature.""" ) parser.add_argument( """--alpha_ce""" , default=0.5 , type=__a , 
help="""Linear weight for the distillation loss. Must be >=0.""" ) parser.add_argument( """--alpha_mlm""" , default=0.0 , type=__a , help="""Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.""" , ) parser.add_argument("""--alpha_clm""" , default=0.5 , type=__a , help="""Linear weight for the CLM loss. Must be >=0.""" ) parser.add_argument("""--alpha_mse""" , default=0.0 , type=__a , help="""Linear weight of the MSE loss. Must be >=0.""" ) parser.add_argument( """--alpha_cos""" , default=0.0 , type=__a , help="""Linear weight of the cosine embedding loss. Must be >=0.""" ) parser.add_argument( """--mlm""" , action="""store_true""" , help="""The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.""" ) parser.add_argument( """--mlm_mask_prop""" , default=0.15 , type=__a , help="""Proportion of tokens for which we need to make a prediction.""" , ) parser.add_argument("""--word_mask""" , default=0.8 , type=__a , help="""Proportion of tokens to mask out.""" ) parser.add_argument("""--word_keep""" , default=0.1 , type=__a , help="""Proportion of tokens to keep.""" ) parser.add_argument("""--word_rand""" , default=0.1 , type=__a , help="""Proportion of tokens to randomly replace.""" ) parser.add_argument( """--mlm_smoothing""" , default=0.7 , type=__a , help="""Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).""" , ) parser.add_argument("""--token_counts""" , type=__a , help="""The token counts in the data_file for MLM.""" ) parser.add_argument( """--restrict_ce_to_mask""" , action="""store_true""" , help="""If true, compute the distillation loss only the [MLM] prediction distribution.""" , ) parser.add_argument( """--freeze_pos_embs""" , action="""store_true""" , help="""Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only.""" , ) parser.add_argument( """--freeze_token_type_embds""" , action="""store_true""" , help="""Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only.""" , ) parser.add_argument("""--n_epoch""" , type=__a , default=3 , help="""Number of pass on the whole dataset.""" ) parser.add_argument("""--batch_size""" , type=__a , default=5 , help="""Batch size (for each process).""" ) parser.add_argument( """--group_by_size""" , action="""store_false""" , help="""If true, group sequences that have similar length into the same batch. 
Default is true.""" , ) parser.add_argument( """--gradient_accumulation_steps""" , type=__a , default=50 , help="""Gradient accumulation for larger training batches.""" , ) parser.add_argument("""--warmup_prop""" , default=0.05 , type=__a , help="""Linear warmup proportion.""" ) parser.add_argument("""--weight_decay""" , default=0.0 , type=__a , help="""Weight decay if we apply some.""" ) parser.add_argument("""--learning_rate""" , default=5E-4 , type=__a , help="""The initial learning rate for Adam.""" ) parser.add_argument("""--adam_epsilon""" , default=1E-6 , type=__a , help="""Epsilon for Adam optimizer.""" ) parser.add_argument("""--max_grad_norm""" , default=5.0 , type=__a , help="""Max gradient norm.""" ) parser.add_argument("""--initializer_range""" , default=0.02 , type=__a , help="""Random initialization range.""" ) parser.add_argument( """--fp16""" , action="""store_true""" , help="""Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit""" , ) parser.add_argument( """--fp16_opt_level""" , type=__a , default="""O1""" , help=( """For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3'].""" """See details at https://nvidia.github.io/apex/amp.html""" ) , ) parser.add_argument("""--n_gpu""" , type=__a , default=1 , help="""Number of GPUs in the node.""" ) parser.add_argument("""--local_rank""" , type=__a , default=-1 , help="""Distributed training - Local rank""" ) parser.add_argument("""--seed""" , type=__a , default=56 , help="""Random seed""" ) parser.add_argument("""--log_interval""" , type=__a , default=500 , help="""Tensorboard logging interval.""" ) parser.add_argument("""--checkpoint_interval""" , type=__a , default=4_000 , help="""Checkpoint interval.""" ) UpperCamelCase__ = parser.parse_args() sanity_checks(__a ) # ARGS # init_gpu_params(__a ) set_seed(__a ) if args.is_master: if os.path.exists(args.dump_path ): if not args.force: raise ValueError( f"Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite" """ itUse `--force` if you want to overwrite it""" ) else: shutil.rmtree(args.dump_path ) if not os.path.exists(args.dump_path ): os.makedirs(args.dump_path ) logger.info(f"Experiment will be dumped and logged in {args.dump_path}" ) # SAVE PARAMS # logger.info(f"Param: {args}" ) with open(os.path.join(args.dump_path , """parameters.json""" ) , """w""" ) as f: json.dump(vars(__a ) , __a , indent=4 ) git_log(args.dump_path ) UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = MODEL_CLASSES[args.student_type] UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = MODEL_CLASSES[args.teacher_type] # TOKENIZER # UpperCamelCase__ = teacher_tokenizer_class.from_pretrained(args.teacher_name ) UpperCamelCase__ = {} for tok_name, tok_symbol in tokenizer.special_tokens_map.items(): UpperCamelCase__ = tokenizer.all_special_tokens.index(__a ) UpperCamelCase__ = tokenizer.all_special_ids[idx] logger.info(f"Special tokens {special_tok_ids}" ) UpperCamelCase__ = special_tok_ids UpperCamelCase__ = tokenizer.max_model_input_sizes[args.teacher_name] # DATA LOADER # logger.info(f"Loading data from {args.data_file}" ) with open(args.data_file , """rb""" ) as fp: UpperCamelCase__ = pickle.load(__a ) if args.mlm: logger.info(f"Loading token counts from {args.token_counts} (already pre-computed)" ) with open(args.token_counts , """rb""" ) as fp: UpperCamelCase__ = pickle.load(__a ) UpperCamelCase__ = np.maximum(__a , 1 ) ** -args.mlm_smoothing for idx in special_tok_ids.values(): UpperCamelCase__ = 0.0 
# do not predict special tokens UpperCamelCase__ = torch.from_numpy(__a ) else: UpperCamelCase__ = None UpperCamelCase__ = LmSeqsDataset(params=__a , data=__a ) logger.info("""Data loader created.""" ) # STUDENT # logger.info(f"Loading student config from {args.student_config}" ) UpperCamelCase__ = student_config_class.from_pretrained(args.student_config ) UpperCamelCase__ = True if args.student_pretrained_weights is not None: logger.info(f"Loading pretrained weights from {args.student_pretrained_weights}" ) UpperCamelCase__ = student_model_class.from_pretrained(args.student_pretrained_weights , config=__a ) else: UpperCamelCase__ = student_model_class(__a ) if args.n_gpu > 0: student.to(f"cuda:{args.local_rank}" ) logger.info("""Student loaded.""" ) # TEACHER # UpperCamelCase__ = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=__a ) if args.n_gpu > 0: teacher.to(f"cuda:{args.local_rank}" ) logger.info(f"Teacher loaded from {args.teacher_name}." ) # FREEZING # if args.freeze_pos_embs: freeze_pos_embeddings(__a , __a ) if args.freeze_token_type_embds: freeze_token_type_embeddings(__a , __a ) # SANITY CHECKS # assert student.config.vocab_size == teacher.config.vocab_size assert student.config.hidden_size == teacher.config.hidden_size assert student.config.max_position_embeddings == teacher.config.max_position_embeddings if args.mlm: assert token_probs.size(0 ) == stu_architecture_config.vocab_size # DISTILLER # torch.cuda.empty_cache() UpperCamelCase__ = Distiller( params=__a , dataset=__a , token_probs=__a , student=__a , teacher=__a ) distiller.train() logger.info("""Let's go get some drinks.""" ) if __name__ == "__main__": main()
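# The core of the Distiller driven by this script is a temperature-scaled
# KL divergence between student and teacher logits (the --alpha_ce and
# --temperature flags above). A minimal sketch of that loss, assuming the
# standard Hinton formulation:
import torch
import torch.nn.functional as F


def distillation_loss(student_logits, teacher_logits, temperature: float = 2.0):
    # Soften both distributions, then take KL(teacher || student); the T**2
    # factor keeps gradient magnitudes comparable across temperatures.
    log_p_student = F.log_softmax(student_logits / temperature, dim=-1)
    p_teacher = F.softmax(teacher_logits / temperature, dim=-1)
    return F.kl_div(log_p_student, p_teacher, reduction="batchmean") * temperature**2


student_logits = torch.randn(8, 30522)  # (batch, vocab); sizes illustrative
teacher_logits = torch.randn(8, 30522)
loss = distillation_loss(student_logits, teacher_logits)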
86
1
import itertools import os from collections import Counter, defaultdict from concurrent.futures import ThreadPoolExecutor, as_completed import numpy as np import datasets from .execute import check_correctness lowerCamelCase_ = '''\ @misc{chen2021evaluating, title={Evaluating Large Language Models Trained on Code}, author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \ and Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \ and Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \ and Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \ and Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \ and Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \ and Mohammad Bavarian and Clemens Winter and Philippe Tillet \ and Felipe Petroski Such and Dave Cummings and Matthias Plappert \ and Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \ and William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \ and Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \ and William Saunders and Christopher Hesse and Andrew N. Carr \ and Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \ and Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \ and Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \ and Sam McCandlish and Ilya Sutskever and Wojciech Zaremba}, year={2021}, eprint={2107.03374}, archivePrefix={arXiv}, primaryClass={cs.LG} } ''' lowerCamelCase_ = '''\ This metric implements the evaluation harness for the HumanEval problem solving dataset described in the paper "Evaluating Large Language Models Trained on Code" (https://arxiv.org/abs/2107.03374). ''' lowerCamelCase_ = ''' Calculates how good the predictions are given some references, using the pass@k metric Args: predictions: list of candidates to evaluate. Each prediction should be a list of strings with several code candidates to solve the problem. references: a list with a test for each prediction. Each test should evaluate the correctness of a code candidate. k: number of code candidates to consider in the evaluation (Default: [1, 10, 100]) num_workers: number of workers used to evaluate the candidate programs (Default: 4). timeout: maximum time in seconds allowed for each candidate program to run (Default: 3.0). Returns: pass_at_k: dict with pass rates for each k results: dict with granular results of each unittest Examples: >>> code_eval = datasets.load_metric("code_eval") >>> test_cases = ["assert add(2,3)==5"] >>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]] >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2]) >>> print(pass_at_k) {\'pass@1\': 0.5, \'pass@2\': 1.0} ''' lowerCamelCase_ = ''' ################################################################################ !!!WARNING!!! ################################################################################ The "code_eval" metric executes untrusted model-generated code in Python. Although it is highly unlikely that model-generated code will do something overtly malicious in response to this test suite, model-generated code may act destructively due to a lack of model capability or alignment. Users are strongly encouraged to sandbox this evaluation suite so that it does not perform destructive actions on their host or network. For more information on how OpenAI sandboxes its code, see the paper "Evaluating Large Language Models Trained on Code" (https://arxiv.org/abs/2107.03374).
Once you have read this disclaimer and taken appropriate precautions, set the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can do this with: >>> import os >>> os.environ["HF_ALLOW_CODE_EVAL"] = "1" ################################################################################\ ''' lowerCamelCase_ = '''The MIT License Copyright (c) OpenAI (https://openai.com) Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __A( datasets.Metric ): """simple docstring""" def UpperCAmelCase_ (self ): return datasets.MetricInfo( # This is the description that will appear on the metrics page. description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Sequence(datasets.Value("""string""" ) ), """references""": datasets.Value("""string""" ), } ) , homepage="""https://github.com/openai/human-eval""" , codebase_urls=["""https://github.com/openai/human-eval"""] , reference_urls=["""https://github.com/openai/human-eval"""] , license=_LICENSE , ) def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=[1, 10, 1_00] , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=3.0 ): if os.getenv("""HF_ALLOW_CODE_EVAL""" , 0 ) != "1": raise ValueError(_WARNING ) if os.name == "nt": raise NotImplementedError("""This metric is currently not supported on Windows.""" ) with ThreadPoolExecutor(max_workers=SCREAMING_SNAKE_CASE_ ) as executor: UpperCamelCase__ = [] UpperCamelCase__ = Counter() UpperCamelCase__ = 0 UpperCamelCase__ = defaultdict(SCREAMING_SNAKE_CASE_ ) for task_id, (candidates, test_case) in enumerate(zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ): for candidate in candidates: UpperCamelCase__ = candidate + """\n""" + test_case UpperCamelCase__ = (test_program, timeout, task_id, completion_id[task_id]) UpperCamelCase__ = executor.submit(SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ ) futures.append(SCREAMING_SNAKE_CASE_ ) completion_id[task_id] += 1 n_samples += 1 for future in as_completed(SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = future.result() results[result["task_id"]].append((result["""completion_id"""], result) ) UpperCamelCase__ , UpperCamelCase__ = [], [] for result in results.values(): result.sort() UpperCamelCase__ = [r[1]["""passed"""] for r in result] total.append(len(SCREAMING_SNAKE_CASE_ ) ) correct.append(sum(SCREAMING_SNAKE_CASE_ ) ) UpperCamelCase__ = np.array(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ =
np.array(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = k UpperCamelCase__ = {F"pass@{k}": estimate_pass_at_k(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).mean() for k in ks if (total >= k).all()} return pass_at_k, results def __magic_name__ ( __a : Any , __a : Optional[int] , __a : Any ): '''simple docstring''' def estimator(__a : int , __a : int , __a : int ) -> float: if n - c < k: return 1.0 return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1 , n + 1 ) ) if isinstance(__a , __a ): UpperCamelCase__ = itertools.repeat(__a , len(__a ) ) else: assert len(__a ) == len(__a ) UpperCamelCase__ = iter(__a ) return np.array([estimator(int(__a ) , int(__a ) , __a ) for n, c in zip(__a , __a )] )
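# --- Illustrative sketch (add-on, not part of the original metric) ---
# The estimator above implements the unbiased pass@k formula from the Codex
# paper, 1 - C(n-c, k) / C(n, k), computed stably as the running product
# 1 - prod(1 - k / i) for i in n-c+1..n. A quick numeric sanity check of the
# two forms against each other; the helper name is made up:
from math import comb

import numpy as np

def pass_at_k_closed_form(n: int, c: int, k: int) -> float:
    # probability that at least one of k samples, drawn without replacement
    # from n candidates of which c pass, is correct
    if n - c < k:
        return 1.0
    return 1.0 - comb(n - c, k) / comb(n, k)

n, c, k = 10, 3, 2
product_form = 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))
assert abs(product_form - pass_at_k_closed_form(n, c, k)) < 1e-12
print(product_form)  # ~0.5333: 1 - C(7, 2) / C(10, 2) = 1 - 21 / 45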
86
from .glue import GlueDataset, GlueDataTrainingArguments from .language_modeling import ( LineByLineTextDataset, LineByLineWithRefDataset, LineByLineWithSOPTextDataset, TextDataset, TextDatasetForNextSentencePrediction, ) from .squad import SquadDataset, SquadDataTrainingArguments
86
1
import gc import random import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class __A( __lowerCamelCase , __lowerCamelCase , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = CycleDiffusionPipeline SCREAMING_SNAKE_CASE__ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - { """negative_prompt""", """height""", """width""", """negative_prompt_embeds""", } SCREAMING_SNAKE_CASE__ = PipelineTesterMixin.required_optional_params - {"""latents"""} SCREAMING_SNAKE_CASE__ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"""source_prompt"""} ) SCREAMING_SNAKE_CASE__ = IMAGE_TO_IMAGE_IMAGE_PARAMS SCREAMING_SNAKE_CASE__ = IMAGE_TO_IMAGE_IMAGE_PARAMS def UpperCAmelCase_ (self ): torch.manual_seed(0 ) UpperCamelCase__ = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , ) UpperCamelCase__ = DDIMScheduler( beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , num_train_timesteps=10_00 , clip_sample=SCREAMING_SNAKE_CASE_ , set_alpha_to_one=SCREAMING_SNAKE_CASE_ , ) torch.manual_seed(0 ) UpperCamelCase__ = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , ) torch.manual_seed(0 ) UpperCamelCase__ = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) UpperCamelCase__ = CLIPTextModel(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) UpperCamelCase__ = { """unet""": unet, """scheduler""": scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """safety_checker""": None, """feature_extractor""": None, } return components def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=0 ): UpperCamelCase__ = floats_tensor((1, 3, 32, 32) , rng=random.Random(SCREAMING_SNAKE_CASE_ ) ).to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = image / 2 + 0.5 if str(SCREAMING_SNAKE_CASE_ ).startswith("""mps""" ): UpperCamelCase__ = torch.manual_seed(SCREAMING_SNAKE_CASE_ ) else: UpperCamelCase__ = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = { """prompt""": """An astronaut riding an elephant""", """source_prompt""": """An astronaut riding a horse""", """image""": image, """generator""": generator, """num_inference_steps""": 2, """eta""": 0.1, """strength""": 0.8, """guidance_scale""": 3, """source_guidance_scale""": 1, """output_type""": """numpy""", } return inputs def UpperCAmelCase_ (self ): 
UpperCamelCase__ = """cpu""" # ensure determinism for the device-dependent torch.Generator UpperCamelCase__ = self.get_dummy_components() UpperCamelCase__ = CycleDiffusionPipeline(**SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = pipe.to(SCREAMING_SNAKE_CASE_ ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = pipe(**SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = output.images UpperCamelCase__ = images[0, -3:, -3:, -1] assert images.shape == (1, 32, 32, 3) UpperCamelCase__ = np.array([0.4459, 0.4943, 0.4544, 0.6643, 0.5474, 0.4327, 0.5701, 0.5959, 0.5179] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 @unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" ) def UpperCAmelCase_ (self ): UpperCamelCase__ = self.get_dummy_components() for name, module in components.items(): if hasattr(SCREAMING_SNAKE_CASE_ , """half""" ): UpperCamelCase__ = module.half() UpperCamelCase__ = CycleDiffusionPipeline(**SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = pipe.to(SCREAMING_SNAKE_CASE_ ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = pipe(**SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = output.images UpperCamelCase__ = images[0, -3:, -3:, -1] assert images.shape == (1, 32, 32, 3) UpperCamelCase__ = np.array([0.3506, 0.4543, 0.446, 0.4575, 0.5195, 0.4155, 0.5273, 0.518, 0.4116] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 @skip_mps def UpperCAmelCase_ (self ): return super().test_save_load_local() @unittest.skip("""non-deterministic pipeline""" ) def UpperCAmelCase_ (self ): return super().test_inference_batch_single_identical() @skip_mps def UpperCAmelCase_ (self ): return super().test_dict_tuple_outputs_equivalent() @skip_mps def UpperCAmelCase_ (self ): return super().test_save_load_optional_components() @skip_mps def UpperCAmelCase_ (self ): return super().test_attention_slicing_forward_pass() @slow @require_torch_gpu class __A( unittest.TestCase ): """simple docstring""" def UpperCAmelCase_ (self ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCAmelCase_ (self ): UpperCamelCase__ = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/cycle-diffusion/black_colored_car.png""" ) UpperCamelCase__ = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy""" ) UpperCamelCase__ = init_image.resize((5_12, 5_12) ) UpperCamelCase__ = """CompVis/stable-diffusion-v1-4""" UpperCamelCase__ = DDIMScheduler.from_pretrained(SCREAMING_SNAKE_CASE_ , subfolder="""scheduler""" ) UpperCamelCase__ = CycleDiffusionPipeline.from_pretrained( SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ , safety_checker=SCREAMING_SNAKE_CASE_ , torch_dtype=torch.floataa , revision="""fp16""" ) pipe.to(SCREAMING_SNAKE_CASE_ ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ ) pipe.enable_attention_slicing() UpperCamelCase__ = """A black colored car""" UpperCamelCase__ = """A blue colored car""" UpperCamelCase__ = torch.manual_seed(0 ) UpperCamelCase__ = pipe( prompt=SCREAMING_SNAKE_CASE_ , source_prompt=SCREAMING_SNAKE_CASE_ , image=SCREAMING_SNAKE_CASE_ , num_inference_steps=1_00 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=SCREAMING_SNAKE_CASE_ , 
output_type="""np""" , ) UpperCamelCase__ = output.images # the values aren't exactly equal, but the images look the same visually assert np.abs(image - expected_image ).max() < 5E-1 def UpperCAmelCase_ (self ): UpperCamelCase__ = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/cycle-diffusion/black_colored_car.png""" ) UpperCamelCase__ = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy""" ) UpperCamelCase__ = init_image.resize((5_12, 5_12) ) UpperCamelCase__ = """CompVis/stable-diffusion-v1-4""" UpperCamelCase__ = DDIMScheduler.from_pretrained(SCREAMING_SNAKE_CASE_ , subfolder="""scheduler""" ) UpperCamelCase__ = CycleDiffusionPipeline.from_pretrained(SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ , safety_checker=SCREAMING_SNAKE_CASE_ ) pipe.to(SCREAMING_SNAKE_CASE_ ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ ) pipe.enable_attention_slicing() UpperCamelCase__ = """A black colored car""" UpperCamelCase__ = """A blue colored car""" UpperCamelCase__ = torch.manual_seed(0 ) UpperCamelCase__ = pipe( prompt=SCREAMING_SNAKE_CASE_ , source_prompt=SCREAMING_SNAKE_CASE_ , image=SCREAMING_SNAKE_CASE_ , num_inference_steps=1_00 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=SCREAMING_SNAKE_CASE_ , output_type="""np""" , ) UpperCamelCase__ = output.images assert np.abs(image - expected_image ).max() < 2E-2
86
import math from typing import Callable, List, Optional, Union import numpy as np import PIL import torch from PIL import Image from transformers import CLIPTextModel, CLIPTokenizer from diffusers.models import AutoencoderKL, UNetaDConditionModel from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler def __magic_name__ ( __a : int , __a : List[str] , __a : str=[] ): '''simple docstring''' UpperCamelCase__ = size[0] - overlap_pixels * 2 UpperCamelCase__ = size[1] - overlap_pixels * 2 for letter in ["l", "r"]: if letter in remove_borders: size_x += overlap_pixels for letter in ["t", "b"]: if letter in remove_borders: size_y += overlap_pixels UpperCamelCase__ = np.ones((size_y, size_x) , dtype=np.uinta ) * 255 UpperCamelCase__ = np.pad(__a , mode="""linear_ramp""" , pad_width=__a , end_values=0 ) if "l" in remove_borders: UpperCamelCase__ = mask[:, overlap_pixels : mask.shape[1]] if "r" in remove_borders: UpperCamelCase__ = mask[:, 0 : mask.shape[1] - overlap_pixels] if "t" in remove_borders: UpperCamelCase__ = mask[overlap_pixels : mask.shape[0], :] if "b" in remove_borders: UpperCamelCase__ = mask[0 : mask.shape[0] - overlap_pixels, :] return mask def __magic_name__ ( __a : int , __a : Dict , __a : Optional[int] ): '''simple docstring''' return max(__a , min(__a , __a ) ) def __magic_name__ ( __a : [int] , __a : [int] , __a : [int] ): '''simple docstring''' return ( clamp(rect[0] , min[0] , max[0] ), clamp(rect[1] , min[1] , max[1] ), clamp(rect[2] , min[0] , max[0] ), clamp(rect[3] , min[1] , max[1] ), ) def __magic_name__ ( __a : [int] , __a : int , __a : [int] ): '''simple docstring''' UpperCamelCase__ = list(__a ) rect[0] -= overlap rect[1] -= overlap rect[2] += overlap rect[3] += overlap UpperCamelCase__ = clamp_rect(__a , [0, 0] , [image_size[0], image_size[1]] ) return rect def __magic_name__ ( __a : Optional[int] , __a : Tuple , __a : str , __a : List[Any] ): '''simple docstring''' UpperCamelCase__ = Image.new("""RGB""" , (tile.size[0] + original_slice, tile.size[1]) ) result.paste( original_image.resize((tile.size[0], tile.size[1]) , Image.BICUBIC ).crop( (slice_x, 0, slice_x + original_slice, tile.size[1]) ) , (0, 0) , ) result.paste(__a , (original_slice, 0) ) return result def __magic_name__ ( __a : int , __a : int ): '''simple docstring''' UpperCamelCase__ = (original_image_slice * 4, 0, tile.size[0], tile.size[1]) UpperCamelCase__ = tile.crop(__a ) return tile def __magic_name__ ( __a : List[str] , __a : Any ): '''simple docstring''' UpperCamelCase__ = n % d return n - divisor class __A( __lowerCamelCase ): """simple docstring""" def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 3_50 , ): super().__init__( vae=SCREAMING_SNAKE_CASE_ , text_encoder=SCREAMING_SNAKE_CASE_ , tokenizer=SCREAMING_SNAKE_CASE_ , unet=SCREAMING_SNAKE_CASE_ , low_res_scheduler=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ , max_noise_level=SCREAMING_SNAKE_CASE_ , ) def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ): torch.manual_seed(0 ) UpperCamelCase__ = ( min(image.size[0] - (tile_size + original_image_slice) , x * tile_size ), 
min(image.size[1] - (tile_size + original_image_slice) , y * tile_size ), min(image.size[0] , (x + 1) * tile_size ), min(image.size[1] , (y + 1) * tile_size ), ) UpperCamelCase__ = add_overlap_rect(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , image.size ) UpperCamelCase__ = image.crop(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0] UpperCamelCase__ = translated_slice_x - (original_image_slice / 2) UpperCamelCase__ = max(0 , SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = squeeze_tile(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = to_input.size UpperCamelCase__ = to_input.resize((tile_size, tile_size) , Image.BICUBIC ) UpperCamelCase__ = super(SCREAMING_SNAKE_CASE_ , self ).__call__(image=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ).images[0] UpperCamelCase__ = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4) , Image.BICUBIC ) UpperCamelCase__ = unsqueeze_tile(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4) , Image.BICUBIC ) UpperCamelCase__ = [] if x == 0: remove_borders.append("""l""" ) elif crop_rect[2] == image.size[0]: remove_borders.append("""r""" ) if y == 0: remove_borders.append("""t""" ) elif crop_rect[3] == image.size[1]: remove_borders.append("""b""" ) UpperCamelCase__ = Image.fromarray( make_transparency_mask( (upscaled_tile.size[0], upscaled_tile.size[1]) , tile_border * 4 , remove_borders=SCREAMING_SNAKE_CASE_ ) , mode="""L""" , ) final_image.paste( SCREAMING_SNAKE_CASE_ , (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4) , SCREAMING_SNAKE_CASE_ ) @torch.no_grad() def __call__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 75 , SCREAMING_SNAKE_CASE_ = 9.0 , SCREAMING_SNAKE_CASE_ = 50 , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 1 , SCREAMING_SNAKE_CASE_ = 0.0 , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 1 , SCREAMING_SNAKE_CASE_ = 1_28 , SCREAMING_SNAKE_CASE_ = 32 , SCREAMING_SNAKE_CASE_ = 32 , ): UpperCamelCase__ = Image.new("""RGB""" , (image.size[0] * 4, image.size[1] * 4) ) UpperCamelCase__ = math.ceil(image.size[0] / tile_size ) UpperCamelCase__ = math.ceil(image.size[1] / tile_size ) UpperCamelCase__ = tcx * tcy UpperCamelCase__ = 0 for y in range(SCREAMING_SNAKE_CASE_ ): for x in range(SCREAMING_SNAKE_CASE_ ): self._process_tile( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , prompt=SCREAMING_SNAKE_CASE_ , num_inference_steps=SCREAMING_SNAKE_CASE_ , guidance_scale=SCREAMING_SNAKE_CASE_ , noise_level=SCREAMING_SNAKE_CASE_ , negative_prompt=SCREAMING_SNAKE_CASE_ , num_images_per_prompt=SCREAMING_SNAKE_CASE_ , eta=SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , latents=SCREAMING_SNAKE_CASE_ , ) current_count += 1 if callback is not None: callback({"""progress""": current_count / total_tile_count, """image""": final_image} ) return final_image def __magic_name__ ( ): '''simple docstring''' UpperCamelCase__ = """stabilityai/stable-diffusion-x4-upscaler""" UpperCamelCase__ = StableDiffusionTiledUpscalePipeline.from_pretrained(__a , revision="""fp16""" , torch_dtype=torch.floataa ) UpperCamelCase__ = pipe.to("""cuda""" ) UpperCamelCase__ = 
Image.open("""../../docs/source/imgs/diffusers_library.jpg""" ) def callback(__a : Optional[int] ): print(f"progress: {obj['progress']:.4f}" ) obj["image"].save("""diffusers_library_progress.jpg""" ) UpperCamelCase__ = pipe(image=__a , prompt="""Black font, white background, vector""" , noise_level=40 , callback=__a ) final_image.save("""diffusers_library.jpg""" ) if __name__ == "__main__": main()
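# --- Illustrative sketch (add-on, not part of the original pipeline) ---
# The tiling helpers above carve the image into a tile_size grid, grow each
# crop rectangle by tile_border pixels on every side, and clamp the result to
# the image bounds. A standalone reproduction of that geometry under assumed
# readable names, traced for the second tile of the first row:
def clamp(n: int, smallest: int, largest: int) -> int:
    return max(smallest, min(n, largest))

def clamp_rect(rect, lo, hi):
    return (
        clamp(rect[0], lo[0], hi[0]),
        clamp(rect[1], lo[1], hi[1]),
        clamp(rect[2], lo[0], hi[0]),
        clamp(rect[3], lo[1], hi[1]),
    )

def add_overlap_rect(rect, overlap, image_size):
    grown = (rect[0] - overlap, rect[1] - overlap, rect[2] + overlap, rect[3] + overlap)
    return clamp_rect(grown, [0, 0], [image_size[0], image_size[1]])

image_size, tile_size, tile_border = (256, 256), 128, 32
x, y = 1, 0
crop = (x * tile_size, y * tile_size, (x + 1) * tile_size, (y + 1) * tile_size)
print(add_overlap_rect(crop, tile_border, image_size))
# (96, 0, 256, 160): the top and right edges clamp to the image border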
86
1
from math import ceil def __magic_name__ ( __a : Optional[Any] , __a : Union[str, Any] ): '''simple docstring''' UpperCamelCase__ = list(range(0 , __a ) ) UpperCamelCase__ = [item for sublist in list(device_map.values() ) for item in sublist] # Duplicate check UpperCamelCase__ = [] for i in device_map_blocks: if device_map_blocks.count(__a ) > 1 and i not in duplicate_blocks: duplicate_blocks.append(__a ) # Missing blocks UpperCamelCase__ = [i for i in blocks if i not in device_map_blocks] UpperCamelCase__ = [i for i in device_map_blocks if i not in blocks] if len(__a ) != 0: raise ValueError( """Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device.""" """ These attention blocks were specified more than once: """ + str(__a ) ) if len(__a ) != 0: raise ValueError( """There are attention blocks for this model that are not specified in the device_map. Add these attention """ """blocks to a device on the device_map: """ + str(__a ) ) if len(__a ) != 0: raise ValueError( """The device_map contains more attention blocks than this model has. Remove these from the device_map:""" + str(__a ) ) def __magic_name__ ( __a : int , __a : Union[str, Any] ): '''simple docstring''' UpperCamelCase__ = list(range(__a ) ) UpperCamelCase__ = int(ceil(n_layers / len(__a ) ) ) UpperCamelCase__ = [layers[i : i + n_blocks] for i in range(0 , __a , __a )] return dict(zip(__a , __a ) )
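# --- Illustrative sketch (add-on, not part of the original module) ---
# The second helper above splits n_layers into contiguous, equal-sized chunks,
# one chunk per device. Reproducing its behavior under assumed readable names:
from math import ceil

def get_device_map(n_layers, devices):
    layers = list(range(n_layers))
    n_blocks = int(ceil(n_layers / len(devices)))
    layers_list = [layers[i : i + n_blocks] for i in range(0, n_layers, n_blocks)]
    return dict(zip(devices, layers_list))

print(get_device_map(8, [0, 1, 2]))
# {0: [0, 1, 2], 1: [3, 4, 5], 2: [6, 7]} -- the last device may get fewer layers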
86
import inspect from typing import Callable, List, Optional, Union import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from diffusers import DiffusionPipeline from diffusers.models import AutoencoderKL, UNetaDConditionModel from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler from diffusers.utils import logging lowerCamelCase_ = logging.get_logger(__name__) # pylint: disable=invalid-name class __A( __lowerCamelCase ): """simple docstring""" def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ): super().__init__() self.register_modules( vae=SCREAMING_SNAKE_CASE_ , text_encoder=SCREAMING_SNAKE_CASE_ , tokenizer=SCREAMING_SNAKE_CASE_ , unet=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ , safety_checker=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , ) def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ = "auto" ): if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory UpperCamelCase__ = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self ): self.enable_attention_slicing(SCREAMING_SNAKE_CASE_ ) @torch.no_grad() def __call__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 5_12 , SCREAMING_SNAKE_CASE_ = 5_12 , SCREAMING_SNAKE_CASE_ = 50 , SCREAMING_SNAKE_CASE_ = 7.5 , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 1 , SCREAMING_SNAKE_CASE_ = 0.0 , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = "pil" , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 1 , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , ): if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = 1 elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = len(SCREAMING_SNAKE_CASE_ ) else: raise ValueError(F"`prompt` has to be of type `str` or `list` but is {type(SCREAMING_SNAKE_CASE_ )}" ) if height % 8 != 0 or width % 8 != 0: raise ValueError(F"`height` and `width` have to be divisible by 8 but are {height} and {width}." ) if (callback_steps is None) or ( callback_steps is not None and (not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) or callback_steps <= 0) ): raise ValueError( F"`callback_steps` has to be a positive integer but is {callback_steps} of type" F" {type(SCREAMING_SNAKE_CASE_ )}." 
) # get prompt text embeddings UpperCamelCase__ = self.tokenizer( SCREAMING_SNAKE_CASE_ , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , ) UpperCamelCase__ = text_inputs.input_ids if text_input_ids.shape[-1] > self.tokenizer.model_max_length: UpperCamelCase__ = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] ) logger.warning( """The following part of your input was truncated because CLIP can only handle sequences up to""" F" {self.tokenizer.model_max_length} tokens: {removed_text}" ) UpperCamelCase__ = text_input_ids[:, : self.tokenizer.model_max_length] if text_embeddings is None: UpperCamelCase__ = self.text_encoder(text_input_ids.to(self.device ) )[0] # duplicate text embeddings for each generation per prompt, using mps friendly method UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = text_embeddings.shape UpperCamelCase__ = text_embeddings.repeat(1 , SCREAMING_SNAKE_CASE_ , 1 ) UpperCamelCase__ = text_embeddings.view(bs_embed * num_images_per_prompt , SCREAMING_SNAKE_CASE_ , -1 ) # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. UpperCamelCase__ = guidance_scale > 1.0 # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: UpperCamelCase__ = 42 if negative_prompt is None: UpperCamelCase__ = [""""""] elif type(SCREAMING_SNAKE_CASE_ ) is not type(SCREAMING_SNAKE_CASE_ ): raise TypeError( F"`negative_prompt` should be the same type to `prompt`, but got {type(SCREAMING_SNAKE_CASE_ )} !=" F" {type(SCREAMING_SNAKE_CASE_ )}." ) elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = [negative_prompt] elif batch_size != len(SCREAMING_SNAKE_CASE_ ): raise ValueError( F"`negative_prompt`: {negative_prompt} has batch size {len(SCREAMING_SNAKE_CASE_ )}, but `prompt`:" F" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" """ the batch size of `prompt`.""" ) else: UpperCamelCase__ = negative_prompt UpperCamelCase__ = text_input_ids.shape[-1] UpperCamelCase__ = self.tokenizer( SCREAMING_SNAKE_CASE_ , padding="""max_length""" , max_length=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , return_tensors="""pt""" , ) UpperCamelCase__ = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0] # duplicate unconditional embeddings for each generation per prompt, using mps friendly method UpperCamelCase__ = uncond_embeddings.shape[1] UpperCamelCase__ = uncond_embeddings.repeat(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , 1 ) UpperCamelCase__ = uncond_embeddings.view(batch_size * num_images_per_prompt , SCREAMING_SNAKE_CASE_ , -1 ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes UpperCamelCase__ = torch.cat([uncond_embeddings, text_embeddings] ) # get the initial random noise unless the user supplied it # Unlike in other pipelines, latents need to be generated in the target device # for 1-to-1 results reproducibility with the CompVis implementation. # However this currently doesn't work in `mps`. 
UpperCamelCase__ = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8) UpperCamelCase__ = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64) UpperCamelCase__ = text_embeddings.dtype if latents is None: if self.device.type == "mps": # randn does not exist on mps UpperCamelCase__ = torch.randn( SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , device="""cpu""" , dtype=SCREAMING_SNAKE_CASE_ ).to(self.device ) UpperCamelCase__ = torch.randn(SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , device="""cpu""" , dtype=SCREAMING_SNAKE_CASE_ ).to( self.device ) else: UpperCamelCase__ = torch.randn( SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , device=self.device , dtype=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = torch.randn(SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , device=self.device , dtype=SCREAMING_SNAKE_CASE_ ) else: if latents_reference.shape != latents_shape: raise ValueError(F"Unexpected latents shape, got {latents.shape}, expected {latents_shape}" ) UpperCamelCase__ = latents_reference.to(self.device ) UpperCamelCase__ = latents.to(self.device ) # This is the key part of the pipeline where we # try to ensure that the generated images w/ the same seed # but different sizes actually result in similar images UpperCamelCase__ = (latents_shape[3] - latents_shape_reference[3]) // 2 UpperCamelCase__ = (latents_shape[2] - latents_shape_reference[2]) // 2 UpperCamelCase__ = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx UpperCamelCase__ = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy UpperCamelCase__ = 0 if dx < 0 else dx UpperCamelCase__ = 0 if dy < 0 else dy UpperCamelCase__ = max(-dx , 0 ) UpperCamelCase__ = max(-dy , 0 ) # import pdb # pdb.set_trace() UpperCamelCase__ = latents_reference[:, :, dy : dy + h, dx : dx + w] # set timesteps self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ ) # Some schedulers like PNDM have timesteps as arrays # It's more optimized to move all timesteps to correct device beforehand UpperCamelCase__ = self.scheduler.timesteps.to(self.device ) # scale the initial noise by the standard deviation required by the scheduler UpperCamelCase__ = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] UpperCamelCase__ = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() ) UpperCamelCase__ = {} if accepts_eta: UpperCamelCase__ = eta for i, t in enumerate(self.progress_bar(SCREAMING_SNAKE_CASE_ ) ): # expand the latents if we are doing classifier free guidance UpperCamelCase__ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents UpperCamelCase__ = self.scheduler.scale_model_input(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # predict the noise residual UpperCamelCase__ = self.unet(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , encoder_hidden_states=SCREAMING_SNAKE_CASE_ ).sample # perform guidance if do_classifier_free_guidance: UpperCamelCase__ , UpperCamelCase__ = noise_pred.chunk(2 ) UpperCamelCase__ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 UpperCamelCase__ = self.scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ).prev_sample # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = 1 / 0.1_8215 * latents UpperCamelCase__ = self.vae.decode(SCREAMING_SNAKE_CASE_ ).sample UpperCamelCase__ = (image / 2 + 0.5).clamp(0 , 1 ) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 UpperCamelCase__ = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if self.safety_checker is not None: UpperCamelCase__ = self.feature_extractor(self.numpy_to_pil(SCREAMING_SNAKE_CASE_ ) , return_tensors="""pt""" ).to( self.device ) UpperCamelCase__ , UpperCamelCase__ = self.safety_checker( images=SCREAMING_SNAKE_CASE_ , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) ) else: UpperCamelCase__ = None if output_type == "pil": UpperCamelCase__ = self.numpy_to_pil(SCREAMING_SNAKE_CASE_ ) if not return_dict: return (image, has_nsfw_concept) return StableDiffusionPipelineOutput(images=SCREAMING_SNAKE_CASE_ , nsfw_content_detected=SCREAMING_SNAKE_CASE_ )
86
1
lowerCamelCase_ = { '''meter''': '''m''', '''kilometer''': '''km''', '''megametre''': '''Mm''', '''gigametre''': '''Gm''', '''terametre''': '''Tm''', '''petametre''': '''Pm''', '''exametre''': '''Em''', '''zettametre''': '''Zm''', '''yottametre''': '''Ym''', } # Exponent of the factor(meter) lowerCamelCase_ = { '''m''': 0, '''km''': 3, '''Mm''': 6, '''Gm''': 9, '''Tm''': 12, '''Pm''': 15, '''Em''': 18, '''Zm''': 21, '''Ym''': 24, } def __magic_name__ ( __a : float , __a : str , __a : str ): '''simple docstring''' UpperCamelCase__ = from_type.lower().strip("""s""" ) UpperCamelCase__ = to_type.lower().strip("""s""" ) UpperCamelCase__ = UNIT_SYMBOL.get(__a , __a ) UpperCamelCase__ = UNIT_SYMBOL.get(__a , __a ) if from_sanitized not in METRIC_CONVERSION: UpperCamelCase__ = ( f"Invalid 'from_type' value: {from_type!r}.\n" f"Conversion abbreviations are: {', '.join(__a )}" ) raise ValueError(__a ) if to_sanitized not in METRIC_CONVERSION: UpperCamelCase__ = ( f"Invalid 'to_type' value: {to_type!r}.\n" f"Conversion abbreviations are: {', '.join(__a )}" ) raise ValueError(__a ) UpperCamelCase__ = METRIC_CONVERSION[from_sanitized] UpperCamelCase__ = METRIC_CONVERSION[to_sanitized] UpperCamelCase__ = 1 if from_exponent > to_exponent: UpperCamelCase__ = from_exponent - to_exponent else: UpperCamelCase__ = -(to_exponent - from_exponent) return value * pow(10 , __a ) if __name__ == "__main__": from doctest import testmod testmod()
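# --- Illustrative sketch (add-on, not part of the original module) ---
# The conversion above reduces to a power-of-ten shift between the two unit
# exponents: value * 10 ** (from_exponent - to_exponent). A standalone numeric
# check with assumed names and a trimmed exponent table (the original maps
# full unit names to symbols first, then symbols to exponents):
exponents = {"m": 0, "km": 3, "Mm": 6}

def convert(value: float, from_unit: str, to_unit: str) -> float:
    return value * 10 ** (exponents[from_unit] - exponents[to_unit])

assert convert(4, "km", "m") == 4000            # 4 * 10 ** (3 - 0)
assert abs(convert(4, "m", "km") - 0.004) < 1e-18  # 4 * 10 ** (0 - 3)
print(convert(3, "Mm", "km"))                   # 3000.0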
86
from ..utils import DummyObject, requires_backends class __A( metaclass=__lowerCamelCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = ["""torch""", """torchsde"""] def __init__(self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ): requires_backends(self , ["""torch""", """torchsde"""] ) @classmethod def UpperCAmelCase_ (cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ): requires_backends(cls , ["""torch""", """torchsde"""] ) @classmethod def UpperCAmelCase_ (cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ): requires_backends(cls , ["""torch""", """torchsde"""] )
86
1
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCamelCase_ = {'''configuration_xlnet''': ['''XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XLNetConfig''']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase_ = ['''XLNetTokenizer'''] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase_ = ['''XLNetTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase_ = [ '''XLNET_PRETRAINED_MODEL_ARCHIVE_LIST''', '''XLNetForMultipleChoice''', '''XLNetForQuestionAnswering''', '''XLNetForQuestionAnsweringSimple''', '''XLNetForSequenceClassification''', '''XLNetForTokenClassification''', '''XLNetLMHeadModel''', '''XLNetModel''', '''XLNetPreTrainedModel''', '''load_tf_weights_in_xlnet''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase_ = [ '''TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFXLNetForMultipleChoice''', '''TFXLNetForQuestionAnsweringSimple''', '''TFXLNetForSequenceClassification''', '''TFXLNetForTokenClassification''', '''TFXLNetLMHeadModel''', '''TFXLNetMainLayer''', '''TFXLNetModel''', '''TFXLNetPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xlnet import XLNetTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xlnet_fast import XLNetTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xlnet import ( XLNET_PRETRAINED_MODEL_ARCHIVE_LIST, XLNetForMultipleChoice, XLNetForQuestionAnswering, XLNetForQuestionAnsweringSimple, XLNetForSequenceClassification, XLNetForTokenClassification, XLNetLMHeadModel, XLNetModel, XLNetPreTrainedModel, load_tf_weights_in_xlnet, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xlnet import ( TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFXLNetForMultipleChoice, TFXLNetForQuestionAnsweringSimple, TFXLNetForSequenceClassification, TFXLNetForTokenClassification, TFXLNetLMHeadModel, TFXLNetMainLayer, TFXLNetModel, TFXLNetPreTrainedModel, ) else: import sys lowerCamelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
86
from __future__ import annotations from typing import TypedDict class __A( __lowerCamelCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = 42 SCREAMING_SNAKE_CASE__ = 42 def __magic_name__ ( __a : str ): '''simple docstring''' if not isinstance(__a , __a ): raise TypeError("""The parameter s type must be str.""" ) return [s[i:] + s[:i] for i in range(len(__a ) )] def __magic_name__ ( __a : str ): '''simple docstring''' if not isinstance(__a , __a ): raise TypeError("""The parameter s type must be str.""" ) if not s: raise ValueError("""The parameter s must not be empty.""" ) UpperCamelCase__ = all_rotations(__a ) rotations.sort() # sort the list of rotations in alphabetical order # make a string composed of the last char of each rotation UpperCamelCase__ = { "bwt_string": "".join([word[-1] for word in rotations] ), "idx_original_string": rotations.index(__a ), } return response def __magic_name__ ( __a : str , __a : int ): '''simple docstring''' if not isinstance(__a , __a ): raise TypeError("""The parameter bwt_string type must be str.""" ) if not bwt_string: raise ValueError("""The parameter bwt_string must not be empty.""" ) try: UpperCamelCase__ = int(__a ) except ValueError: raise TypeError( """The parameter idx_original_string type must be int or""" """ castable to int.""" ) if idx_original_string < 0: raise ValueError("""The parameter idx_original_string must not be lower than 0.""" ) if idx_original_string >= len(__a ): raise ValueError( """The parameter idx_original_string must be lower than""" """ len(bwt_string).""" ) UpperCamelCase__ = [""""""] * len(__a ) for _ in range(len(__a ) ): for i in range(len(__a ) ): UpperCamelCase__ = bwt_string[i] + ordered_rotations[i] ordered_rotations.sort() return ordered_rotations[idx_original_string] if __name__ == "__main__": lowerCamelCase_ = '''Provide a string and I will generate its BWT transform: ''' lowerCamelCase_ = input(entry_msg).strip() lowerCamelCase_ = bwt_transform(s) print( f'Burrows Wheeler transform for string \'{s}\' results ' f'in \'{result["bwt_string"]}\'' ) lowerCamelCase_ = reverse_bwt(result['''bwt_string'''], result['''idx_original_string''']) print( f'Reversing Burrows Wheeler transform for entry \'{result["bwt_string"]}\' ' f'we get original string \'{original_string}\'' )
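# --- Illustrative sketch (add-on, not part of the original module) ---
# Tracing the transform by hand: all rotations of the input are sorted, and
# the last column plus the index of the original rotation suffice to invert
# the transform. A standalone round-trip on "banana" with assumed names that
# mirror the functions above:
def all_rot(s: str) -> list[str]:
    return [s[i:] + s[:i] for i in range(len(s))]

s = "banana"
rotations = sorted(all_rot(s))
bwt = "".join(word[-1] for word in rotations)
idx = rotations.index(s)
print(bwt, idx)  # 'nnbaaa' 3

# inverse: repeatedly prepend the BWT column and re-sort, as reverse_bwt does
rows = [""] * len(bwt)
for _ in range(len(bwt)):
    rows = sorted(bwt[i] + rows[i] for i in range(len(bwt)))
print(rows[idx])  # 'banana'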
86
1
def __magic_name__ ( __a : list , __a : int = 0 ): '''simple docstring''' UpperCamelCase__ = length or len(__a ) UpperCamelCase__ = False for i in range(length - 1 ): if list_data[i] > list_data[i + 1]: UpperCamelCase__ , UpperCamelCase__ = list_data[i + 1], list_data[i] UpperCamelCase__ = True return list_data if not swapped else bubble_sort(__a , length - 1 ) if __name__ == "__main__": import doctest doctest.testmod()
86
import os from datetime import datetime as dt from github import Github lowerCamelCase_ = [ '''good first issue''', '''good second issue''', '''good difficult issue''', '''enhancement''', '''new pipeline/model''', '''new scheduler''', '''wip''', ] def __magic_name__ ( ): '''simple docstring''' UpperCamelCase__ = Github(os.environ["""GITHUB_TOKEN"""] ) UpperCamelCase__ = g.get_repo("""huggingface/diffusers""" ) UpperCamelCase__ = repo.get_issues(state="""open""" ) for issue in open_issues: UpperCamelCase__ = sorted(issue.get_comments() , key=lambda __a : i.created_at , reverse=__a ) UpperCamelCase__ = comments[0] if len(__a ) > 0 else None if ( last_comment is not None and last_comment.user.login == "github-actions[bot]" and (dt.utcnow() - issue.updated_at).days > 7 and (dt.utcnow() - issue.created_at).days >= 30 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() ) ): # Closes the issue after 7 days of inactivity since the Stalebot notification. issue.edit(state="""closed""" ) elif ( "stale" in issue.get_labels() and last_comment is not None and last_comment.user.login != "github-actions[bot]" ): # Opens the issue if someone other than Stalebot commented. issue.edit(state="""open""" ) issue.remove_from_labels("""stale""" ) elif ( (dt.utcnow() - issue.updated_at).days > 23 and (dt.utcnow() - issue.created_at).days >= 30 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() ) ): # Post a Stalebot notification after 23 days of inactivity. issue.create_comment( """This issue has been automatically marked as stale because it has not had """ """recent activity. If you think this still needs to be addressed """ """please comment on this thread.\n\nPlease note that issues that do not follow the """ """[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) """ """are likely to be ignored.""" ) issue.add_to_labels("""stale""" ) if __name__ == "__main__": main()
86
1
def __magic_name__ ( __a : str ): '''simple docstring''' UpperCamelCase__ = len(__a ) while cur > 1: # Find the maximum number in arr UpperCamelCase__ = arr.index(max(arr[0:cur] ) ) # Reverse from 0 to mi UpperCamelCase__ = arr[mi::-1] + arr[mi + 1 : len(__a )] # Reverse whole list UpperCamelCase__ = arr[cur - 1 :: -1] + arr[cur : len(__a )] cur -= 1 return arr if __name__ == "__main__": lowerCamelCase_ = input('''Enter numbers separated by a comma:\n''').strip() lowerCamelCase_ = [int(item) for item in user_input.split(''',''')] print(pancake_sort(unsorted))
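# --- Illustrative sketch (add-on, not part of the original module) ---
# Each pass finds the max of the unsorted prefix, flips it to the front, then
# flips the whole prefix so the max lands at position cur - 1. A standalone
# trace on [3, 1, 2] with an assumed helper name:
def flip(arr: list, k: int) -> list:
    # reverse the first k elements
    return arr[k - 1 :: -1] + arr[k:]

arr = [3, 1, 2]
for cur in range(len(arr), 1, -1):
    mi = arr.index(max(arr[:cur]))
    arr = flip(arr, mi + 1)  # bring the max to the front
    arr = flip(arr, cur)     # flip it into its final slot
assert arr == [1, 2, 3]      # [3,1,2] -> [2,1,3] -> [1,2,3]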
86
import inspect from typing import List, Optional, Tuple, Union import numpy as np import PIL import torch import torch.utils.checkpoint from ...models import UNetaDModel, VQModel from ...schedulers import ( DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, ) from ...utils import PIL_INTERPOLATION, randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput def __magic_name__ ( __a : Optional[Any] ): '''simple docstring''' UpperCamelCase__ , UpperCamelCase__ = image.size UpperCamelCase__ , UpperCamelCase__ = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32 UpperCamelCase__ = image.resize((w, h) , resample=PIL_INTERPOLATION["""lanczos"""] ) UpperCamelCase__ = np.array(__a ).astype(np.floataa ) / 255.0 UpperCamelCase__ = image[None].transpose(0 , 3 , 1 , 2 ) UpperCamelCase__ = torch.from_numpy(__a ) return 2.0 * image - 1.0 class __A( __lowerCamelCase ): """simple docstring""" def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ): super().__init__() self.register_modules(vqvae=SCREAMING_SNAKE_CASE_ , unet=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ ) @torch.no_grad() def __call__(self , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 1 , SCREAMING_SNAKE_CASE_ = 1_00 , SCREAMING_SNAKE_CASE_ = 0.0 , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = "pil" , SCREAMING_SNAKE_CASE_ = True , ): if isinstance(SCREAMING_SNAKE_CASE_ , PIL.Image.Image ): UpperCamelCase__ = 1 elif isinstance(SCREAMING_SNAKE_CASE_ , torch.Tensor ): UpperCamelCase__ = image.shape[0] else: raise ValueError(F"`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(SCREAMING_SNAKE_CASE_ )}" ) if isinstance(SCREAMING_SNAKE_CASE_ , PIL.Image.Image ): UpperCamelCase__ = preprocess(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ , UpperCamelCase__ = image.shape[-2:] # in_channels should be 6: 3 for latents, 3 for low resolution image UpperCamelCase__ = (batch_size, self.unet.config.in_channels // 2, height, width) UpperCamelCase__ = next(self.unet.parameters() ).dtype UpperCamelCase__ = randn_tensor(SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , device=self.device , dtype=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = image.to(device=self.device , dtype=SCREAMING_SNAKE_CASE_ ) # set timesteps and move to the correct device self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ , device=self.device ) UpperCamelCase__ = self.scheduler.timesteps # scale the initial noise by the standard deviation required by the scheduler UpperCamelCase__ = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature. # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] UpperCamelCase__ = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() ) UpperCamelCase__ = {} if accepts_eta: UpperCamelCase__ = eta for t in self.progress_bar(SCREAMING_SNAKE_CASE_ ): # concat latents and low resolution image in the channel dimension. 
UpperCamelCase__ = torch.cat([latents, image] , dim=1 ) UpperCamelCase__ = self.scheduler.scale_model_input(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # predict the noise residual UpperCamelCase__ = self.unet(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).sample # compute the previous noisy sample x_t -> x_t-1 UpperCamelCase__ = self.scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ).prev_sample # decode the image latents with the VQVAE UpperCamelCase__ = self.vqvae.decode(SCREAMING_SNAKE_CASE_ ).sample UpperCamelCase__ = torch.clamp(SCREAMING_SNAKE_CASE_ , -1.0 , 1.0 ) UpperCamelCase__ = image / 2 + 0.5 UpperCamelCase__ = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": UpperCamelCase__ = self.numpy_to_pil(SCREAMING_SNAKE_CASE_ ) if not return_dict: return (image,) return ImagePipelineOutput(images=SCREAMING_SNAKE_CASE_ )
86
1
def __magic_name__ ( __a : int , __a : int ): '''simple docstring''' return "\n".join( f"{number} * {i} = {number * i}" for i in range(1 , number_of_terms + 1 ) ) if __name__ == "__main__": print(multiplication_table(number=5, number_of_terms=10))
86
def __magic_name__ ( __a : str , __a : str ): '''simple docstring''' UpperCamelCase__ = len(__a ) UpperCamelCase__ = len(__a ) UpperCamelCase__ = [[False for _ in range(m + 1 )] for _ in range(n + 1 )] UpperCamelCase__ = True for i in range(__a ): for j in range(m + 1 ): if dp[i][j]: if j < m and a[i].upper() == b[j]: UpperCamelCase__ = True if a[i].islower(): UpperCamelCase__ = True return dp[n][m] if __name__ == "__main__": import doctest doctest.testmod()
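# --- Illustrative sketch (add-on, not part of the original module) ---
# The DP above answers: can string a be turned into abbreviation b by
# uppercasing some of its lowercase letters and deleting the rest?
# dp[i][j] is True when a[:i] can produce b[:j]. Standalone reproduction
# under an assumed readable name:
def match_abbreviation(a: str, b: str) -> bool:
    n, m = len(a), len(b)
    dp = [[False] * (m + 1) for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True  # uppercase a[i] to match b[j]
                if a[i].islower():
                    dp[i + 1][j] = True      # delete the lowercase a[i]
    return dp[n][m]

assert match_abbreviation("daBcd", "ABC") is True   # uppercase 'a', 'c'; drop the 'd's
assert match_abbreviation("dBcd", "ABC") is False   # no 'a' available to uppercase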
86
1
from typing import List, Optional, Tuple, Union import torch from ...models import UNetaDModel from ...schedulers import KarrasVeScheduler from ...utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput class __A( __lowerCamelCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = 42 SCREAMING_SNAKE_CASE__ = 42 def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): super().__init__() self.register_modules(unet=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ ) @torch.no_grad() def __call__(self , SCREAMING_SNAKE_CASE_ = 1 , SCREAMING_SNAKE_CASE_ = 50 , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = "pil" , SCREAMING_SNAKE_CASE_ = True , **SCREAMING_SNAKE_CASE_ , ): UpperCamelCase__ = self.unet.config.sample_size UpperCamelCase__ = (batch_size, 3, img_size, img_size) UpperCamelCase__ = self.unet # sample x_0 ~ N(0, sigma_0^2 * I) UpperCamelCase__ = randn_tensor(SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , device=self.device ) * self.scheduler.init_noise_sigma self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ ) for t in self.progress_bar(self.scheduler.timesteps ): # here sigma_t == t_i from the paper UpperCamelCase__ = self.scheduler.schedule[t] UpperCamelCase__ = self.scheduler.schedule[t - 1] if t > 0 else 0 # 1. Select temporarily increased noise level sigma_hat # 2. Add new noise to move from sample_i to sample_hat UpperCamelCase__ , UpperCamelCase__ = self.scheduler.add_noise_to_input(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ ) # 3. Predict the noise residual given the noise magnitude `sigma_hat` # The model inputs and output are adjusted by following eq. (213) in [1]. UpperCamelCase__ = (sigma_hat / 2) * model((sample_hat + 1) / 2 , sigma_hat / 2 ).sample # 4. Evaluate dx/dt at sigma_hat # 5. Take Euler step from sigma to sigma_prev UpperCamelCase__ = self.scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) if sigma_prev != 0: # 6. Apply 2nd order correction # The model inputs and output are adjusted by following eq. (213) in [1]. UpperCamelCase__ = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2 , sigma_prev / 2 ).sample UpperCamelCase__ = self.scheduler.step_correct( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , step_output.prev_sample , step_output["""derivative"""] , ) UpperCamelCase__ = step_output.prev_sample UpperCamelCase__ = (sample / 2 + 0.5).clamp(0 , 1 ) UpperCamelCase__ = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": UpperCamelCase__ = self.numpy_to_pil(SCREAMING_SNAKE_CASE_ ) if not return_dict: return (image,) return ImagePipelineOutput(images=SCREAMING_SNAKE_CASE_ )
86
from __future__ import annotations lowerCamelCase_ = '''#''' class __A: """simple docstring""" def __init__(self ): UpperCamelCase__ = {} def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = self._trie for char in text: if char not in trie: UpperCamelCase__ = {} UpperCamelCase__ = trie[char] UpperCamelCase__ = True def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = self._trie for char in prefix: if char in trie: UpperCamelCase__ = trie[char] else: return [] return self._elements(SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = [] for c, v in d.items(): UpperCamelCase__ = [""" """] if c == END else [(c + s) for s in self._elements(SCREAMING_SNAKE_CASE_ )] result.extend(SCREAMING_SNAKE_CASE_ ) return tuple(SCREAMING_SNAKE_CASE_ ) lowerCamelCase_ = Trie() lowerCamelCase_ = ('''depart''', '''detergent''', '''daring''', '''dog''', '''deer''', '''deal''') for word in words: trie.insert_word(word) def __magic_name__ ( __a : str ): '''simple docstring''' UpperCamelCase__ = trie.find_word(__a ) return tuple(string + word for word in suffixes ) def __magic_name__ ( ): '''simple docstring''' print(autocomplete_using_trie("""de""" ) ) if __name__ == "__main__": import doctest doctest.testmod() main()
86
1
def __magic_name__ ( __a : Optional[Any] ): '''simple docstring''' UpperCamelCase__ = [0] * len(__a ) UpperCamelCase__ = [] UpperCamelCase__ = [1] * len(__a ) for values in graph.values(): for i in values: indegree[i] += 1 for i in range(len(__a ) ): if indegree[i] == 0: queue.append(__a ) while queue: UpperCamelCase__ = queue.pop(0 ) for x in graph[vertex]: indegree[x] -= 1 if long_dist[vertex] + 1 > long_dist[x]: UpperCamelCase__ = long_dist[vertex] + 1 if indegree[x] == 0: queue.append(__a ) print(max(__a ) ) # Adjacency list of Graph lowerCamelCase_ = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []} longest_distance(graph)
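# --- Illustrative sketch (add-on, not part of the original module) ---
# The function above is Kahn's topological sort with a relaxation step:
# whenever edge v -> x is processed, long_dist[x] becomes
# max(long_dist[x], long_dist[v] + 1), so the printed value is the number of
# vertices on the longest path. On the hardcoded graph, 0 -> 3 -> 5 -> 6 -> 7
# visits 5 vertices, so the script prints 5. A standalone check of the same
# relaxation on a tiny chain, under assumed readable names:
def longest_chain_length(graph: dict) -> int:
    indegree = {v: 0 for v in graph}
    for targets in graph.values():
        for t in targets:
            indegree[t] += 1
    dist = {v: 1 for v in graph}  # every path contains at least its start vertex
    queue = [v for v in graph if indegree[v] == 0]
    while queue:
        v = queue.pop(0)
        for x in graph[v]:
            dist[x] = max(dist[x], dist[v] + 1)
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x)
    return max(dist.values())

assert longest_chain_length({0: [1], 1: [2], 2: []}) == 3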
86
import math import unittest from transformers import BioGptConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification, BioGptModel, BioGptTokenizer, ) from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST class __A: """simple docstring""" def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=13 , SCREAMING_SNAKE_CASE_=7 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=99 , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=5 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=37 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=5_12 , SCREAMING_SNAKE_CASE_=16 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=0.02 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=None , ): UpperCamelCase__ = parent UpperCamelCase__ = batch_size UpperCamelCase__ = seq_length UpperCamelCase__ = is_training UpperCamelCase__ = use_input_mask UpperCamelCase__ = use_token_type_ids UpperCamelCase__ = use_labels UpperCamelCase__ = vocab_size UpperCamelCase__ = hidden_size UpperCamelCase__ = num_hidden_layers UpperCamelCase__ = num_attention_heads UpperCamelCase__ = intermediate_size UpperCamelCase__ = hidden_act UpperCamelCase__ = hidden_dropout_prob UpperCamelCase__ = attention_probs_dropout_prob UpperCamelCase__ = max_position_embeddings UpperCamelCase__ = type_vocab_size UpperCamelCase__ = type_sequence_label_size UpperCamelCase__ = initializer_range UpperCamelCase__ = num_labels UpperCamelCase__ = num_choices UpperCamelCase__ = scope def UpperCAmelCase_ (self ): UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCamelCase__ = None if self.use_input_mask: UpperCamelCase__ = random_attention_mask([self.batch_size, self.seq_length] ) UpperCamelCase__ = None if self.use_token_type_ids: UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) UpperCamelCase__ = None UpperCamelCase__ = None UpperCamelCase__ = None if self.use_labels: UpperCamelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCamelCase__ = ids_tensor([self.batch_size] , self.num_choices ) UpperCamelCase__ = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCAmelCase_ (self ): return BioGptConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=SCREAMING_SNAKE_CASE_ , initializer_range=self.initializer_range , ) def UpperCAmelCase_ (self , 
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = BioGptModel(config=SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ): UpperCamelCase__ = BioGptForCausalLM(config=SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = BioGptModel(config=SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() # create attention mask UpperCamelCase__ = torch.ones(input_ids.shape , dtype=torch.long , device=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = self.seq_length // 2 UpperCamelCase__ = 0 # first forward pass UpperCamelCase__ , UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ ).to_tuple() # create hypothetical next token and extent to next_input_ids UpperCamelCase__ = ids_tensor((self.batch_size, 1) , config.vocab_size ) # change a random masked slice from input_ids UpperCamelCase__ = ids_tensor((1,) , SCREAMING_SNAKE_CASE_ ).item() + 1 UpperCamelCase__ = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 ) UpperCamelCase__ = random_other_next_tokens # append to next input_ids and attn_mask UpperCamelCase__ = torch.cat([input_ids, next_tokens] , dim=-1 ) UpperCamelCase__ = torch.cat( [attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=SCREAMING_SNAKE_CASE_ )] , dim=1 , ) # get two different outputs UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ )["""last_hidden_state"""] UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , past_key_values=SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ )["""last_hidden_state"""] # select random slice UpperCamelCase__ = ids_tensor((1,) , output_from_past.shape[-1] ).item() UpperCamelCase__ = output_from_no_past[:, -1, random_slice_idx].detach() UpperCamelCase__ = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1E-3 ) ) def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = BioGptModel(config=SCREAMING_SNAKE_CASE_ ).to(SCREAMING_SNAKE_CASE_ ).eval() UpperCamelCase__ = torch.ones(input_ids.shape , dtype=torch.long , device=SCREAMING_SNAKE_CASE_ ) # first forward pass UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , 
use_cache=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ , UpperCamelCase__ = outputs.to_tuple() # create hypothetical multiple next token and extent to next_input_ids UpperCamelCase__ = ids_tensor((self.batch_size, 3) , config.vocab_size ) UpperCamelCase__ = ids_tensor((self.batch_size, 3) , 2 ) # append to next input_ids and UpperCamelCase__ = torch.cat([input_ids, next_tokens] , dim=-1 ) UpperCamelCase__ = torch.cat([attention_mask, next_attn_mask] , dim=-1 ) UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ )["""last_hidden_state"""] UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , past_key_values=SCREAMING_SNAKE_CASE_ )[ """last_hidden_state""" ] # select random slice UpperCamelCase__ = ids_tensor((1,) , output_from_past.shape[-1] ).item() UpperCamelCase__ = output_from_no_past[:, -3:, random_slice_idx].detach() UpperCamelCase__ = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1E-3 ) ) def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=False ): UpperCamelCase__ = BioGptForCausalLM(SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) if gradient_checkpointing: model.gradient_checkpointing_enable() UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) result.loss.backward() def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = BioGptModel(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers ) for key in model.state_dict().keys(): if "c_proj" in key and "weight" in key: self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.001 ) self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.01 ) def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = self.num_labels UpperCamelCase__ = BioGptForTokenClassification(SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCAmelCase_ (self ): UpperCamelCase__ = self.prepare_config_and_inputs() ( ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ) = config_and_inputs UpperCamelCase__ = {"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class __A( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = ( (BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification) if is_torch_available() else () ) SCREAMING_SNAKE_CASE__ = (BioGptForCausalLM,) if 
is_torch_available() else () SCREAMING_SNAKE_CASE__ = ( { """feature-extraction""": BioGptModel, """text-classification""": BioGptForSequenceClassification, """text-generation""": BioGptForCausalLM, """token-classification""": BioGptForTokenClassification, """zero-shot""": BioGptForSequenceClassification, } if is_torch_available() else {} ) SCREAMING_SNAKE_CASE__ = False def UpperCAmelCase_ (self ): UpperCamelCase__ = BioGptModelTester(self ) UpperCamelCase__ = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , hidden_size=37 ) def UpperCAmelCase_ (self ): self.config_tester.run_common_tests() def UpperCAmelCase_ (self ): UpperCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self ): UpperCamelCase__ = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: UpperCamelCase__ = type self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self ): UpperCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_model_attention_mask_past(*SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self ): UpperCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_forward_and_backwards(*SCREAMING_SNAKE_CASE_ , gradient_checkpointing=SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self ): UpperCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_model_past_large_inputs(*SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self ): UpperCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_weight_initialization(*SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self ): UpperCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_for_token_classification(*SCREAMING_SNAKE_CASE_ ) @slow def UpperCAmelCase_ (self ): UpperCamelCase__ = BioGptForCausalLM.from_pretrained("""microsoft/biogpt""" ) model.to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = BioGptTokenizer.from_pretrained("""microsoft/biogpt""" ) UpperCamelCase__ = """left""" # Define PAD Token = EOS Token = 50256 UpperCamelCase__ = tokenizer.eos_token UpperCamelCase__ = model.config.eos_token_id # use different length sentences to test batching UpperCamelCase__ = [ """Hello, my dog is a little""", """Today, I""", ] UpperCamelCase__ = tokenizer(SCREAMING_SNAKE_CASE_ , return_tensors="""pt""" , padding=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = inputs["""input_ids"""].to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = model.generate( input_ids=SCREAMING_SNAKE_CASE_ , attention_mask=inputs["""attention_mask"""].to(SCREAMING_SNAKE_CASE_ ) , ) UpperCamelCase__ = tokenizer(sentences[0] , return_tensors="""pt""" ).input_ids.to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = model.generate(input_ids=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = inputs_non_padded.shape[-1] - inputs["""attention_mask"""][-1].long().sum().cpu().item() UpperCamelCase__ = tokenizer(sentences[1] , return_tensors="""pt""" ).input_ids.to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = model.generate(input_ids=SCREAMING_SNAKE_CASE_ , max_length=model.config.max_length - num_paddings ) UpperCamelCase__ = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ , skip_special_tokens=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = tokenizer.decode(output_non_padded[0] , skip_special_tokens=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = 
tokenizer.decode(output_padded[0] , skip_special_tokens=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = [ """Hello, my dog is a little bit bigger than a little bit.""", """Today, I have a good idea of how to use the information""", ] self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) self.assertListEqual(SCREAMING_SNAKE_CASE_ , [non_padded_sentence, padded_sentence] ) @slow def UpperCAmelCase_ (self ): for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCamelCase__ = BioGptModel.from_pretrained(SCREAMING_SNAKE_CASE_ ) self.assertIsNotNone(SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self ): UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() UpperCamelCase__ = 3 UpperCamelCase__ = input_dict["""input_ids"""] UpperCamelCase__ = input_ids.ne(1 ).to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) UpperCamelCase__ = BioGptForSequenceClassification(SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def UpperCAmelCase_ (self ): UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() UpperCamelCase__ = 3 UpperCamelCase__ = """multi_label_classification""" UpperCamelCase__ = input_dict["""input_ids"""] UpperCamelCase__ = input_ids.ne(1 ).to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = ids_tensor( [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float ) UpperCamelCase__ = BioGptForSequenceClassification(SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) @require_torch class __A( unittest.TestCase ): """simple docstring""" @slow def UpperCAmelCase_ (self ): UpperCamelCase__ = BioGptForCausalLM.from_pretrained("""microsoft/biogpt""" ) UpperCamelCase__ = torch.tensor([[2, 48_05, 9, 6_56, 21]] ) UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ )[0] UpperCamelCase__ = 4_23_84 UpperCamelCase__ = torch.Size((1, 5, vocab_size) ) self.assertEqual(output.shape , SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = torch.tensor( [[[-9.5236, -9.8918, 10.4557], [-11.0469, -9.6423, 8.1022], [-8.8664, -7.8826, 5.5325]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=1E-4 ) ) @slow def UpperCAmelCase_ (self ): UpperCamelCase__ = BioGptTokenizer.from_pretrained("""microsoft/biogpt""" ) UpperCamelCase__ = BioGptForCausalLM.from_pretrained("""microsoft/biogpt""" ) model.to(SCREAMING_SNAKE_CASE_ ) torch.manual_seed(0 ) UpperCamelCase__ = tokenizer("""COVID-19 is""" , return_tensors="""pt""" ).to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = model.generate( **SCREAMING_SNAKE_CASE_ , min_length=1_00 , max_length=10_24 , num_beams=5 , early_stopping=SCREAMING_SNAKE_CASE_ , ) UpperCamelCase__ = tokenizer.decode(output_ids[0] , skip_special_tokens=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = ( """COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the""" """ causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 
200 countries and""" """ territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),""" """ and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and""" """ more than 800,000 deaths.""" ) self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
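# The `past_key_values` checks above generalize to any causal LM: run the model
# once over the full sequence and once incrementally with the cache, then
# compare the logits for the last position. A minimal sketch -- the checkpoint
# is the one the slow tests use; the tolerance is an assumption of mine:
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

tok = AutoTokenizer.from_pretrained("microsoft/biogpt")
model = AutoModelForCausalLM.from_pretrained("microsoft/biogpt").eval()
ids = tok("COVID-19 is", return_tensors="pt").input_ids
with torch.no_grad():
    full = model(ids).logits                                    # one full pass
    past = model(ids[:, :-1], use_cache=True).past_key_values   # prefill
    step = model(ids[:, -1:], past_key_values=past).logits      # one cached step
assert torch.allclose(full[:, -1], step[:, -1], atol=1e-4)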
86
1
import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, StableDiffusionSAGPipeline, UNetaDConditionModel, ) from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class __A( __lowerCamelCase , __lowerCamelCase , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = StableDiffusionSAGPipeline SCREAMING_SNAKE_CASE__ = TEXT_TO_IMAGE_PARAMS SCREAMING_SNAKE_CASE__ = TEXT_TO_IMAGE_BATCH_PARAMS SCREAMING_SNAKE_CASE__ = TEXT_TO_IMAGE_IMAGE_PARAMS SCREAMING_SNAKE_CASE__ = TEXT_TO_IMAGE_IMAGE_PARAMS SCREAMING_SNAKE_CASE__ = False def UpperCAmelCase_ (self ): torch.manual_seed(0 ) UpperCamelCase__ = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , ) UpperCamelCase__ = DDIMScheduler( beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=SCREAMING_SNAKE_CASE_ , set_alpha_to_one=SCREAMING_SNAKE_CASE_ , ) torch.manual_seed(0 ) UpperCamelCase__ = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , ) torch.manual_seed(0 ) UpperCamelCase__ = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) UpperCamelCase__ = CLIPTextModel(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) UpperCamelCase__ = { """unet""": unet, """scheduler""": scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """safety_checker""": None, """feature_extractor""": None, } return components def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=0 ): if str(SCREAMING_SNAKE_CASE_ ).startswith("""mps""" ): UpperCamelCase__ = torch.manual_seed(SCREAMING_SNAKE_CASE_ ) else: UpperCamelCase__ = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = { """prompt""": """.""", """generator""": generator, """num_inference_steps""": 2, """guidance_scale""": 1.0, """sag_scale""": 1.0, """output_type""": """numpy""", } return inputs def UpperCAmelCase_ (self ): super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) @slow @require_torch_gpu class __A( unittest.TestCase ): """simple docstring""" def UpperCAmelCase_ (self ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCAmelCase_ (self ): UpperCamelCase__ = StableDiffusionSAGPipeline.from_pretrained("""CompVis/stable-diffusion-v1-4""" ) UpperCamelCase__ = sag_pipe.to(SCREAMING_SNAKE_CASE_ ) sag_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = """.""" UpperCamelCase__ = torch.manual_seed(0 ) UpperCamelCase__ = sag_pipe( [prompt] , 
generator=SCREAMING_SNAKE_CASE_ , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="""np""" ) UpperCamelCase__ = output.images UpperCamelCase__ = image[0, -3:, -3:, -1] assert image.shape == (1, 5_12, 5_12, 3) UpperCamelCase__ = np.array([0.1568, 0.1738, 0.1695, 0.1693, 0.1507, 0.1705, 0.1547, 0.1751, 0.1949] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-2 def UpperCAmelCase_ (self ): UpperCamelCase__ = StableDiffusionSAGPipeline.from_pretrained("""stabilityai/stable-diffusion-2-1-base""" ) UpperCamelCase__ = sag_pipe.to(SCREAMING_SNAKE_CASE_ ) sag_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = """.""" UpperCamelCase__ = torch.manual_seed(0 ) UpperCamelCase__ = sag_pipe( [prompt] , generator=SCREAMING_SNAKE_CASE_ , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="""np""" ) UpperCamelCase__ = output.images UpperCamelCase__ = image[0, -3:, -3:, -1] assert image.shape == (1, 5_12, 5_12, 3) UpperCamelCase__ = np.array([0.3459, 0.2876, 0.2537, 0.3002, 0.2671, 0.2160, 0.3026, 0.2262, 0.2371] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-2 def UpperCAmelCase_ (self ): UpperCamelCase__ = StableDiffusionSAGPipeline.from_pretrained("""stabilityai/stable-diffusion-2-1-base""" ) UpperCamelCase__ = sag_pipe.to(SCREAMING_SNAKE_CASE_ ) sag_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = """.""" UpperCamelCase__ = torch.manual_seed(0 ) UpperCamelCase__ = sag_pipe( [prompt] , width=7_68 , height=5_12 , generator=SCREAMING_SNAKE_CASE_ , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="""np""" , ) UpperCamelCase__ = output.images assert image.shape == (1, 5_12, 7_68, 3)
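# End-to-end usage of the pipeline exercised by the slow tests above; the
# checkpoint, prompt, and scales mirror those tests (a CUDA device is assumed):
import torch
from diffusers import StableDiffusionSAGPipeline

pipe = StableDiffusionSAGPipeline.from_pretrained("CompVis/stable-diffusion-v1-4").to("cuda")
image = pipe(
    ".",
    generator=torch.manual_seed(0),
    guidance_scale=7.5,
    sag_scale=1.0,   # strength of self-attention guidance; 0 disables it
    num_inference_steps=20,
).images[0]
image.save("sag_sample.png")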
86
from PIL import Image


def __magic_name__ ( __a : Image , __a : float ):
    '''simple docstring'''

    def brightness(__a : int ) -> float:
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError("""level must be between -255.0 (black) and 255.0 (white)""" )
    return img.point(__a )


if __name__ == "__main__":
    # Load image
    with Image.open('''image_data/lena.jpg''') as img:
        # Change brightness to 100
        lowerCamelCase_ = change_brightness(img, 1_00)
        brigt_img.save('''image_data/lena_brightness.png''', format='''png''')
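# The placeholder renaming above breaks the closure's variable references; a
# self-consistent version of the same PIL point transform looks like this
# (the 128 pivot cancels out, so the map is simply c + level per channel value):
from PIL import Image


def change_brightness(img: Image.Image, level: float) -> Image.Image:
    def brightness(c: int) -> float:
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")
    return img.point(brightness)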
86
1
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) lowerCamelCase_ = { '''configuration_roberta_prelayernorm''': [ '''ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RobertaPreLayerNormConfig''', '''RobertaPreLayerNormOnnxConfig''', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase_ = [ '''ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST''', '''RobertaPreLayerNormForCausalLM''', '''RobertaPreLayerNormForMaskedLM''', '''RobertaPreLayerNormForMultipleChoice''', '''RobertaPreLayerNormForQuestionAnswering''', '''RobertaPreLayerNormForSequenceClassification''', '''RobertaPreLayerNormForTokenClassification''', '''RobertaPreLayerNormModel''', '''RobertaPreLayerNormPreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase_ = [ '''TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFRobertaPreLayerNormForCausalLM''', '''TFRobertaPreLayerNormForMaskedLM''', '''TFRobertaPreLayerNormForMultipleChoice''', '''TFRobertaPreLayerNormForQuestionAnswering''', '''TFRobertaPreLayerNormForSequenceClassification''', '''TFRobertaPreLayerNormForTokenClassification''', '''TFRobertaPreLayerNormMainLayer''', '''TFRobertaPreLayerNormModel''', '''TFRobertaPreLayerNormPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase_ = [ '''FlaxRobertaPreLayerNormForCausalLM''', '''FlaxRobertaPreLayerNormForMaskedLM''', '''FlaxRobertaPreLayerNormForMultipleChoice''', '''FlaxRobertaPreLayerNormForQuestionAnswering''', '''FlaxRobertaPreLayerNormForSequenceClassification''', '''FlaxRobertaPreLayerNormForTokenClassification''', '''FlaxRobertaPreLayerNormModel''', '''FlaxRobertaPreLayerNormPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_roberta_prelayernorm import ( ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaPreLayerNormConfig, RobertaPreLayerNormOnnxConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_roberta_prelayernorm import ( ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST, RobertaPreLayerNormForCausalLM, RobertaPreLayerNormForMaskedLM, RobertaPreLayerNormForMultipleChoice, RobertaPreLayerNormForQuestionAnswering, RobertaPreLayerNormForSequenceClassification, RobertaPreLayerNormForTokenClassification, RobertaPreLayerNormModel, RobertaPreLayerNormPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_roberta_prelayernorm import ( TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST, TFRobertaPreLayerNormForCausalLM, TFRobertaPreLayerNormForMaskedLM, TFRobertaPreLayerNormForMultipleChoice, TFRobertaPreLayerNormForQuestionAnswering, TFRobertaPreLayerNormForSequenceClassification, TFRobertaPreLayerNormForTokenClassification, TFRobertaPreLayerNormMainLayer, TFRobertaPreLayerNormModel, TFRobertaPreLayerNormPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_roberta_prelayernorm import ( FlaxRobertaPreLayerNormForCausalLM, FlaxRobertaPreLayerNormForMaskedLM, 
FlaxRobertaPreLayerNormForMultipleChoice, FlaxRobertaPreLayerNormForQuestionAnswering, FlaxRobertaPreLayerNormForSequenceClassification, FlaxRobertaPreLayerNormForTokenClassification, FlaxRobertaPreLayerNormModel, FlaxRobertaPreLayerNormPreTrainedModel, ) else: import sys lowerCamelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
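# The `_LazyModule` indirection above defers the heavy framework imports until
# an attribute is first touched. A minimal sketch of the same idea using plain
# PEP 562 module-level __getattr__, independent of the transformers helper
# (submodule and class names are hypothetical):
import importlib

_import_structure = {
    "configuration": ["MyConfig"],
    "modeling": ["MyModel"],
}
_attr_to_module = {attr: mod for mod, attrs in _import_structure.items() for attr in attrs}


def __getattr__(name):
    if name in _attr_to_module:
        module = importlib.import_module(f".{_attr_to_module[name]}", __name__)
        return getattr(module, name)   # imported only on first access
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")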
86
lowerCamelCase_ = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(10_00_00)]


def __magic_name__ ( __a : int ):
    '''simple docstring'''
    UpperCamelCase__ = 0
    while number:
        # Increased Speed Slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 100_000]
        number //= 100_000
    return sum_of_digits_squared


# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
lowerCamelCase_ = [None] * 10_00_00_00
lowerCamelCase_ = True
lowerCamelCase_ = False


def __magic_name__ ( __a : int ):
    '''simple docstring'''
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore
    UpperCamelCase__ = chain(next_number(__a ) )
    UpperCamelCase__ = number_chain
    while number < 10_000_000:
        UpperCamelCase__ = number_chain
        number *= 10
    return number_chain


def __magic_name__ ( __a : int = 10_000_000 ):
    '''simple docstring'''
    for i in range(1 , __a ):
        if CHAINS[i] is None:
            chain(i + 1 )
    return CHAINS[:number].count(__a )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f'{solution() = }')
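# A compact readable equivalent of the search above (Project Euler 92: count
# starting numbers below ten million whose squared-digit chain reaches 89).
# After one step every number below 10**7 falls to at most 7 * 81 = 567, so
# precomputing the outcome for that small range is enough:
def next_number(number: int) -> int:
    return sum(int(d) ** 2 for d in str(number))


def solution(limit: int = 10_000_000) -> int:
    outcome = [False] * 568
    for n in range(1, 568):
        m = n
        while m != 1 and m != 89:
            m = next_number(m)
        outcome[n] = m == 89
    return sum(outcome[next_number(i)] for i in range(1, limit))


# solution() == 8581146, matching the known answer to the problem.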
86
1
from typing import Callable, Dict, Optional, Tuple import torch from torch import nn from torch.distributions import ( AffineTransform, Distribution, Independent, NegativeBinomial, Normal, StudentT, TransformedDistribution, ) class __A( __lowerCamelCase ): """simple docstring""" def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=0 ): UpperCamelCase__ = 1.0 if scale is None else scale UpperCamelCase__ = 0.0 if loc is None else loc super().__init__(SCREAMING_SNAKE_CASE_ , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=SCREAMING_SNAKE_CASE_ )] ) @property def UpperCAmelCase_ (self ): return self.base_dist.mean * self.scale + self.loc @property def UpperCAmelCase_ (self ): return self.base_dist.variance * self.scale**2 @property def UpperCAmelCase_ (self ): return self.variance.sqrt() class __A( nn.Module ): """simple docstring""" def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ): super().__init__(**SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = args_dim UpperCamelCase__ = nn.ModuleList([nn.Linear(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for dim in args_dim.values()] ) UpperCamelCase__ = domain_map def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = [proj(SCREAMING_SNAKE_CASE_ ) for proj in self.proj] return self.domain_map(*SCREAMING_SNAKE_CASE_ ) class __A( nn.Module ): """simple docstring""" def __init__(self , SCREAMING_SNAKE_CASE_ ): super().__init__() UpperCamelCase__ = function def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ ): return self.function(SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ ) class __A: """simple docstring""" SCREAMING_SNAKE_CASE__ = 42 SCREAMING_SNAKE_CASE__ = 42 SCREAMING_SNAKE_CASE__ = 42 def __init__(self , SCREAMING_SNAKE_CASE_ = 1 ): UpperCamelCase__ = dim UpperCamelCase__ = {k: dim * self.args_dim[k] for k in self.args_dim} def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ ): if self.dim == 1: return self.distribution_class(*SCREAMING_SNAKE_CASE_ ) else: return Independent(self.distribution_class(*SCREAMING_SNAKE_CASE_ ) , 1 ) def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , ): UpperCamelCase__ = self._base_distribution(SCREAMING_SNAKE_CASE_ ) if loc is None and scale is None: return distr else: return AffineTransformed(SCREAMING_SNAKE_CASE_ , loc=SCREAMING_SNAKE_CASE_ , scale=SCREAMING_SNAKE_CASE_ , event_dim=self.event_dim ) @property def UpperCAmelCase_ (self ): return () if self.dim == 1 else (self.dim,) @property def UpperCAmelCase_ (self ): return len(self.event_shape ) @property def UpperCAmelCase_ (self ): return 0.0 def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ ): return ParameterProjection( in_features=SCREAMING_SNAKE_CASE_ , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , ) def UpperCAmelCase_ (self , *SCREAMING_SNAKE_CASE_ ): raise NotImplementedError() @staticmethod def UpperCAmelCase_ (SCREAMING_SNAKE_CASE_ ): return (x + torch.sqrt(torch.square(SCREAMING_SNAKE_CASE_ ) + 4.0 )) / 2.0 class __A( __lowerCamelCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = {"df": 1, "loc": 1, "scale": 1} SCREAMING_SNAKE_CASE__ = StudentT @classmethod def UpperCAmelCase_ (cls , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = cls.squareplus(SCREAMING_SNAKE_CASE_ ).clamp_min(torch.finfo(scale.dtype ).eps ) UpperCamelCase__ = 2.0 
+ cls.squareplus(SCREAMING_SNAKE_CASE_ ) return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 ) class __A( __lowerCamelCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = {"loc": 1, "scale": 1} SCREAMING_SNAKE_CASE__ = Normal @classmethod def UpperCAmelCase_ (cls , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = cls.squareplus(SCREAMING_SNAKE_CASE_ ).clamp_min(torch.finfo(scale.dtype ).eps ) return loc.squeeze(-1 ), scale.squeeze(-1 ) class __A( __lowerCamelCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = {"total_count": 1, "logits": 1} SCREAMING_SNAKE_CASE__ = NegativeBinomial @classmethod def UpperCAmelCase_ (cls , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = cls.squareplus(SCREAMING_SNAKE_CASE_ ) return total_count.squeeze(-1 ), logits.squeeze(-1 ) def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ , UpperCamelCase__ = distr_args if self.dim == 1: return self.distribution_class(total_count=SCREAMING_SNAKE_CASE_ , logits=SCREAMING_SNAKE_CASE_ ) else: return Independent(self.distribution_class(total_count=SCREAMING_SNAKE_CASE_ , logits=SCREAMING_SNAKE_CASE_ ) , 1 ) def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None ): UpperCamelCase__ , UpperCamelCase__ = distr_args if scale is not None: # See scaling property of Gamma. logits += scale.log() return self._base_distribution((total_count, logits) )
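# The `squareplus` helper above, (x + sqrt(x**2 + 4)) / 2, is a smooth map onto
# the strictly positive reals, used here to constrain scale / df / total_count
# parameters. A quick standalone check of its key properties:
import torch


def squareplus(x: torch.Tensor) -> torch.Tensor:
    return (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0


assert torch.isclose(squareplus(torch.tensor(0.0)), torch.tensor(1.0))  # pivot value is 1
assert torch.all(squareplus(torch.linspace(-10.0, 10.0, 21)) > 0)       # always positive
# For large positive x it approaches the identity: squareplus(10) ~= 10.099.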
86
import argparse import hashlib import os import urllib import warnings import torch from torch import nn from tqdm import tqdm from transformers import WhisperConfig, WhisperForConditionalGeneration lowerCamelCase_ = { '''tiny.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt''', '''tiny''': '''https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt''', '''base.en''': '''https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt''', '''base''': '''https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt''', '''small.en''': '''https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt''', '''small''': '''https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt''', '''medium.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt''', '''medium''': '''https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt''', '''large''': '''https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt''', '''large-v2''': '''https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt''', } def __magic_name__ ( __a : List[str] ): '''simple docstring''' UpperCamelCase__ = ["""layers""", """blocks"""] for k in ignore_keys: state_dict.pop(__a , __a ) lowerCamelCase_ = { '''blocks''': '''layers''', '''mlp.0''': '''fc1''', '''mlp.2''': '''fc2''', '''mlp_ln''': '''final_layer_norm''', '''.attn.query''': '''.self_attn.q_proj''', '''.attn.key''': '''.self_attn.k_proj''', '''.attn.value''': '''.self_attn.v_proj''', '''.attn_ln''': '''.self_attn_layer_norm''', '''.attn.out''': '''.self_attn.out_proj''', '''.cross_attn.query''': '''.encoder_attn.q_proj''', '''.cross_attn.key''': '''.encoder_attn.k_proj''', '''.cross_attn.value''': '''.encoder_attn.v_proj''', '''.cross_attn_ln''': '''.encoder_attn_layer_norm''', '''.cross_attn.out''': '''.encoder_attn.out_proj''', '''decoder.ln.''': '''decoder.layer_norm.''', '''encoder.ln.''': '''encoder.layer_norm.''', '''token_embedding''': '''embed_tokens''', '''encoder.positional_embedding''': '''encoder.embed_positions.weight''', '''decoder.positional_embedding''': '''decoder.embed_positions.weight''', '''ln_post''': '''layer_norm''', } def __magic_name__ ( __a : Dict ): '''simple docstring''' UpperCamelCase__ = list(s_dict.keys() ) for key in keys: UpperCamelCase__ = key for k, v in WHISPER_MAPPING.items(): if k in key: UpperCamelCase__ = new_key.replace(__a , __a ) print(f"{key} -> {new_key}" ) UpperCamelCase__ = s_dict.pop(__a ) return s_dict def __magic_name__ ( __a : Optional[Any] ): '''simple docstring''' UpperCamelCase__ , UpperCamelCase__ = emb.weight.shape UpperCamelCase__ = nn.Linear(__a , __a , bias=__a ) UpperCamelCase__ = emb.weight.data return lin_layer def __magic_name__ ( __a : str , __a : str ): '''simple docstring''' os.makedirs(__a , exist_ok=__a ) UpperCamelCase__ = os.path.basename(__a ) UpperCamelCase__ = url.split("""/""" 
)[-2] UpperCamelCase__ = os.path.join(__a , __a ) if os.path.exists(__a ) and not os.path.isfile(__a ): raise RuntimeError(f"{download_target} exists and is not a regular file" ) if os.path.isfile(__a ): UpperCamelCase__ = open(__a , """rb""" ).read() if hashlib.shaaaa(__a ).hexdigest() == expected_shaaaa: return model_bytes else: warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file" ) with urllib.request.urlopen(__a ) as source, open(__a , """wb""" ) as output: with tqdm( total=int(source.info().get("""Content-Length""" ) ) , ncols=80 , unit="""iB""" , unit_scale=__a , unit_divisor=1_024 ) as loop: while True: UpperCamelCase__ = source.read(8_192 ) if not buffer: break output.write(__a ) loop.update(len(__a ) ) UpperCamelCase__ = open(__a , """rb""" ).read() if hashlib.shaaaa(__a ).hexdigest() != expected_shaaaa: raise RuntimeError( """Model has been downloaded but the SHA256 checksum does not not match. Please retry loading the model.""" ) return model_bytes def __magic_name__ ( __a : Union[str, Any] , __a : Optional[int] ): '''simple docstring''' if ".pt" not in checkpoint_path: UpperCamelCase__ = _download(_MODELS[checkpoint_path] ) else: UpperCamelCase__ = torch.load(__a , map_location="""cpu""" ) UpperCamelCase__ = original_checkpoint["""dims"""] UpperCamelCase__ = original_checkpoint["""model_state_dict"""] UpperCamelCase__ = state_dict["""decoder.token_embedding.weight"""] remove_ignore_keys_(__a ) rename_keys(__a ) UpperCamelCase__ = True UpperCamelCase__ = state_dict["""decoder.layers.0.fc1.weight"""].shape[0] UpperCamelCase__ = WhisperConfig( vocab_size=dimensions["""n_vocab"""] , encoder_ffn_dim=__a , decoder_ffn_dim=__a , num_mel_bins=dimensions["""n_mels"""] , d_model=dimensions["""n_audio_state"""] , max_target_positions=dimensions["""n_text_ctx"""] , encoder_layers=dimensions["""n_audio_layer"""] , encoder_attention_heads=dimensions["""n_audio_head"""] , decoder_layers=dimensions["""n_text_layer"""] , decoder_attention_heads=dimensions["""n_text_state"""] , max_source_positions=dimensions["""n_audio_ctx"""] , ) UpperCamelCase__ = WhisperForConditionalGeneration(__a ) UpperCamelCase__ , UpperCamelCase__ = model.model.load_state_dict(__a , strict=__a ) if len(__a ) > 0 and not set(__a ) <= { "encoder.embed_positions.weights", "decoder.embed_positions.weights", }: raise ValueError( """Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,""" f" but all the following weights are missing {missing}" ) if tie_embeds: UpperCamelCase__ = make_linear_from_emb(model.model.decoder.embed_tokens ) else: UpperCamelCase__ = proj_out_weights model.save_pretrained(__a ) if __name__ == "__main__": lowerCamelCase_ = argparse.ArgumentParser() # # Required parameters parser.add_argument('''--checkpoint_path''', type=str, help='''Patht to the downloaded checkpoints''') parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') lowerCamelCase_ = parser.parse_args() convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
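# The `_download` helper above boils down to "fetch, then refuse the bytes
# unless their SHA-256 matches the digest embedded in the URL path". A
# stripped-down sketch of that pattern, without the caching and progress bar:
import hashlib
import urllib.request


def download_verified(url: str, expected_sha256: str) -> bytes:
    with urllib.request.urlopen(url) as source:
        data = source.read()
    if hashlib.sha256(data).hexdigest() != expected_sha256:
        raise RuntimeError("SHA256 mismatch; the download is corrupt")
    return data


# Typical invocation of the converter above (script filename illustrative): a
# bare model name such as "tiny" -- no ".pt" suffix -- triggers the checksummed
# download from _MODELS, e.g.
#   python convert_openai_whisper_to_tfms.py --checkpoint_path tiny \
#       --pytorch_dump_folder_path ./whisper-tiny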
86
1
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) lowerCamelCase_ = { '''configuration_blip''': [ '''BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BlipConfig''', '''BlipTextConfig''', '''BlipVisionConfig''', ], '''processing_blip''': ['''BlipProcessor'''], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase_ = ['''BlipImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase_ = [ '''BLIP_PRETRAINED_MODEL_ARCHIVE_LIST''', '''BlipModel''', '''BlipPreTrainedModel''', '''BlipForConditionalGeneration''', '''BlipForQuestionAnswering''', '''BlipVisionModel''', '''BlipTextModel''', '''BlipForImageTextRetrieval''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase_ = [ '''TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFBlipModel''', '''TFBlipPreTrainedModel''', '''TFBlipForConditionalGeneration''', '''TFBlipForQuestionAnswering''', '''TFBlipVisionModel''', '''TFBlipTextModel''', '''TFBlipForImageTextRetrieval''', ] if TYPE_CHECKING: from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig from .processing_blip import BlipProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_blip import BlipImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_blip import ( BLIP_PRETRAINED_MODEL_ARCHIVE_LIST, BlipForConditionalGeneration, BlipForImageTextRetrieval, BlipForQuestionAnswering, BlipModel, BlipPreTrainedModel, BlipTextModel, BlipVisionModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_blip import ( TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST, TFBlipForConditionalGeneration, TFBlipForImageTextRetrieval, TFBlipForQuestionAnswering, TFBlipModel, TFBlipPreTrainedModel, TFBlipTextModel, TFBlipVisionModel, ) else: import sys lowerCamelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
86
def __magic_name__ ( __a : int ):
    '''simple docstring'''
    UpperCamelCase__ = [[0 for _ in range(__a )] for _ in range(m + 1 )]
    for i in range(m + 1 ):
        UpperCamelCase__ = 1
    for n in range(m + 1 ):
        for k in range(1 , __a ):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]
    return memo[m][m - 1]


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        try:
            lowerCamelCase_ = int(input('''Enter a number: ''').strip())
            print(partition(n))
        except ValueError:
            print('''Please enter a number.''')
    else:
        try:
            lowerCamelCase_ = int(sys.argv[1])
            print(partition(n))
        except ValueError:
            print('''Please pass a number.''')
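# The table above is the classic bottom-up integer-partition count, with the
# part-size index shifted by one (memo[n][k] counts partitions of n into parts
# of size at most k + 1). The same recurrence with unshifted indexing:
def partition(m: int) -> int:
    # ways[n][k]: partitions of n using parts no larger than k
    ways = [[0] * (m + 1) for _ in range(m + 1)]
    for k in range(m + 1):
        ways[0][k] = 1  # the empty partition
    for n in range(1, m + 1):
        for k in range(1, m + 1):
            ways[n][k] = ways[n][k - 1]        # use no part of size k
            if n >= k:
                ways[n][k] += ways[n - k][k]   # use at least one part of size k
    return ways[m][m]


assert partition(4) == 5    # 4, 3+1, 2+2, 2+1+1, 1+1+1+1
assert partition(7) == 15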
86
1
from ...configuration_utils import PretrainedConfig
from ...utils import logging


lowerCamelCase_ = logging.get_logger(__name__)

lowerCamelCase_ = {
    '''facebook/vit-mae-base''': '''https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json''',
    # See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}


class __A( __lowerCamelCase ):
    """simple docstring"""

    SCREAMING_SNAKE_CASE__ = """vit_mae"""

    def __init__(self , SCREAMING_SNAKE_CASE_=7_68 , SCREAMING_SNAKE_CASE_=12 , SCREAMING_SNAKE_CASE_=12 , SCREAMING_SNAKE_CASE_=30_72 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.02 , SCREAMING_SNAKE_CASE_=1E-12 , SCREAMING_SNAKE_CASE_=2_24 , SCREAMING_SNAKE_CASE_=16 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=16 , SCREAMING_SNAKE_CASE_=5_12 , SCREAMING_SNAKE_CASE_=8 , SCREAMING_SNAKE_CASE_=20_48 , SCREAMING_SNAKE_CASE_=0.75 , SCREAMING_SNAKE_CASE_=False , **SCREAMING_SNAKE_CASE_ , ):
        super().__init__(**SCREAMING_SNAKE_CASE_ )
        UpperCamelCase__ = hidden_size
        UpperCamelCase__ = num_hidden_layers
        UpperCamelCase__ = num_attention_heads
        UpperCamelCase__ = intermediate_size
        UpperCamelCase__ = hidden_act
        UpperCamelCase__ = hidden_dropout_prob
        UpperCamelCase__ = attention_probs_dropout_prob
        UpperCamelCase__ = initializer_range
        UpperCamelCase__ = layer_norm_eps
        UpperCamelCase__ = image_size
        UpperCamelCase__ = patch_size
        UpperCamelCase__ = num_channels
        UpperCamelCase__ = qkv_bias
        UpperCamelCase__ = decoder_num_attention_heads
        UpperCamelCase__ = decoder_hidden_size
        UpperCamelCase__ = decoder_num_hidden_layers
        UpperCamelCase__ = decoder_intermediate_size
        UpperCamelCase__ = mask_ratio
        UpperCamelCase__ = norm_pix_loss
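# With the defaults above (224x224 images, 16x16 patches, mask_ratio=0.75)
# the MAE encoder sees only a quarter of the patch tokens:
from transformers import ViTMAEConfig

config = ViTMAEConfig()
num_patches = (config.image_size // config.patch_size) ** 2   # 14 * 14 = 196
num_masked = int(config.mask_ratio * num_patches)             # 147 masked
print(num_patches, num_masked, num_patches - num_masked)      # 196 147 49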
86
class __A: """simple docstring""" def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = None UpperCamelCase__ = None UpperCamelCase__ = graph self._normalize_graph(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = len(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = None def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): if sources is int: UpperCamelCase__ = [sources] if sinks is int: UpperCamelCase__ = [sinks] if len(SCREAMING_SNAKE_CASE_ ) == 0 or len(SCREAMING_SNAKE_CASE_ ) == 0: return UpperCamelCase__ = sources[0] UpperCamelCase__ = sinks[0] # make fake vertex if there are more # than one source or sink if len(SCREAMING_SNAKE_CASE_ ) > 1 or len(SCREAMING_SNAKE_CASE_ ) > 1: UpperCamelCase__ = 0 for i in sources: max_input_flow += sum(self.graph[i] ) UpperCamelCase__ = len(self.graph ) + 1 for room in self.graph: room.insert(0 , 0 ) self.graph.insert(0 , [0] * size ) for i in sources: UpperCamelCase__ = max_input_flow UpperCamelCase__ = 0 UpperCamelCase__ = len(self.graph ) + 1 for room in self.graph: room.append(0 ) self.graph.append([0] * size ) for i in sinks: UpperCamelCase__ = max_input_flow UpperCamelCase__ = size - 1 def UpperCAmelCase_ (self ): if self.maximum_flow_algorithm is None: raise Exception("""You need to set maximum flow algorithm before.""" ) if self.source_index is None or self.sink_index is None: return 0 self.maximum_flow_algorithm.execute() return self.maximum_flow_algorithm.getMaximumFlow() def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = algorithm(self ) class __A: """simple docstring""" def __init__(self , SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = flow_network UpperCamelCase__ = flow_network.verticesCount UpperCamelCase__ = flow_network.sourceIndex UpperCamelCase__ = flow_network.sinkIndex # it's just a reference, so you shouldn't change # it in your algorithms, use deep copy before doing that UpperCamelCase__ = flow_network.graph UpperCamelCase__ = False def UpperCAmelCase_ (self ): if not self.executed: self._algorithm() UpperCamelCase__ = True def UpperCAmelCase_ (self ): pass class __A( __lowerCamelCase ): """simple docstring""" def __init__(self , SCREAMING_SNAKE_CASE_ ): super().__init__(SCREAMING_SNAKE_CASE_ ) # use this to save your result UpperCamelCase__ = -1 def UpperCAmelCase_ (self ): if not self.executed: raise Exception("""You should execute algorithm before using its result!""" ) return self.maximum_flow class __A( __lowerCamelCase ): """simple docstring""" def __init__(self , SCREAMING_SNAKE_CASE_ ): super().__init__(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = [[0] * self.verticies_count for i in range(self.verticies_count )] UpperCamelCase__ = [0] * self.verticies_count UpperCamelCase__ = [0] * self.verticies_count def UpperCAmelCase_ (self ): UpperCamelCase__ = self.verticies_count # push some substance to graph for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ): self.preflow[self.source_index][nextvertex_index] += bandwidth self.preflow[nextvertex_index][self.source_index] -= bandwidth self.excesses[nextvertex_index] += bandwidth # Relabel-to-front selection rule UpperCamelCase__ = [ i for i in range(self.verticies_count ) if i != self.source_index and i != self.sink_index ] # move through list UpperCamelCase__ = 0 while i < len(SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = vertices_list[i] UpperCamelCase__ = self.heights[vertex_index] self.process_vertex(SCREAMING_SNAKE_CASE_ ) if 
self.heights[vertex_index] > previous_height: # if it was relabeled, swap elements # and start from 0 index vertices_list.insert(0 , vertices_list.pop(SCREAMING_SNAKE_CASE_ ) ) UpperCamelCase__ = 0 else: i += 1 UpperCamelCase__ = sum(self.preflow[self.source_index] ) def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ ): while self.excesses[vertex_index] > 0: for neighbour_index in range(self.verticies_count ): # if it's neighbour and current vertex is higher if ( self.graph[vertex_index][neighbour_index] - self.preflow[vertex_index][neighbour_index] > 0 and self.heights[vertex_index] > self.heights[neighbour_index] ): self.push(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) self.relabel(SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = min( self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , ) self.preflow[from_index][to_index] += preflow_delta self.preflow[to_index][from_index] -= preflow_delta self.excesses[from_index] -= preflow_delta self.excesses[to_index] += preflow_delta def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = None for to_index in range(self.verticies_count ): if ( self.graph[vertex_index][to_index] - self.preflow[vertex_index][to_index] > 0 ) and (min_height is None or self.heights[to_index] < min_height): UpperCamelCase__ = self.heights[to_index] if min_height is not None: UpperCamelCase__ = min_height + 1 if __name__ == "__main__": lowerCamelCase_ = [0] lowerCamelCase_ = [3] # graph = [ # [0, 0, 4, 6, 0, 0], # [0, 0, 5, 2, 0, 0], # [0, 0, 0, 0, 4, 4], # [0, 0, 0, 0, 6, 6], # [0, 0, 0, 0, 0, 0], # [0, 0, 0, 0, 0, 0], # ] lowerCamelCase_ = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]] # prepare our network lowerCamelCase_ = FlowNetwork(graph, entrances, exits) # set algorithm flow_network.set_maximum_flow_algorithm(PushRelabelExecutor) # and calculate lowerCamelCase_ = flow_network.find_maximum_flow() print(f'maximum flow is {maximum_flow}')
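# Push-relabel (above) maintains a preflow plus per-vertex heights instead of
# hunting for augmenting paths. As an independent cross-check, a small
# Edmonds-Karp implementation gives the same value, 6, on the sample graph:
from collections import deque


def edmonds_karp(capacity, source, sink):
    n = len(capacity)
    flow = [[0] * n for _ in range(n)]
    total = 0
    while True:
        parent = [-1] * n
        parent[source] = source
        q = deque([source])
        while q and parent[sink] == -1:   # BFS for a shortest augmenting path
            u = q.popleft()
            for v in range(n):
                if parent[v] == -1 and capacity[u][v] - flow[u][v] > 0:
                    parent[v] = u
                    q.append(v)
        if parent[sink] == -1:
            return total
        v, bottleneck = sink, float("inf")
        while v != source:   # find the bottleneck along the path
            u = parent[v]
            bottleneck = min(bottleneck, capacity[u][v] - flow[u][v])
            v = u
        v = sink
        while v != source:   # augment, keeping the residual graph consistent
            u = parent[v]
            flow[u][v] += bottleneck
            flow[v][u] -= bottleneck
            v = u
        total += bottleneck


assert edmonds_karp([[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]], 0, 3) == 6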
86
1
from ...configuration_utils import PretrainedConfig
from ...utils import logging


lowerCamelCase_ = logging.get_logger(__name__)


class __A( __lowerCamelCase ):
    """simple docstring"""

    SCREAMING_SNAKE_CASE__ = """timm_backbone"""

    def __init__(self , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=None , **SCREAMING_SNAKE_CASE_ , ):
        super().__init__(**SCREAMING_SNAKE_CASE_ )
        UpperCamelCase__ = backbone
        UpperCamelCase__ = num_channels
        UpperCamelCase__ = features_only
        UpperCamelCase__ = use_pretrained_backbone
        UpperCamelCase__ = True
        UpperCamelCase__ = out_indices if out_indices is not None else (-1,)
86
from timeit import timeit


def __magic_name__ ( __a : int ):
    '''simple docstring'''
    if number < 0:
        raise ValueError("""the value of input must not be negative""" )
    UpperCamelCase__ = 0
    while number:
        number &= number - 1
        result += 1
    return result


def __magic_name__ ( __a : int ):
    '''simple docstring'''
    if number < 0:
        raise ValueError("""the value of input must not be negative""" )
    UpperCamelCase__ = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result


def __magic_name__ ( ):
    '''simple docstring'''

    def do_benchmark(__a : int ) -> None:
        UpperCamelCase__ = """import __main__ as z"""
        print(f"Benchmark when {number = }:" )
        print(f"{get_set_bits_count_using_modulo_operator(__a ) = }" )
        UpperCamelCase__ = timeit("""z.get_set_bits_count_using_modulo_operator(25)""" , setup=__a )
        print(f"timeit() runs in {timing} seconds" )
        print(f"{get_set_bits_count_using_brian_kernighans_algorithm(__a ) = }" )
        UpperCamelCase__ = timeit(
            """z.get_set_bits_count_using_brian_kernighans_algorithm(25)""" , setup=__a , )
        print(f"timeit() runs in {timing} seconds" )

    for number in (25, 37, 58, 0):
        do_benchmark(__a )
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
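# The first function relies on the Brian Kernighan identity: n & (n - 1)
# clears the lowest set bit, so the loop runs once per set bit rather than
# once per bit position. A named version with quick checks:
def popcount_kernighan(number: int) -> int:
    if number < 0:
        raise ValueError("the value of input must not be negative")
    count = 0
    while number:
        number &= number - 1   # drop the lowest set bit
        count += 1
    return count


assert popcount_kernighan(25) == 3   # 0b11001
assert popcount_kernighan(58) == 4   # 0b111010
assert popcount_kernighan(0) == 0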
86
1
import json import os import unittest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_ftfy, require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class __A( __lowerCamelCase , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = CLIPTokenizer SCREAMING_SNAKE_CASE__ = CLIPTokenizerFast SCREAMING_SNAKE_CASE__ = True SCREAMING_SNAKE_CASE__ = {} SCREAMING_SNAKE_CASE__ = False def UpperCAmelCase_ (self ): super().setUp() # fmt: off UpperCamelCase__ = ["""l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""] # fmt: on UpperCamelCase__ = dict(zip(SCREAMING_SNAKE_CASE_ , range(len(SCREAMING_SNAKE_CASE_ ) ) ) ) UpperCamelCase__ = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>"""] UpperCamelCase__ = {"""unk_token""": """<unk>"""} UpperCamelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) UpperCamelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp: fp.write(json.dumps(SCREAMING_SNAKE_CASE_ ) + """\n""" ) with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp: fp.write("""\n""".join(SCREAMING_SNAKE_CASE_ ) ) def UpperCAmelCase_ (self , **SCREAMING_SNAKE_CASE_ ): kwargs.update(self.special_tokens_map ) return CLIPTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self , **SCREAMING_SNAKE_CASE_ ): kwargs.update(self.special_tokens_map ) return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = """lower newer""" UpperCamelCase__ = """lower newer""" return input_text, output_text def UpperCAmelCase_ (self ): UpperCamelCase__ = CLIPTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) UpperCamelCase__ = """lower newer""" UpperCamelCase__ = ["""lo""", """w""", """er</w>""", """n""", """e""", """w""", """er</w>"""] UpperCamelCase__ = tokenizer.tokenize(SCREAMING_SNAKE_CASE_ ) self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = tokens + [tokenizer.unk_token] UpperCamelCase__ = [10, 2, 16, 9, 3, 2, 16, 20] self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) @require_ftfy def UpperCAmelCase_ (self ): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ): UpperCamelCase__ = self.tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = """A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d.""" UpperCamelCase__ = tokenizer_s.tokenize(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = tokenizer_r.tokenize(SCREAMING_SNAKE_CASE_ ) self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # Test that the tokenization is identical on an example containing a character (Latin Small Letter A # with Tilde) encoded in 2 different ways UpperCamelCase__ = """xa\u0303y""" + """ """ + 
"""x\xe3y""" UpperCamelCase__ = tokenizer_s.tokenize(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = tokenizer_r.tokenize(SCREAMING_SNAKE_CASE_ ) self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # Test that the tokenization is identical on unicode of space type UpperCamelCase__ = [ """\u0009""", # (horizontal tab, '\t') """\u000B""", # (vertical tab) """\u000C""", # (form feed) """\u0020""", # (space, ' ') """\u200E""", # (left-to-right mark):w """\u200F""", # (right-to-left mark) ] for unicode_seq in spaces_unicodes: UpperCamelCase__ = tokenizer_s.tokenize(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = tokenizer_r.tokenize(SCREAMING_SNAKE_CASE_ ) self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # Test that the tokenization is identical on unicode of line break type UpperCamelCase__ = [ """\u000A""", # (line feed, '\n') """\r\n""", # (carriage return and line feed, '\r\n') """\u000D""", # (carriage return, '\r') """\r""", # (carriage return, '\r') """\u000D""", # (carriage return, '\r') """\u2028""", # (line separator) """\u2029""", # (paragraph separator) # "\u0085", # (next line) ] # The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms # it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a # space (and thus into an empty list). for unicode_seq in line_break_unicodes: UpperCamelCase__ = tokenizer_s.tokenize(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = tokenizer_r.tokenize(SCREAMING_SNAKE_CASE_ ) self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self ): # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ): UpperCamelCase__ = """hello""" # `hello` is a token in the vocabulary of `pretrained_name` UpperCamelCase__ = F"{text_of_1_token} {text_of_1_token}" UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained( SCREAMING_SNAKE_CASE_ , use_fast=SCREAMING_SNAKE_CASE_ , ) UpperCamelCase__ = tokenizer_r(SCREAMING_SNAKE_CASE_ , return_offsets_mapping=SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ ) self.assertEqual(encoding.offset_mapping[0] , (0, len(SCREAMING_SNAKE_CASE_ )) ) self.assertEqual( encoding.offset_mapping[1] , (len(SCREAMING_SNAKE_CASE_ ) + 1, len(SCREAMING_SNAKE_CASE_ ) + 1 + len(SCREAMING_SNAKE_CASE_ )) , ) UpperCamelCase__ = F" {text}" UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained( SCREAMING_SNAKE_CASE_ , use_fast=SCREAMING_SNAKE_CASE_ , ) UpperCamelCase__ = tokenizer_r(SCREAMING_SNAKE_CASE_ , return_offsets_mapping=SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ ) self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(SCREAMING_SNAKE_CASE_ )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(SCREAMING_SNAKE_CASE_ ) + 1, 1 + len(SCREAMING_SNAKE_CASE_ ) + 1 + len(SCREAMING_SNAKE_CASE_ )) , ) def UpperCAmelCase_ (self ): # Test related to the breaking change introduced in transformers v4.17.0 # We need to check that an error in raised when the user try to load a previous version of the tokenizer. 
with self.assertRaises(SCREAMING_SNAKE_CASE_ ) as context: self.rust_tokenizer_class.from_pretrained("""robot-test/old-clip-tokenizer""" ) self.assertTrue( context.exception.args[0].startswith( """The `backend_tokenizer` provided does not match the expected format.""" ) ) @require_ftfy def UpperCAmelCase_ (self ): super().test_tokenization_python_rust_equals() def UpperCAmelCase_ (self ): # CLIP always lower cases letters pass
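# The offset-mapping assertions above verify that each fast-tokenizer token
# points back into the raw string. The same behaviour on the published CLIP
# tokenizer (expected outputs shown as comments are what the BPE vocabulary
# produces for this input):
from transformers import CLIPTokenizerFast

tok = CLIPTokenizerFast.from_pretrained("openai/clip-vit-base-patch32")
enc = tok("hello hello", return_offsets_mapping=True, add_special_tokens=False)
print(enc.tokens())           # ['hello</w>', 'hello</w>']
print(enc["offset_mapping"])  # [(0, 5), (6, 11)]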
86
import copy import os import tempfile from unittest import TestCase from unittest.mock import patch import numpy as np import pyarrow as pa import pyarrow.parquet as pq import pytest from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence from datasets.features import ArrayaD, ClassLabel, Features, Image, Value from datasets.features.features import ArrayaDExtensionType, cast_to_python_objects from datasets.keyhash import DuplicatedKeysError, InvalidKeyError from .utils import require_pil class __A( __lowerCamelCase ): """simple docstring""" def UpperCAmelCase_ (self ): UpperCamelCase__ = pa.array(TypedSequence([1, 2, 3] ) ) self.assertEqual(arr.type , pa.intaa() ) def UpperCAmelCase_ (self ): with self.assertRaises(SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = pa.array(TypedSequence([1, 2, 3] ) , type=pa.intaa() ) def UpperCAmelCase_ (self ): with self.assertRaises(SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = pa.array(TypedSequence([1, 2, 3] , try_type=Value("""bool""" ) , type=Value("""int64""" ) ) ) def UpperCAmelCase_ (self ): UpperCamelCase__ = pa.array(TypedSequence([1, 2, 3] , type=Value("""int32""" ) ) ) self.assertEqual(arr.type , pa.intaa() ) def UpperCAmelCase_ (self ): with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ): UpperCamelCase__ = pa.array(TypedSequence(["""foo""", """bar"""] , type=Value("""int64""" ) ) ) def UpperCAmelCase_ (self ): UpperCamelCase__ = pa.array(TypedSequence([1, 2, 3] , try_type=Value("""int32""" ) ) ) self.assertEqual(arr.type , pa.intaa() ) def UpperCAmelCase_ (self ): UpperCamelCase__ = pa.array(TypedSequence(["""foo""", """bar"""] , try_type=Value("""int64""" ) ) ) self.assertEqual(arr.type , pa.string() ) def UpperCAmelCase_ (self ): UpperCamelCase__ = pa.array(TypedSequence([[[1, 2, 3]]] , type=ArrayaD((1, 3) , """int64""" ) ) ) self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , """int64""" ) ) def UpperCAmelCase_ (self ): with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ): UpperCamelCase__ = pa.array(TypedSequence(["""foo""", """bar"""] , type=ArrayaD((1, 3) , """int64""" ) ) ) def UpperCAmelCase_ (self ): UpperCamelCase__ = pa.array(TypedSequence([[[1, 2, 3]]] , try_type=ArrayaD((1, 3) , """int64""" ) ) ) self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , """int64""" ) ) def UpperCAmelCase_ (self ): UpperCamelCase__ = pa.array(TypedSequence(["""foo""", """bar"""] , try_type=ArrayaD((1, 3) , """int64""" ) ) ) self.assertEqual(arr.type , pa.string() ) @require_pil def UpperCAmelCase_ (self ): import PIL.Image UpperCamelCase__ = PIL.Image.fromarray(np.arange(10 , dtype=np.uinta ).reshape(2 , 5 ) ) with patch( """datasets.arrow_writer.cast_to_python_objects""" , side_effect=SCREAMING_SNAKE_CASE_ ) as mock_cast_to_python_objects: UpperCamelCase__ = pa.array(TypedSequence([{"""path""": None, """bytes""": b"""image_bytes"""}, pil_image] , type=Image() ) ) UpperCamelCase__ , UpperCamelCase__ = mock_cast_to_python_objects.call_args_list[-1] self.assertIn("""optimize_list_casting""" , SCREAMING_SNAKE_CASE_ ) self.assertFalse(kwargs["""optimize_list_casting"""] ) def __magic_name__ ( __a : List[Any] , __a : int ): '''simple docstring''' UpperCamelCase__ = pa.BufferReader(__a ) if isinstance(__a , pa.Buffer ) else pa.memory_map(__a ) UpperCamelCase__ = pa.ipc.open_stream(__a ) UpperCamelCase__ = f.read_all() assert len(pa_table.to_batches() ) == expected_num_chunks assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]} del pa_table 
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 10] ) @pytest.mark.parametrize( """fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] ) def __magic_name__ ( __a : Tuple , __a : int ): '''simple docstring''' UpperCamelCase__ = pa.BufferOutputStream() UpperCamelCase__ = pa.schema(__a ) if fields else None with ArrowWriter(stream=__a , schema=__a , writer_batch_size=__a ) as writer: writer.write({"""col_1""": """foo""", """col_2""": 1} ) writer.write({"""col_1""": """bar""", """col_2""": 2} ) UpperCamelCase__ , UpperCamelCase__ = writer.finalize() assert num_examples == 2 assert num_bytes > 0 if not fields: UpperCamelCase__ = {"""col_1""": pa.string(), """col_2""": pa.intaa()} assert writer._schema == pa.schema(__a , metadata=writer._schema.metadata ) _check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 ) def __magic_name__ ( ): '''simple docstring''' UpperCamelCase__ = pa.BufferOutputStream() UpperCamelCase__ = Features({"""labels""": ClassLabel(names=["""neg""", """pos"""] )} ) with ArrowWriter(stream=__a , features=__a ) as writer: writer.write({"""labels""": 0} ) writer.write({"""labels""": 1} ) UpperCamelCase__ , UpperCamelCase__ = writer.finalize() assert num_examples == 2 assert num_bytes > 0 assert writer._schema == features.arrow_schema assert writer._schema.metadata == features.arrow_schema.metadata UpperCamelCase__ = pa.BufferReader(output.getvalue() ) UpperCamelCase__ = pa.ipc.open_stream(__a ) UpperCamelCase__ = f.read_all() UpperCamelCase__ = pa_table.schema assert pa_table.num_rows == 2 assert schema == features.arrow_schema assert schema.metadata == features.arrow_schema.metadata assert features == Features.from_arrow_schema(__a ) @pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 10] ) def __magic_name__ ( __a : str ): '''simple docstring''' UpperCamelCase__ = pa.BufferOutputStream() with ArrowWriter( stream=__a , writer_batch_size=__a , hash_salt="""split_name""" , check_duplicates=__a , ) as writer: with pytest.raises(__a ): writer.write({"""col_1""": """foo""", """col_2""": 1} , key=[1, 2] ) UpperCamelCase__ , UpperCamelCase__ = writer.finalize() @pytest.mark.parametrize("""writer_batch_size""" , [None, 2, 10] ) def __magic_name__ ( __a : str ): '''simple docstring''' UpperCamelCase__ = pa.BufferOutputStream() with ArrowWriter( stream=__a , writer_batch_size=__a , hash_salt="""split_name""" , check_duplicates=__a , ) as writer: with pytest.raises(__a ): writer.write({"""col_1""": """foo""", """col_2""": 1} , key=10 ) writer.write({"""col_1""": """bar""", """col_2""": 2} , key=10 ) UpperCamelCase__ , UpperCamelCase__ = writer.finalize() @pytest.mark.parametrize("""writer_batch_size""" , [None, 2, 10] ) def __magic_name__ ( __a : Union[str, Any] ): '''simple docstring''' UpperCamelCase__ = pa.BufferOutputStream() with ArrowWriter( stream=__a , writer_batch_size=__a , hash_salt="""split_name""" , check_duplicates=__a , ) as writer: writer.write({"""col_1""": """foo""", """col_2""": 1} , key=1 ) writer.write({"""col_1""": """bar""", """col_2""": 2} , key=2 ) UpperCamelCase__ , UpperCamelCase__ = writer.finalize() assert num_examples == 2 assert num_bytes > 0 _check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 ) @pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 10] ) @pytest.mark.parametrize( """fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, 
{"""col_1""": pa.string(), """col_2""": pa.intaa()}] ) def __magic_name__ ( __a : List[Any] , __a : Optional[int] ): '''simple docstring''' UpperCamelCase__ = pa.BufferOutputStream() UpperCamelCase__ = pa.schema(__a ) if fields else None with ArrowWriter(stream=__a , schema=__a , writer_batch_size=__a ) as writer: writer.write_batch({"""col_1""": ["""foo""", """bar"""], """col_2""": [1, 2]} ) writer.write_batch({"""col_1""": [], """col_2""": []} ) UpperCamelCase__ , UpperCamelCase__ = writer.finalize() assert num_examples == 2 assert num_bytes > 0 if not fields: UpperCamelCase__ = {"""col_1""": pa.string(), """col_2""": pa.intaa()} assert writer._schema == pa.schema(__a , metadata=writer._schema.metadata ) _check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 ) @pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 10] ) @pytest.mark.parametrize( """fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] ) def __magic_name__ ( __a : Union[str, Any] , __a : Any ): '''simple docstring''' UpperCamelCase__ = pa.BufferOutputStream() UpperCamelCase__ = pa.schema(__a ) if fields else None with ArrowWriter(stream=__a , schema=__a , writer_batch_size=__a ) as writer: writer.write_table(pa.Table.from_pydict({"""col_1""": ["""foo""", """bar"""], """col_2""": [1, 2]} ) ) UpperCamelCase__ , UpperCamelCase__ = writer.finalize() assert num_examples == 2 assert num_bytes > 0 if not fields: UpperCamelCase__ = {"""col_1""": pa.string(), """col_2""": pa.intaa()} assert writer._schema == pa.schema(__a , metadata=writer._schema.metadata ) _check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 ) @pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 10] ) @pytest.mark.parametrize( """fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] ) def __magic_name__ ( __a : Optional[Any] , __a : int ): '''simple docstring''' UpperCamelCase__ = pa.BufferOutputStream() UpperCamelCase__ = pa.schema(__a ) if fields else None with ArrowWriter(stream=__a , schema=__a , writer_batch_size=__a ) as writer: writer.write_row(pa.Table.from_pydict({"""col_1""": ["""foo"""], """col_2""": [1]} ) ) writer.write_row(pa.Table.from_pydict({"""col_1""": ["""bar"""], """col_2""": [2]} ) ) UpperCamelCase__ , UpperCamelCase__ = writer.finalize() assert num_examples == 2 assert num_bytes > 0 if not fields: UpperCamelCase__ = {"""col_1""": pa.string(), """col_2""": pa.intaa()} assert writer._schema == pa.schema(__a , metadata=writer._schema.metadata ) _check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 ) def __magic_name__ ( ): '''simple docstring''' with tempfile.TemporaryDirectory() as tmp_dir: UpperCamelCase__ = {"""col_1""": pa.string(), """col_2""": pa.intaa()} UpperCamelCase__ = os.path.join(__a , """test.arrow""" ) with ArrowWriter(path=__a , schema=pa.schema(__a ) ) as writer: writer.write_batch({"""col_1""": ["""foo""", """bar"""], """col_2""": [1, 2]} ) UpperCamelCase__ , UpperCamelCase__ = writer.finalize() assert num_examples == 2 assert num_bytes > 0 assert writer._schema == pa.schema(__a , metadata=writer._schema.metadata ) _check_output(__a , 1 ) def __magic_name__ ( __a : Any ): '''simple docstring''' if pa.types.is_list(__a ): return get_base_dtype(arr_type.value_type ) else: return arr_type def __magic_name__ ( __a : Optional[int] , __a : 
Any ): '''simple docstring''' if isinstance(lst[0] , __a ): change_first_primitive_element_in_list(lst[0] , __a ) else: UpperCamelCase__ = value @pytest.mark.parametrize("""optimized_int_type, expected_dtype""" , [(None, pa.intaa()), (Value("""int32""" ), pa.intaa())] ) @pytest.mark.parametrize("""sequence""" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] ) def __magic_name__ ( __a : Union[str, Any] , __a : Optional[int] , __a : Tuple ): '''simple docstring''' UpperCamelCase__ = pa.array(TypedSequence(__a , optimized_int_type=__a ) ) assert get_base_dtype(arr.type ) == expected_dtype @pytest.mark.parametrize( """col, expected_dtype""" , [ ("""attention_mask""", pa.inta()), ("""special_tokens_mask""", pa.inta()), ("""token_type_ids""", pa.inta()), ("""input_ids""", pa.intaa()), ("""other""", pa.intaa()), ] , ) @pytest.mark.parametrize("""sequence""" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] ) def __magic_name__ ( __a : Optional[int] , __a : str , __a : Optional[Any] ): '''simple docstring''' UpperCamelCase__ = pa.array(OptimizedTypedSequence(__a , col=__a ) ) assert get_base_dtype(arr.type ) == expected_dtype # not in range if col != "other": # avoids errors due to in-place modifications UpperCamelCase__ = copy.deepcopy(__a ) UpperCamelCase__ = np.iinfo(expected_dtype.to_pandas_dtype() ).max + 1 change_first_primitive_element_in_list(__a , __a ) UpperCamelCase__ = pa.array(OptimizedTypedSequence(__a , col=__a ) ) assert get_base_dtype(arr.type ) == pa.intaa() @pytest.mark.parametrize("""raise_exception""" , [False, True] ) def __magic_name__ ( __a : List[str] , __a : List[str] ): '''simple docstring''' UpperCamelCase__ = str(tmp_path / """dataset-train.arrow""" ) try: with ArrowWriter(path=__a ) as writer: if raise_exception: raise pa.lib.ArrowInvalid() else: writer.stream.close() except pa.lib.ArrowInvalid: pass finally: assert writer.stream.closed def __magic_name__ ( __a : Tuple ): '''simple docstring''' UpperCamelCase__ = """mock://dataset-train.arrow""" with ArrowWriter(path=__a , storage_options=mockfs.storage_options ) as writer: assert isinstance(writer._fs , type(__a ) ) assert writer._fs.storage_options == mockfs.storage_options writer.write({"""col_1""": """foo""", """col_2""": 1} ) writer.write({"""col_1""": """bar""", """col_2""": 2} ) UpperCamelCase__ , UpperCamelCase__ = writer.finalize() assert num_examples == 2 assert num_bytes > 0 assert mockfs.exists(__a ) def __magic_name__ ( ): '''simple docstring''' UpperCamelCase__ = pa.BufferOutputStream() with ParquetWriter(stream=__a ) as writer: writer.write({"""col_1""": """foo""", """col_2""": 1} ) writer.write({"""col_1""": """bar""", """col_2""": 2} ) UpperCamelCase__ , UpperCamelCase__ = writer.finalize() assert num_examples == 2 assert num_bytes > 0 UpperCamelCase__ = pa.BufferReader(output.getvalue() ) UpperCamelCase__ = pq.read_table(__a ) assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]} @require_pil @pytest.mark.parametrize("""embed_local_files""" , [False, True] ) def __magic_name__ ( __a : str , __a : Any ): '''simple docstring''' import PIL.Image UpperCamelCase__ = str(tmp_path / """test_image_rgb.jpg""" ) PIL.Image.fromarray(np.zeros((5, 5) , dtype=np.uinta ) ).save(__a , format="""png""" ) UpperCamelCase__ = pa.BufferOutputStream() with ParquetWriter( stream=__a , features=Features({"""image""": Image()} ) , embed_local_files=__a ) as writer: writer.write({"""image""": image_path} ) writer.finalize() UpperCamelCase__ = pa.BufferReader(output.getvalue() ) UpperCamelCase__ = pq.read_table(__a ) 
UpperCamelCase__ = pa_table.to_pydict() if embed_local_files: assert isinstance(out["""image"""][0]["""path"""] , __a ) with open(__a , """rb""" ) as f: assert out["image"][0]["bytes"] == f.read() else: assert out["image"][0]["path"] == image_path assert out["image"][0]["bytes"] is None def __magic_name__ ( ): '''simple docstring''' UpperCamelCase__ = pa.schema([pa.field("""col_1""" , pa.string() , nullable=__a )] ) UpperCamelCase__ = pa.BufferOutputStream() with ArrowWriter(stream=__a ) as writer: writer._build_writer(inferred_schema=__a ) assert writer._schema == pa.schema([pa.field("""col_1""" , pa.string() )] )
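Outside the test suite, the write/finalize round trip exercised above looks like this; a minimal sketch using only what the tests already import:

import pyarrow as pa
from datasets.arrow_writer import ArrowWriter

output = pa.BufferOutputStream()
with ArrowWriter(stream=output) as writer:
    writer.write({"col_1": "foo", "col_2": 1})
    writer.write({"col_1": "bar", "col_2": 2})
    num_examples, num_bytes = writer.finalize()

# Read the stream back, mirroring the _check_output helper above.
pa_table = pa.ipc.open_stream(pa.BufferReader(output.getvalue())).read_all()
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}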
86
1
import json from typing import List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_roberta import RobertaTokenizer lowerCamelCase_ = logging.get_logger(__name__) lowerCamelCase_ = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''} lowerCamelCase_ = { '''vocab_file''': { '''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/vocab.json''', '''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/vocab.json''', '''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json''', '''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/vocab.json''', '''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json''', '''roberta-large-openai-detector''': ( '''https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json''' ), }, '''merges_file''': { '''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/merges.txt''', '''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/merges.txt''', '''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt''', '''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/merges.txt''', '''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt''', '''roberta-large-openai-detector''': ( '''https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt''' ), }, '''tokenizer_file''': { '''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/tokenizer.json''', '''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/tokenizer.json''', '''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json''', '''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json''', '''roberta-base-openai-detector''': ( '''https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json''' ), '''roberta-large-openai-detector''': ( '''https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json''' ), }, } lowerCamelCase_ = { '''roberta-base''': 5_12, '''roberta-large''': 5_12, '''roberta-large-mnli''': 5_12, '''distilroberta-base''': 5_12, '''roberta-base-openai-detector''': 5_12, '''roberta-large-openai-detector''': 5_12, } class __A( __lowerCamelCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE__ = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE__ = ["""input_ids""", """attention_mask"""] SCREAMING_SNAKE_CASE__ = RobertaTokenizer def __init__(self , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_="replace" , SCREAMING_SNAKE_CASE_="<s>" , SCREAMING_SNAKE_CASE_="</s>" , SCREAMING_SNAKE_CASE_="</s>" , SCREAMING_SNAKE_CASE_="<s>" , SCREAMING_SNAKE_CASE_="<unk>" , SCREAMING_SNAKE_CASE_="<pad>" , SCREAMING_SNAKE_CASE_="<mask>" , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=True , **SCREAMING_SNAKE_CASE_ , ): super().__init__( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , 
tokenizer_file=SCREAMING_SNAKE_CASE_ , errors=SCREAMING_SNAKE_CASE_ , bos_token=SCREAMING_SNAKE_CASE_ , eos_token=SCREAMING_SNAKE_CASE_ , sep_token=SCREAMING_SNAKE_CASE_ , cls_token=SCREAMING_SNAKE_CASE_ , unk_token=SCREAMING_SNAKE_CASE_ , pad_token=SCREAMING_SNAKE_CASE_ , mask_token=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ , trim_offsets=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , ) UpperCamelCase__ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("""add_prefix_space""" , SCREAMING_SNAKE_CASE_ ) != add_prefix_space: UpperCamelCase__ = getattr(SCREAMING_SNAKE_CASE_ , pre_tok_state.pop("""type""" ) ) UpperCamelCase__ = add_prefix_space UpperCamelCase__ = pre_tok_class(**SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = add_prefix_space UpperCamelCase__ = """post_processor""" UpperCamelCase__ = getattr(self.backend_tokenizer , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) if tokenizer_component_instance: UpperCamelCase__ = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: UpperCamelCase__ = tuple(state["""sep"""] ) if "cls" in state: UpperCamelCase__ = tuple(state["""cls"""] ) UpperCamelCase__ = False if state.get("""add_prefix_space""" , SCREAMING_SNAKE_CASE_ ) != add_prefix_space: UpperCamelCase__ = add_prefix_space UpperCamelCase__ = True if state.get("""trim_offsets""" , SCREAMING_SNAKE_CASE_ ) != trim_offsets: UpperCamelCase__ = trim_offsets UpperCamelCase__ = True if changes_to_apply: UpperCamelCase__ = getattr(SCREAMING_SNAKE_CASE_ , state.pop("""type""" ) ) UpperCamelCase__ = component_class(**SCREAMING_SNAKE_CASE_ ) setattr(self.backend_tokenizer , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) @property def UpperCAmelCase_ (self ): if self._mask_token is None: if self.verbose: logger.error("""Using mask_token, but it is not set yet.""" ) return None return str(self._mask_token ) @mask_token.setter def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else value UpperCamelCase__ = value def UpperCAmelCase_ (self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = kwargs.get("""is_split_into_words""" , SCREAMING_SNAKE_CASE_ ) assert self.add_prefix_space or not is_split_into_words, ( F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True " "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = kwargs.get("""is_split_into_words""" , SCREAMING_SNAKE_CASE_ ) assert self.add_prefix_space or not is_split_into_words, ( F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True " "to use it with pretokenized inputs." 
) return super()._encode_plus(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ): UpperCamelCase__ = self._tokenizer.model.save(SCREAMING_SNAKE_CASE_ , name=SCREAMING_SNAKE_CASE_ ) return tuple(SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None ): UpperCamelCase__ = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ): UpperCamelCase__ = [self.sep_token_id] UpperCamelCase__ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
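The pre-tokenizer and post-processor surgery in __init__ above exists so that add_prefix_space and trim_offsets passed at load time actually take effect on the Rust backend. A short sketch of the observable behaviour, assuming the public "roberta-base" checkpoint:

from transformers import RobertaTokenizerFast

tokenizer = RobertaTokenizerFast.from_pretrained("roberta-base", add_prefix_space=True)
encoding = tokenizer("hello world", return_offsets_mapping=True, add_special_tokens=False)
# Offsets point back into the original string, e.g. [(0, 5), (6, 11)].
print(encoding.offset_mapping)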
86
from sklearn.metrics import matthews_corrcoef import datasets lowerCamelCase_ = ''' Compute the Matthews correlation coefficient (MCC) The Matthews correlation coefficient is used in machine learning as a measure of the quality of binary and multiclass classifications. It takes into account true and false positives and negatives and is generally regarded as a balanced measure which can be used even if the classes are of very different sizes. The MCC is in essence a correlation coefficient value between -1 and +1. A coefficient of +1 represents a perfect prediction, 0 an average random prediction and -1 an inverse prediction. The statistic is also known as the phi coefficient. [source: Wikipedia] ''' lowerCamelCase_ = ''' Args: predictions (list of int): Predicted labels, as returned by a model. references (list of int): Ground truth labels. sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`. Returns: matthews_correlation (dict containing float): Matthews correlation. Examples: Example 1, a basic example with only predictions and references as inputs: >>> matthews_metric = datasets.load_metric("matthews_correlation") >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2], ... predictions=[1, 2, 2, 0, 3, 3]) >>> print(round(results[\'matthews_correlation\'], 2)) 0.54 Example 2, the same example as above, but also including sample weights: >>> matthews_metric = datasets.load_metric("matthews_correlation") >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2], ... predictions=[1, 2, 2, 0, 3, 3], ... sample_weight=[0.5, 3, 1, 1, 1, 2]) >>> print(round(results[\'matthews_correlation\'], 2)) 0.1 Example 3, the same example as above, but with sample weights that cause a negative correlation: >>> matthews_metric = datasets.load_metric("matthews_correlation") >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2], ... predictions=[1, 2, 2, 0, 3, 3], ... sample_weight=[0.5, 1, 0, 0, 0, 1]) >>> print(round(results[\'matthews_correlation\'], 2)) -0.25 ''' lowerCamelCase_ = '''\ @article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011} } ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __A( datasets.Metric ): """simple docstring""" def UpperCAmelCase_ (self ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Value("""int32""" ), """references""": datasets.Value("""int32""" ), } ) , reference_urls=[ """https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html""" ] , ) def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None ): return { "matthews_correlation": float(matthews_corrcoef(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , sample_weight=SCREAMING_SNAKE_CASE_ ) ), }
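The compute method above delegates directly to scikit-learn, so the wrapper can be sanity-checked against the raw function:

from sklearn.metrics import matthews_corrcoef

score = matthews_corrcoef([1, 3, 2, 0, 3, 2], [1, 2, 2, 0, 3, 3])
print(round(score, 2))  # 0.54, matching Example 1 in the docstring above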
86
1
from __future__ import annotations

from typing import Dict

from ...configuration_utils import PretrainedConfig


ERNIE_M_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "susnato/ernie-m-base_pytorch": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json",
    "susnato/ernie-m-large_pytorch": "https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json",
}


class ErnieMConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "ernie_m"
    attribute_map: Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}

    def __init__(
        self,
        vocab_size: int = 250002,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 514,
        initializer_range: float = 0.02,
        pad_token_id: int = 1,
        layer_norm_eps: float = 1e-05,
        classifier_dropout=None,
        is_decoder=False,
        act_dropout=0.0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.is_decoder = is_decoder
        self.act_dropout = act_dropout
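A quick sketch of how the config is meant to be used; any argument not passed keeps the default declared above:

config = ErnieMConfig(hidden_size=1024, num_hidden_layers=24)
print(config.model_type, config.hidden_size)  # ernie_m 1024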
86
def validate_initial_digits(credit_card_number: str) -> bool:
    """simple docstring"""
    return credit_card_number.startswith(("34", "35", "37", "4", "5", "6"))


def luhn_validation(credit_card_number: str) -> bool:
    """simple docstring"""
    cc_number = credit_card_number
    total = 0
    half_len = len(cc_number) - 2
    for i in range(half_len, -1, -2):
        # double the value of every second digit
        digit = int(cc_number[i])
        digit *= 2
        # If doubling of a number results in a two digit number
        # i.e greater than 9 (e.g., 6 × 2 = 12),
        # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
        # to get a single digit number.
        if digit > 9:
            digit %= 10
            digit += 1
        cc_number = cc_number[:i] + str(digit) + cc_number[i + 1 :]
        total += digit
    # Sum up the remaining digits
    for i in range(len(cc_number) - 1, -1, -2):
        total += int(cc_number[i])
    return total % 10 == 0


def validate_credit_card_number(credit_card_number: str) -> bool:
    """simple docstring"""
    error_message = f"{credit_card_number} is an invalid credit card number because"
    if not credit_card_number.isdigit():
        print(f"{error_message} it has nonnumerical characters.")
        return False
    if not 13 <= len(credit_card_number) <= 16:
        print(f"{error_message} of its length.")
        return False
    if not validate_initial_digits(credit_card_number):
        print(f"{error_message} of its first two digits.")
        return False
    if not luhn_validation(credit_card_number):
        print(f"{error_message} it fails the Luhn check.")
        return False
    print(f"{credit_card_number} is a valid credit card number.")
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    validate_credit_card_number("4111111111111111")
    validate_credit_card_number("32323")
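A quick check of the helpers above on the classic Visa test number (4111111111111111 passes the Luhn check; flipping its last digit breaks it):

assert luhn_validation("4111111111111111")
assert not luhn_validation("4111111111111112")
assert validate_initial_digits("4111111111111111")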
86
1
import heapq as hq
import math
from collections.abc import Iterator


class Vertex:
    """simple docstring"""

    def __init__(self, id_):
        self.id = str(id_)
        self.key = None
        self.pi = None
        self.neighbors = []
        self.edges = {}  # {vertex:distance}

    def __lt__(self, other):
        return self.key < other.key

    def __repr__(self):
        return self.id

    def add_neighbor(self, vertex):
        self.neighbors.append(vertex)

    def add_edge(self, vertex, weight):
        self.edges[vertex.id] = weight


def connect(graph, a, b, edge):
    """simple docstring"""
    graph[a - 1].add_neighbor(graph[b - 1])
    graph[b - 1].add_neighbor(graph[a - 1])
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1], edge)
    graph[b - 1].add_edge(graph[a - 1], edge)


def prim(graph: list, root: Vertex) -> list:
    """simple docstring"""
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    q = graph[:]
    while q:
        u = min(q)
        q.remove(u)
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1, len(graph)):
        a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
    return a


def prim_heap(graph: list, root: Vertex) -> Iterator[tuple]:
    """simple docstring"""
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    h = list(graph)
    hq.heapify(h)
    while h:
        u = hq.heappop(h)
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
                hq.heapify(h)
    for i in range(1, len(graph)):
        yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)


def test_vector() -> None:
    """simple docstring"""


if __name__ == "__main__":
    import doctest

    doctest.testmod()
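A small usage sketch for the functions above: vertex ids start at 0 so the 1-based (vertex, parent) pairs come out right, and connect takes 1-based endpoints:

graph = [Vertex(i) for i in range(4)]
connect(graph, 1, 2, 1)
connect(graph, 2, 3, 2)
connect(graph, 3, 4, 1)
connect(graph, 1, 4, 4)
print(prim(graph, graph[0]))             # [(2, 1), (3, 2), (4, 3)]
print(list(prim_heap(graph, graph[0])))  # same MST via the heap variant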
86
def solution(length: int = 50) -> int:
    """simple docstring"""
    ways_number = [1] * (length + 1)
    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                ways_number[row_length] += ways_number[
                    row_length - tile_start - tile_length
                ]
    return ways_number[length]


if __name__ == "__main__":
    print(f'{solution() = }')
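This is the standard dynamic programme for counting tilings of a row with blocks of length two, three and four; a cross-check against hand-counted small cases:

assert [solution(n) for n in range(6)] == [1, 1, 2, 4, 8, 15]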
86
1
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import MobileNetVaImageProcessor class __A( unittest.TestCase ): """simple docstring""" def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=7 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=18 , SCREAMING_SNAKE_CASE_=30 , SCREAMING_SNAKE_CASE_=4_00 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=None , ): UpperCamelCase__ = size if size is not None else {"""shortest_edge""": 20} UpperCamelCase__ = crop_size if crop_size is not None else {"""height""": 18, """width""": 18} UpperCamelCase__ = parent UpperCamelCase__ = batch_size UpperCamelCase__ = num_channels UpperCamelCase__ = image_size UpperCamelCase__ = min_resolution UpperCamelCase__ = max_resolution UpperCamelCase__ = do_resize UpperCamelCase__ = size UpperCamelCase__ = do_center_crop UpperCamelCase__ = crop_size def UpperCAmelCase_ (self ): return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, } @require_torch @require_vision class __A( __lowerCamelCase , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = MobileNetVaImageProcessor if is_vision_available() else None def UpperCAmelCase_ (self ): UpperCamelCase__ = MobileNetVaImageProcessingTester(self ) @property def UpperCAmelCase_ (self ): return self.image_processor_tester.prepare_image_processor_dict() def UpperCAmelCase_ (self ): UpperCamelCase__ = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , """do_resize""" ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , """size""" ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , """do_center_crop""" ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , """crop_size""" ) ) def UpperCAmelCase_ (self ): UpperCamelCase__ = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"""shortest_edge""": 20} ) self.assertEqual(image_processor.crop_size , {"""height""": 18, """width""": 18} ) UpperCamelCase__ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 ) self.assertEqual(image_processor.size , {"""shortest_edge""": 42} ) self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} ) def UpperCAmelCase_ (self ): pass def UpperCAmelCase_ (self ): # Initialize image_processing UpperCamelCase__ = self.image_processing_class(**self.image_processor_dict ) # create random PIL images UpperCamelCase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE_ ) for image in image_inputs: self.assertIsInstance(SCREAMING_SNAKE_CASE_ , Image.Image ) # Test not batched input UpperCamelCase__ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched UpperCamelCase__ = image_processing(SCREAMING_SNAKE_CASE_ , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 
self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) def UpperCAmelCase_ (self ): # Initialize image_processing UpperCamelCase__ = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors UpperCamelCase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE_ , numpify=SCREAMING_SNAKE_CASE_ ) for image in image_inputs: self.assertIsInstance(SCREAMING_SNAKE_CASE_ , np.ndarray ) # Test not batched input UpperCamelCase__ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched UpperCamelCase__ = image_processing(SCREAMING_SNAKE_CASE_ , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) def UpperCAmelCase_ (self ): # Initialize image_processing UpperCamelCase__ = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors UpperCamelCase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE_ , torchify=SCREAMING_SNAKE_CASE_ ) for image in image_inputs: self.assertIsInstance(SCREAMING_SNAKE_CASE_ , torch.Tensor ) # Test not batched input UpperCamelCase__ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched UpperCamelCase__ = image_processing(SCREAMING_SNAKE_CASE_ , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , )
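The shape assertions above can be reproduced directly; a minimal sketch with a single dummy image (the output spatial size follows crop_size):

import numpy as np
from transformers import MobileNetV1ImageProcessor

processor = MobileNetV1ImageProcessor(size={"shortest_edge": 20}, crop_size={"height": 18, "width": 18})
pixel_values = processor(np.zeros((30, 30, 3), dtype=np.uint8), return_tensors="pt").pixel_values
print(pixel_values.shape)  # torch.Size([1, 3, 18, 18])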
86
import itertools import json import os import unittest from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class __A( __lowerCamelCase , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = RobertaTokenizer SCREAMING_SNAKE_CASE__ = RobertaTokenizerFast SCREAMING_SNAKE_CASE__ = True SCREAMING_SNAKE_CASE__ = {"""cls_token""": """<s>"""} def UpperCAmelCase_ (self ): super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt UpperCamelCase__ = [ """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """\u0120""", """\u0120l""", """\u0120n""", """\u0120lo""", """\u0120low""", """er""", """\u0120lowest""", """\u0120newer""", """\u0120wider""", """<unk>""", ] UpperCamelCase__ = dict(zip(SCREAMING_SNAKE_CASE_ , range(len(SCREAMING_SNAKE_CASE_ ) ) ) ) UpperCamelCase__ = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""] UpperCamelCase__ = {"""unk_token""": """<unk>"""} UpperCamelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) UpperCamelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp: fp.write(json.dumps(SCREAMING_SNAKE_CASE_ ) + """\n""" ) with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp: fp.write("""\n""".join(SCREAMING_SNAKE_CASE_ ) ) def UpperCAmelCase_ (self , **SCREAMING_SNAKE_CASE_ ): kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self , **SCREAMING_SNAKE_CASE_ ): kwargs.update(self.special_tokens_map ) return RobertaTokenizerFast.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = """lower newer""" UpperCamelCase__ = """lower newer""" return input_text, output_text def UpperCAmelCase_ (self ): UpperCamelCase__ = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map ) UpperCamelCase__ = """lower newer""" UpperCamelCase__ = ["""l""", """o""", """w""", """er""", """\u0120""", """n""", """e""", """w""", """er"""] UpperCamelCase__ = tokenizer.tokenize(SCREAMING_SNAKE_CASE_ ) # , add_prefix_space=True) self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = tokens + [tokenizer.unk_token] UpperCamelCase__ = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self ): UpperCamelCase__ = self.get_tokenizer() self.assertListEqual(tokenizer.encode("""Hello world!""" , add_special_tokens=SCREAMING_SNAKE_CASE_ ) , [0, 3_14_14, 2_32, 3_28, 2] ) self.assertListEqual( tokenizer.encode("""Hello world! 
cécé herlolip 418""" , add_special_tokens=SCREAMING_SNAKE_CASE_ ) , [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2] , ) @slow def UpperCAmelCase_ (self ): UpperCamelCase__ = self.tokenizer_class.from_pretrained("""roberta-base""" ) UpperCamelCase__ = tokenizer.encode("""sequence builders""" , add_special_tokens=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = tokenizer.encode("""multi-sequence build""" , add_special_tokens=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = tokenizer.encode( """sequence builders""" , add_special_tokens=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = tokenizer.encode( """sequence builders""" , """multi-sequence build""" , add_special_tokens=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) assert encoded_sentence == encoded_text_from_decode assert encoded_pair == encoded_pair_from_decode def UpperCAmelCase_ (self ): UpperCamelCase__ = self.get_tokenizer() UpperCamelCase__ = """Encode this sequence.""" UpperCamelCase__ = tokenizer.byte_encoder[""" """.encode("""utf-8""" )[0]] # Testing encoder arguments UpperCamelCase__ = tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertNotEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) tokenizer.add_special_tokens({"""bos_token""": """<s>"""} ) UpperCamelCase__ = tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = tokenizer.convert_ids_to_tokens(encoded[1] )[0] self.assertNotEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # Testing spaces after special tokens UpperCamelCase__ = """<mask>""" tokenizer.add_special_tokens( {"""mask_token""": AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ )} ) # mask token has a left space UpperCamelCase__ = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = """Encode <mask> sequence""" UpperCamelCase__ = """Encode <mask>sequence""" UpperCamelCase__ = tokenizer.encode(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = encoded.index(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = tokenizer.encode(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = encoded.index(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertNotEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self ): pass def UpperCAmelCase_ (self ): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ): UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = self.tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = """A, <mask> 
AllenNLP sentence.""" UpperCamelCase__ = tokenizer_r.encode_plus(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ , return_token_type_ids=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = tokenizer_p.encode_plus(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ , return_token_type_ids=SCREAMING_SNAKE_CASE_ ) # token_type_ids should put 0 everywhere self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , sum(tokens_p["""token_type_ids"""] ) ) # attention_mask should put 1 everywhere, so sum over length should be 1 self.assertEqual( sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , ) UpperCamelCase__ = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] ) UpperCamelCase__ = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] ) # Rust correctly handles the space before the mask while python doesnt self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] ) self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] ) self.assertSequenceEqual( SCREAMING_SNAKE_CASE_ , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] ) self.assertSequenceEqual( SCREAMING_SNAKE_CASE_ , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] ) def UpperCAmelCase_ (self ): for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ): UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained( self.tmpdirname , use_fast=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ , trim_offsets=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() ) UpperCamelCase__ = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() ) self.assertEqual(pre_tokenizer_state["""add_prefix_space"""] , SCREAMING_SNAKE_CASE_ ) self.assertEqual(post_processor_state["""add_prefix_space"""] , SCREAMING_SNAKE_CASE_ ) self.assertEqual(post_processor_state["""trim_offsets"""] , SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self ): # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and # `trim_offsets` for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ): UpperCamelCase__ = """hello""" # `hello` is a token in the vocabulary of `pretrained_name` UpperCamelCase__ = F"{text_of_1_token} {text_of_1_token}" UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained( SCREAMING_SNAKE_CASE_ , use_fast=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ , trim_offsets=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = tokenizer_r(SCREAMING_SNAKE_CASE_ , return_offsets_mapping=SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ ) self.assertEqual(encoding.offset_mapping[0] , (0, len(SCREAMING_SNAKE_CASE_ )) ) self.assertEqual( encoding.offset_mapping[1] , (len(SCREAMING_SNAKE_CASE_ ) + 1, len(SCREAMING_SNAKE_CASE_ ) + 1 + len(SCREAMING_SNAKE_CASE_ )) , ) UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained( SCREAMING_SNAKE_CASE_ , use_fast=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ , trim_offsets=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = tokenizer_r(SCREAMING_SNAKE_CASE_ , 
return_offsets_mapping=SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ ) self.assertEqual(encoding.offset_mapping[0] , (0, len(SCREAMING_SNAKE_CASE_ )) ) self.assertEqual( encoding.offset_mapping[1] , (len(SCREAMING_SNAKE_CASE_ ) + 1, len(SCREAMING_SNAKE_CASE_ ) + 1 + len(SCREAMING_SNAKE_CASE_ )) , ) UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained( SCREAMING_SNAKE_CASE_ , use_fast=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ , trim_offsets=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = tokenizer_r(SCREAMING_SNAKE_CASE_ , return_offsets_mapping=SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ ) self.assertEqual(encoding.offset_mapping[0] , (0, len(SCREAMING_SNAKE_CASE_ )) ) self.assertEqual( encoding.offset_mapping[1] , (len(SCREAMING_SNAKE_CASE_ ), len(SCREAMING_SNAKE_CASE_ ) + 1 + len(SCREAMING_SNAKE_CASE_ )) , ) UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained( SCREAMING_SNAKE_CASE_ , use_fast=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ , trim_offsets=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = tokenizer_r(SCREAMING_SNAKE_CASE_ , return_offsets_mapping=SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ ) self.assertEqual(encoding.offset_mapping[0] , (0, len(SCREAMING_SNAKE_CASE_ )) ) self.assertEqual( encoding.offset_mapping[1] , (len(SCREAMING_SNAKE_CASE_ ), len(SCREAMING_SNAKE_CASE_ ) + 1 + len(SCREAMING_SNAKE_CASE_ )) , ) UpperCamelCase__ = F" {text}" # tokenizer_r = self.rust_tokenizer_class.from_pretrained( # pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True # ) # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False) # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token))) # self.assertEqual( # encoding.offset_mapping[1], # (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), # ) UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained( SCREAMING_SNAKE_CASE_ , use_fast=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ , trim_offsets=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = tokenizer_r(SCREAMING_SNAKE_CASE_ , return_offsets_mapping=SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ ) self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(SCREAMING_SNAKE_CASE_ )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(SCREAMING_SNAKE_CASE_ ) + 1, 1 + len(SCREAMING_SNAKE_CASE_ ) + 1 + len(SCREAMING_SNAKE_CASE_ )) , ) UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained( SCREAMING_SNAKE_CASE_ , use_fast=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ , trim_offsets=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = tokenizer_r(SCREAMING_SNAKE_CASE_ , return_offsets_mapping=SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(SCREAMING_SNAKE_CASE_ )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(SCREAMING_SNAKE_CASE_ ), 1 + len(SCREAMING_SNAKE_CASE_ ) + 1 + len(SCREAMING_SNAKE_CASE_ )) , ) UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained( SCREAMING_SNAKE_CASE_ , use_fast=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ , trim_offsets=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = tokenizer_r(SCREAMING_SNAKE_CASE_ , return_offsets_mapping=SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(SCREAMING_SNAKE_CASE_ )) ) self.assertEqual( 
encoding.offset_mapping[1] , (1 + len(SCREAMING_SNAKE_CASE_ ), 1 + len(SCREAMING_SNAKE_CASE_ ) + 1 + len(SCREAMING_SNAKE_CASE_ )) , )
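The trim_offsets flag exercised above controls whether offsets swallow the leading Ġ space of a token; a sketch of the expected difference, assuming the public "roberta-base" checkpoint:

from transformers import RobertaTokenizerFast

text = "hello hello"
for trim in (True, False):
    tok = RobertaTokenizerFast.from_pretrained("roberta-base", trim_offsets=trim)
    enc = tok(text, return_offsets_mapping=True, add_special_tokens=False)
    print(trim, enc.offset_mapping[1])  # expected (6, 11) trimmed, (5, 11) untrimmed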
86
1
NUMBERS_PLUS_LETTER = "Input must be a string of 8 numbers plus letter"
LOOKUP_LETTERS = "TRWAGMYFPDXBNJZSQVHLCKE"


def is_spanish_national_id(spanish_id: str) -> bool:
    """simple docstring"""
    if not isinstance(spanish_id, str):
        msg = f"Expected string as input, found {type(spanish_id).__name__}"
        raise TypeError(msg)
    spanish_id_clean = spanish_id.replace("-", "").upper()
    if len(spanish_id_clean) != 9:
        raise ValueError(NUMBERS_PLUS_LETTER)
    try:
        number = int(spanish_id_clean[0:8])
        letter = spanish_id_clean[8]
    except ValueError as ex:
        raise ValueError(NUMBERS_PLUS_LETTER) from ex
    if letter.isdigit():
        raise ValueError(NUMBERS_PLUS_LETTER)
    return letter == LOOKUP_LETTERS[number % 23]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
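A quick check using the classic example DNI: 12345678 % 23 == 14 and LOOKUP_LETTERS[14] == "Z", so "12345678Z" validates and any other letter fails:

assert is_spanish_national_id("12345678Z")
assert not is_spanish_national_id("12345678T")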
86
import argparse import json import os import pickle import shutil import numpy as np import torch from distiller import Distiller from lm_seqs_dataset import LmSeqsDataset from transformers import ( BertConfig, BertForMaskedLM, BertTokenizer, DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer, GPTaConfig, GPTaLMHeadModel, GPTaTokenizer, RobertaConfig, RobertaForMaskedLM, RobertaTokenizer, ) from utils import git_log, init_gpu_params, logger, set_seed lowerCamelCase_ = { '''distilbert''': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer), '''roberta''': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer), '''bert''': (BertConfig, BertForMaskedLM, BertTokenizer), '''gpt2''': (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer), } def __magic_name__ ( __a : Any ): '''simple docstring''' assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0) assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0) if args.mlm: assert os.path.isfile(args.token_counts ) assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"]) else: assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"]) assert args.teacher_type == args.student_type or ( args.student_type == "distilbert" and args.teacher_type == "bert" ) assert os.path.isfile(args.student_config ) if args.student_pretrained_weights is not None: assert os.path.isfile(args.student_pretrained_weights ) if args.freeze_token_type_embds: assert args.student_type in ["roberta"] assert args.alpha_ce >= 0.0 assert args.alpha_mlm >= 0.0 assert args.alpha_clm >= 0.0 assert args.alpha_mse >= 0.0 assert args.alpha_cos >= 0.0 assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0 def __magic_name__ ( __a : List[Any] , __a : Any ): '''simple docstring''' if args.student_type == "roberta": UpperCamelCase__ = False elif args.student_type == "gpt2": UpperCamelCase__ = False def __magic_name__ ( __a : int , __a : Dict ): '''simple docstring''' if args.student_type == "roberta": UpperCamelCase__ = False def __magic_name__ ( ): '''simple docstring''' UpperCamelCase__ = argparse.ArgumentParser(description="""Training""" ) parser.add_argument("""--force""" , action="""store_true""" , help="""Overwrite dump_path if it already exists.""" ) parser.add_argument( """--dump_path""" , type=__a , required=__a , help="""The output directory (log, checkpoints, parameters, etc.)""" ) parser.add_argument( """--data_file""" , type=__a , required=__a , help="""The binarized file (tokenized + tokens_to_ids) and grouped by sequence.""" , ) parser.add_argument( """--student_type""" , type=__a , choices=["""distilbert""", """roberta""", """gpt2"""] , required=__a , help="""The student type (DistilBERT, RoBERTa).""" , ) parser.add_argument("""--student_config""" , type=__a , required=__a , help="""Path to the student configuration.""" ) parser.add_argument( """--student_pretrained_weights""" , default=__a , type=__a , help="""Load student initialization checkpoint.""" ) parser.add_argument( """--teacher_type""" , choices=["""bert""", """roberta""", """gpt2"""] , required=__a , help="""Teacher type (BERT, RoBERTa).""" ) parser.add_argument("""--teacher_name""" , type=__a , required=__a , help="""The teacher model.""" ) parser.add_argument("""--temperature""" , default=2.0 , type=__a , help="""Temperature for the softmax temperature.""" ) parser.add_argument( """--alpha_ce""" , default=0.5 , type=__a , 
help="""Linear weight for the distillation loss. Must be >=0.""" ) parser.add_argument( """--alpha_mlm""" , default=0.0 , type=__a , help="""Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.""" , ) parser.add_argument("""--alpha_clm""" , default=0.5 , type=__a , help="""Linear weight for the CLM loss. Must be >=0.""" ) parser.add_argument("""--alpha_mse""" , default=0.0 , type=__a , help="""Linear weight of the MSE loss. Must be >=0.""" ) parser.add_argument( """--alpha_cos""" , default=0.0 , type=__a , help="""Linear weight of the cosine embedding loss. Must be >=0.""" ) parser.add_argument( """--mlm""" , action="""store_true""" , help="""The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.""" ) parser.add_argument( """--mlm_mask_prop""" , default=0.15 , type=__a , help="""Proportion of tokens for which we need to make a prediction.""" , ) parser.add_argument("""--word_mask""" , default=0.8 , type=__a , help="""Proportion of tokens to mask out.""" ) parser.add_argument("""--word_keep""" , default=0.1 , type=__a , help="""Proportion of tokens to keep.""" ) parser.add_argument("""--word_rand""" , default=0.1 , type=__a , help="""Proportion of tokens to randomly replace.""" ) parser.add_argument( """--mlm_smoothing""" , default=0.7 , type=__a , help="""Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).""" , ) parser.add_argument("""--token_counts""" , type=__a , help="""The token counts in the data_file for MLM.""" ) parser.add_argument( """--restrict_ce_to_mask""" , action="""store_true""" , help="""If true, compute the distillation loss only the [MLM] prediction distribution.""" , ) parser.add_argument( """--freeze_pos_embs""" , action="""store_true""" , help="""Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only.""" , ) parser.add_argument( """--freeze_token_type_embds""" , action="""store_true""" , help="""Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only.""" , ) parser.add_argument("""--n_epoch""" , type=__a , default=3 , help="""Number of pass on the whole dataset.""" ) parser.add_argument("""--batch_size""" , type=__a , default=5 , help="""Batch size (for each process).""" ) parser.add_argument( """--group_by_size""" , action="""store_false""" , help="""If true, group sequences that have similar length into the same batch. 
Default is true.""" , )
    parser.add_argument(
        """--gradient_accumulation_steps""" , type=__a , default=50 , help="""Gradient accumulation for larger training batches.""" , )
    parser.add_argument("""--warmup_prop""" , default=0.05 , type=__a , help="""Linear warmup proportion.""" )
    parser.add_argument("""--weight_decay""" , default=0.0 , type=__a , help="""Weight decay if we apply some.""" )
    parser.add_argument("""--learning_rate""" , default=5E-4 , type=__a , help="""The initial learning rate for Adam.""" )
    parser.add_argument("""--adam_epsilon""" , default=1E-6 , type=__a , help="""Epsilon for Adam optimizer.""" )
    parser.add_argument("""--max_grad_norm""" , default=5.0 , type=__a , help="""Max gradient norm.""" )
    parser.add_argument("""--initializer_range""" , default=0.02 , type=__a , help="""Random initialization range.""" )
    parser.add_argument(
        """--fp16""" ,
        action="""store_true""" ,
        help="""Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit""" , )
    parser.add_argument(
        """--fp16_opt_level""" ,
        type=__a ,
        default="""O1""" ,
        help=(
            """For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."""
            """ See details at https://nvidia.github.io/apex/amp.html"""
        ) , )
    parser.add_argument("""--n_gpu""" , type=__a , default=1 , help="""Number of GPUs in the node.""" )
    parser.add_argument("""--local_rank""" , type=__a , default=-1 , help="""Distributed training - Local rank""" )
    parser.add_argument("""--seed""" , type=__a , default=56 , help="""Random seed""" )
    parser.add_argument("""--log_interval""" , type=__a , default=500 , help="""Tensorboard logging interval.""" )
    parser.add_argument("""--checkpoint_interval""" , type=__a , default=4_000 , help="""Checkpoint interval.""" )
    UpperCamelCase__ = parser.parse_args()
    sanity_checks(__a )

    # ARGS #
    init_gpu_params(__a )
    set_seed(__a )
    if args.is_master:
        if os.path.exists(args.dump_path ):
            if not args.force:
                raise ValueError(
                    f"Serialization dir {args.dump_path} already exists, but you have not specified whether to overwrite"
                    """ it. Use `--force` if you want to overwrite it.""" )
            else:
                shutil.rmtree(args.dump_path )

        if not os.path.exists(args.dump_path ):
            os.makedirs(args.dump_path )
        logger.info(f"Experiment will be dumped and logged in {args.dump_path}" )

        # SAVE PARAMS #
        logger.info(f"Param: {args}" )
        with open(os.path.join(args.dump_path , """parameters.json""" ) , """w""" ) as f:
            json.dump(vars(__a ) , __a , indent=4 )
        git_log(args.dump_path )

    UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = MODEL_CLASSES[args.student_type]
    UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = MODEL_CLASSES[args.teacher_type]

    # TOKENIZER #
    UpperCamelCase__ = teacher_tokenizer_class.from_pretrained(args.teacher_name )
    UpperCamelCase__ = {}
    for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
        UpperCamelCase__ = tokenizer.all_special_tokens.index(__a )
        UpperCamelCase__ = tokenizer.all_special_ids[idx]
    logger.info(f"Special tokens {special_tok_ids}" )
    UpperCamelCase__ = special_tok_ids
    UpperCamelCase__ = tokenizer.max_model_input_sizes[args.teacher_name]

    # DATA LOADER #
    logger.info(f"Loading data from {args.data_file}" )
    with open(args.data_file , """rb""" ) as fp:
        UpperCamelCase__ = pickle.load(__a )

    if args.mlm:
        logger.info(f"Loading token counts from {args.token_counts} (already pre-computed)" )
        with open(args.token_counts , """rb""" ) as fp:
            UpperCamelCase__ = pickle.load(__a )

        UpperCamelCase__ = np.maximum(__a , 1 ) ** -args.mlm_smoothing
        for idx in special_tok_ids.values():
            UpperCamelCase__ = 0.0
# do not predict special tokens UpperCamelCase__ = torch.from_numpy(__a ) else: UpperCamelCase__ = None UpperCamelCase__ = LmSeqsDataset(params=__a , data=__a ) logger.info("""Data loader created.""" ) # STUDENT # logger.info(f"Loading student config from {args.student_config}" ) UpperCamelCase__ = student_config_class.from_pretrained(args.student_config ) UpperCamelCase__ = True if args.student_pretrained_weights is not None: logger.info(f"Loading pretrained weights from {args.student_pretrained_weights}" ) UpperCamelCase__ = student_model_class.from_pretrained(args.student_pretrained_weights , config=__a ) else: UpperCamelCase__ = student_model_class(__a ) if args.n_gpu > 0: student.to(f"cuda:{args.local_rank}" ) logger.info("""Student loaded.""" ) # TEACHER # UpperCamelCase__ = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=__a ) if args.n_gpu > 0: teacher.to(f"cuda:{args.local_rank}" ) logger.info(f"Teacher loaded from {args.teacher_name}." ) # FREEZING # if args.freeze_pos_embs: freeze_pos_embeddings(__a , __a ) if args.freeze_token_type_embds: freeze_token_type_embeddings(__a , __a ) # SANITY CHECKS # assert student.config.vocab_size == teacher.config.vocab_size assert student.config.hidden_size == teacher.config.hidden_size assert student.config.max_position_embeddings == teacher.config.max_position_embeddings if args.mlm: assert token_probs.size(0 ) == stu_architecture_config.vocab_size # DISTILLER # torch.cuda.empty_cache() UpperCamelCase__ = Distiller( params=__a , dataset=__a , token_probs=__a , student=__a , teacher=__a ) distiller.train() logger.info("""Let's go get some drinks.""" ) if __name__ == "__main__": main()
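For reference, a minimal standalone sketch of the MLM token-probability smoothing performed above: counts are raised to the power -mlm_smoothing so rarer tokens get more masking mass, and special tokens are zeroed out. The counts and special-token ids here are toy values, not a real vocabulary.

# Sketch of the token-probability smoothing in main() (toy values).
import numpy as np
import torch

counts = np.array([0, 10, 200, 3, 50])   # hypothetical per-token counts
special_tok_ids = {"cls_token": 0}       # hypothetical special-token ids
mlm_smoothing = 0.7

token_probs = np.maximum(counts, 1) ** -mlm_smoothing  # rarer tokens -> higher mass
for idx in special_tok_ids.values():
    token_probs[idx] = 0.0               # never mask special tokens
token_probs = torch.from_numpy(token_probs)
print(token_probs)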
86
1
import os import time import pytest from datasets.utils.filelock import FileLock, Timeout def __magic_name__ ( __a : Optional[int] ): '''simple docstring''' UpperCamelCase__ = FileLock(str(tmpdir / """foo.lock""" ) ) UpperCamelCase__ = FileLock(str(tmpdir / """foo.lock""" ) ) UpperCamelCase__ = 0.01 with locka.acquire(): with pytest.raises(__a ): UpperCamelCase__ = time.time() locka.acquire(__a ) assert time.time() - _start > timeout def __magic_name__ ( __a : Dict ): '''simple docstring''' UpperCamelCase__ = """a""" * 1_000 + """.lock""" UpperCamelCase__ = FileLock(str(tmpdir / filename ) ) assert locka._lock_file.endswith(""".lock""" ) assert not locka._lock_file.endswith(__a ) assert len(os.path.basename(locka._lock_file ) ) <= 255 UpperCamelCase__ = FileLock(tmpdir / filename ) with locka.acquire(): with pytest.raises(__a ): locka.acquire(0 )
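A small usage sketch of the contention behavior the tests above assert: a second FileLock on the same file times out while the first one holds it. The lock path is illustrative.

# Sketch of FileLock contention (illustrative path).
import os
import tempfile

from datasets.utils.filelock import FileLock, Timeout

path = os.path.join(tempfile.gettempdir(), "demo.lock")
outer = FileLock(path)
inner = FileLock(path)
with outer.acquire():
    try:
        inner.acquire(timeout=0.01)  # same file, already held -> Timeout
    except Timeout:
        print("second acquire timed out as expected")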
86
from .glue import GlueDataset, GlueDataTrainingArguments from .language_modeling import ( LineByLineTextDataset, LineByLineWithRefDataset, LineByLineWithSOPTextDataset, TextDataset, TextDatasetForNextSentencePrediction, ) from .squad import SquadDataset, SquadDataTrainingArguments
86
1
from dataclasses import dataclass from typing import Optional, Tuple, Union import torch import torch.nn as nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, apply_forward_hook from .modeling_utils import ModelMixin from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer @dataclass class __A( __lowerCamelCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = 42 class __A( __lowerCamelCase , __lowerCamelCase ): """simple docstring""" @register_to_config def __init__(self , SCREAMING_SNAKE_CASE_ = 3 , SCREAMING_SNAKE_CASE_ = 3 , SCREAMING_SNAKE_CASE_ = ("DownEncoderBlock2D",) , SCREAMING_SNAKE_CASE_ = ("UpDecoderBlock2D",) , SCREAMING_SNAKE_CASE_ = (64,) , SCREAMING_SNAKE_CASE_ = 1 , SCREAMING_SNAKE_CASE_ = "silu" , SCREAMING_SNAKE_CASE_ = 3 , SCREAMING_SNAKE_CASE_ = 32 , SCREAMING_SNAKE_CASE_ = 2_56 , SCREAMING_SNAKE_CASE_ = 32 , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 0.1_8215 , SCREAMING_SNAKE_CASE_ = "group" , ): super().__init__() # pass init params to Encoder UpperCamelCase__ = Encoder( in_channels=SCREAMING_SNAKE_CASE_ , out_channels=SCREAMING_SNAKE_CASE_ , down_block_types=SCREAMING_SNAKE_CASE_ , block_out_channels=SCREAMING_SNAKE_CASE_ , layers_per_block=SCREAMING_SNAKE_CASE_ , act_fn=SCREAMING_SNAKE_CASE_ , norm_num_groups=SCREAMING_SNAKE_CASE_ , double_z=SCREAMING_SNAKE_CASE_ , ) UpperCamelCase__ = vq_embed_dim if vq_embed_dim is not None else latent_channels UpperCamelCase__ = nn.Convad(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , 1 ) UpperCamelCase__ = VectorQuantizer(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , beta=0.25 , remap=SCREAMING_SNAKE_CASE_ , sane_index_shape=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = nn.Convad(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , 1 ) # pass init params to Decoder UpperCamelCase__ = Decoder( in_channels=SCREAMING_SNAKE_CASE_ , out_channels=SCREAMING_SNAKE_CASE_ , up_block_types=SCREAMING_SNAKE_CASE_ , block_out_channels=SCREAMING_SNAKE_CASE_ , layers_per_block=SCREAMING_SNAKE_CASE_ , act_fn=SCREAMING_SNAKE_CASE_ , norm_num_groups=SCREAMING_SNAKE_CASE_ , norm_type=SCREAMING_SNAKE_CASE_ , ) @apply_forward_hook def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = True ): UpperCamelCase__ = self.encoder(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = self.quant_conv(SCREAMING_SNAKE_CASE_ ) if not return_dict: return (h,) return VQEncoderOutput(latents=SCREAMING_SNAKE_CASE_ ) @apply_forward_hook def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = False , SCREAMING_SNAKE_CASE_ = True ): # also go through quantization layer if not force_not_quantize: UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = self.quantize(SCREAMING_SNAKE_CASE_ ) else: UpperCamelCase__ = h UpperCamelCase__ = self.post_quant_conv(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = self.decoder(SCREAMING_SNAKE_CASE_ , quant if self.config.norm_type == """spatial""" else None ) if not return_dict: return (dec,) return DecoderOutput(sample=SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = True ): UpperCamelCase__ = sample UpperCamelCase__ = self.encode(SCREAMING_SNAKE_CASE_ ).latents UpperCamelCase__ = self.decode(SCREAMING_SNAKE_CASE_ ).sample if not return_dict: return (dec,) return DecoderOutput(sample=SCREAMING_SNAKE_CASE_ )
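To illustrate what the VectorQuantizer step above does, here is a self-contained sketch of nearest-codebook quantization in plain torch. Sizes are toy values and this is not the diffusers implementation, only the core lookup.

# Toy nearest-neighbour codebook lookup, the heart of the quantize step above.
import torch

codebook = torch.randn(256, 3)           # n_embed=256 entries of vq_embed_dim=3
h = torch.randn(1, 3, 8, 8)              # encoder output after quant_conv

flat = h.permute(0, 2, 3, 1).reshape(-1, 3)   # (N, C)
dists = torch.cdist(flat, codebook)           # (N, n_embed)
indices = dists.argmin(dim=1)                 # nearest code per spatial position
quant = codebook[indices].reshape(1, 8, 8, 3).permute(0, 3, 1, 2)
print(quant.shape)  # torch.Size([1, 3, 8, 8]) -- same shape as h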
86
import math from typing import Callable, List, Optional, Union import numpy as np import PIL import torch from PIL import Image from transformers import CLIPTextModel, CLIPTokenizer from diffusers.models import AutoencoderKL, UNetaDConditionModel from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler def __magic_name__ ( __a : int , __a : List[str] , __a : str=[] ): '''simple docstring''' UpperCamelCase__ = size[0] - overlap_pixels * 2 UpperCamelCase__ = size[1] - overlap_pixels * 2 for letter in ["l", "r"]: if letter in remove_borders: size_x += overlap_pixels for letter in ["t", "b"]: if letter in remove_borders: size_y += overlap_pixels UpperCamelCase__ = np.ones((size_y, size_x) , dtype=np.uinta ) * 255 UpperCamelCase__ = np.pad(__a , mode="""linear_ramp""" , pad_width=__a , end_values=0 ) if "l" in remove_borders: UpperCamelCase__ = mask[:, overlap_pixels : mask.shape[1]] if "r" in remove_borders: UpperCamelCase__ = mask[:, 0 : mask.shape[1] - overlap_pixels] if "t" in remove_borders: UpperCamelCase__ = mask[overlap_pixels : mask.shape[0], :] if "b" in remove_borders: UpperCamelCase__ = mask[0 : mask.shape[0] - overlap_pixels, :] return mask def __magic_name__ ( __a : int , __a : Dict , __a : Optional[int] ): '''simple docstring''' return max(__a , min(__a , __a ) ) def __magic_name__ ( __a : [int] , __a : [int] , __a : [int] ): '''simple docstring''' return ( clamp(rect[0] , min[0] , max[0] ), clamp(rect[1] , min[1] , max[1] ), clamp(rect[2] , min[0] , max[0] ), clamp(rect[3] , min[1] , max[1] ), ) def __magic_name__ ( __a : [int] , __a : int , __a : [int] ): '''simple docstring''' UpperCamelCase__ = list(__a ) rect[0] -= overlap rect[1] -= overlap rect[2] += overlap rect[3] += overlap UpperCamelCase__ = clamp_rect(__a , [0, 0] , [image_size[0], image_size[1]] ) return rect def __magic_name__ ( __a : Optional[int] , __a : Tuple , __a : str , __a : List[Any] ): '''simple docstring''' UpperCamelCase__ = Image.new("""RGB""" , (tile.size[0] + original_slice, tile.size[1]) ) result.paste( original_image.resize((tile.size[0], tile.size[1]) , Image.BICUBIC ).crop( (slice_x, 0, slice_x + original_slice, tile.size[1]) ) , (0, 0) , ) result.paste(__a , (original_slice, 0) ) return result def __magic_name__ ( __a : int , __a : int ): '''simple docstring''' UpperCamelCase__ = (original_image_slice * 4, 0, tile.size[0], tile.size[1]) UpperCamelCase__ = tile.crop(__a ) return tile def __magic_name__ ( __a : List[str] , __a : Any ): '''simple docstring''' UpperCamelCase__ = n % d return n - divisor class __A( __lowerCamelCase ): """simple docstring""" def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 3_50 , ): super().__init__( vae=SCREAMING_SNAKE_CASE_ , text_encoder=SCREAMING_SNAKE_CASE_ , tokenizer=SCREAMING_SNAKE_CASE_ , unet=SCREAMING_SNAKE_CASE_ , low_res_scheduler=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ , max_noise_level=SCREAMING_SNAKE_CASE_ , ) def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ): torch.manual_seed(0 ) UpperCamelCase__ = ( min(image.size[0] - (tile_size + original_image_slice) , x * tile_size ), 
min(image.size[1] - (tile_size + original_image_slice) , y * tile_size ), min(image.size[0] , (x + 1) * tile_size ), min(image.size[1] , (y + 1) * tile_size ), ) UpperCamelCase__ = add_overlap_rect(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , image.size ) UpperCamelCase__ = image.crop(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0] UpperCamelCase__ = translated_slice_x - (original_image_slice / 2) UpperCamelCase__ = max(0 , SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = squeeze_tile(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = to_input.size UpperCamelCase__ = to_input.resize((tile_size, tile_size) , Image.BICUBIC ) UpperCamelCase__ = super(SCREAMING_SNAKE_CASE_ , self ).__call__(image=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ).images[0] UpperCamelCase__ = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4) , Image.BICUBIC ) UpperCamelCase__ = unsqueeze_tile(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4) , Image.BICUBIC ) UpperCamelCase__ = [] if x == 0: remove_borders.append("""l""" ) elif crop_rect[2] == image.size[0]: remove_borders.append("""r""" ) if y == 0: remove_borders.append("""t""" ) elif crop_rect[3] == image.size[1]: remove_borders.append("""b""" ) UpperCamelCase__ = Image.fromarray( make_transparency_mask( (upscaled_tile.size[0], upscaled_tile.size[1]) , tile_border * 4 , remove_borders=SCREAMING_SNAKE_CASE_ ) , mode="""L""" , ) final_image.paste( SCREAMING_SNAKE_CASE_ , (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4) , SCREAMING_SNAKE_CASE_ ) @torch.no_grad() def __call__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 75 , SCREAMING_SNAKE_CASE_ = 9.0 , SCREAMING_SNAKE_CASE_ = 50 , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 1 , SCREAMING_SNAKE_CASE_ = 0.0 , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 1 , SCREAMING_SNAKE_CASE_ = 1_28 , SCREAMING_SNAKE_CASE_ = 32 , SCREAMING_SNAKE_CASE_ = 32 , ): UpperCamelCase__ = Image.new("""RGB""" , (image.size[0] * 4, image.size[1] * 4) ) UpperCamelCase__ = math.ceil(image.size[0] / tile_size ) UpperCamelCase__ = math.ceil(image.size[1] / tile_size ) UpperCamelCase__ = tcx * tcy UpperCamelCase__ = 0 for y in range(SCREAMING_SNAKE_CASE_ ): for x in range(SCREAMING_SNAKE_CASE_ ): self._process_tile( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , prompt=SCREAMING_SNAKE_CASE_ , num_inference_steps=SCREAMING_SNAKE_CASE_ , guidance_scale=SCREAMING_SNAKE_CASE_ , noise_level=SCREAMING_SNAKE_CASE_ , negative_prompt=SCREAMING_SNAKE_CASE_ , num_images_per_prompt=SCREAMING_SNAKE_CASE_ , eta=SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , latents=SCREAMING_SNAKE_CASE_ , ) current_count += 1 if callback is not None: callback({"""progress""": current_count / total_tile_count, """image""": final_image} ) return final_image def __magic_name__ ( ): '''simple docstring''' UpperCamelCase__ = """stabilityai/stable-diffusion-x4-upscaler""" UpperCamelCase__ = StableDiffusionTiledUpscalePipeline.from_pretrained(__a , revision="""fp16""" , torch_dtype=torch.floataa ) UpperCamelCase__ = pipe.to("""cuda""" ) UpperCamelCase__ = 
Image.open("""../../docs/source/imgs/diffusers_library.jpg""" )

    # The callback parameter must match the name its body uses (`obj`);
    # otherwise the progress print raises a NameError.
    def callback(obj : Optional[int] ):
        print(f"progress: {obj['progress']:.4f}" )
        obj["image"].save("""diffusers_library_progress.jpg""" )

    UpperCamelCase__ = pipe(image=__a , prompt="""Black font, white background, vector""" , noise_level=40 , callback=__a )
    final_image.save("""diffusers_library.jpg""" )


if __name__ == "__main__":
    main()
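The tiling helpers above are pure functions, so their geometry is easy to check in isolation. Below is a simplified reimplementation with toy numbers; the tile size, overlap, and image size are illustrative.

# Toy check of the overlap-rect geometry used when cropping tiles.
def clamp(n, lo, hi):
    return max(lo, min(hi, n))

def add_overlap_rect(rect, overlap, image_size):
    x0, y0, x1, y1 = rect[0] - overlap, rect[1] - overlap, rect[2] + overlap, rect[3] + overlap
    return (
        clamp(x0, 0, image_size[0]), clamp(y0, 0, image_size[1]),
        clamp(x1, 0, image_size[0]), clamp(y1, 0, image_size[1]),
    )

# A 128px tile at the image corner only grows inward:
print(add_overlap_rect((0, 0, 128, 128), 32, (512, 512)))      # (0, 0, 160, 160)
# An interior tile grows on all four sides:
print(add_overlap_rect((128, 128, 256, 256), 32, (512, 512)))  # (96, 96, 288, 288)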
86
1
import os import re import sys import traceback import warnings from pathlib import Path from typing import Dict, Optional, Union from uuid import uuida from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami from huggingface_hub.file_download import REGEX_COMMIT_HASH from huggingface_hub.utils import ( EntryNotFoundError, RepositoryNotFoundError, RevisionNotFoundError, is_jinja_available, ) from packaging import version from requests import HTTPError from .. import __version__ from .constants import ( DEPRECATED_REVISION_ARGS, DIFFUSERS_CACHE, HUGGINGFACE_CO_RESOLVE_ENDPOINT, SAFETENSORS_WEIGHTS_NAME, WEIGHTS_NAME, ) from .import_utils import ( ENV_VARS_TRUE_VALUES, _flax_version, _jax_version, _onnxruntime_version, _torch_version, is_flax_available, is_onnx_available, is_torch_available, ) from .logging import get_logger lowerCamelCase_ = get_logger(__name__) lowerCamelCase_ = Path(__file__).parent / '''model_card_template.md''' lowerCamelCase_ = uuida().hex lowerCamelCase_ = os.getenv('''HF_HUB_OFFLINE''', '''''').upper() in ENV_VARS_TRUE_VALUES lowerCamelCase_ = os.getenv('''DISABLE_TELEMETRY''', '''''').upper() in ENV_VARS_TRUE_VALUES lowerCamelCase_ = HUGGINGFACE_CO_RESOLVE_ENDPOINT + '''/api/telemetry/''' def __magic_name__ ( __a : Union[Dict, str, None] = None ): '''simple docstring''' UpperCamelCase__ = f"diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}" if DISABLE_TELEMETRY or HF_HUB_OFFLINE: return ua + "; telemetry/off" if is_torch_available(): ua += f"; torch/{_torch_version}" if is_flax_available(): ua += f"; jax/{_jax_version}" ua += f"; flax/{_flax_version}" if is_onnx_available(): ua += f"; onnxruntime/{_onnxruntime_version}" # CI will set this value to True if os.environ.get("""DIFFUSERS_IS_CI""" , """""" ).upper() in ENV_VARS_TRUE_VALUES: ua += "; is_ci/true" if isinstance(__a , __a ): ua += "; " + "; ".join(f"{k}/{v}" for k, v in user_agent.items() ) elif isinstance(__a , __a ): ua += "; " + user_agent return ua def __magic_name__ ( __a : str , __a : Optional[str] = None , __a : Optional[str] = None ): '''simple docstring''' if token is None: UpperCamelCase__ = HfFolder.get_token() if organization is None: UpperCamelCase__ = whoami(__a )["""name"""] return f"{username}/{model_id}" else: return f"{organization}/{model_id}" def __magic_name__ ( __a : int , __a : List[str] ): '''simple docstring''' if not is_jinja_available(): raise ValueError( """Modelcard rendering is based on Jinja templates.""" """ Please make sure to have `jinja` installed before using `create_model_card`.""" """ To install it, please run `pip install Jinja2`.""" ) if hasattr(__a , """local_rank""" ) and args.local_rank not in [-1, 0]: return UpperCamelCase__ = args.hub_token if hasattr(__a , """hub_token""" ) else None UpperCamelCase__ = get_full_repo_name(__a , token=__a ) UpperCamelCase__ = ModelCard.from_template( card_data=ModelCardData( # Card metadata object that will be converted to YAML block language="""en""" , license="""apache-2.0""" , library_name="""diffusers""" , tags=[] , datasets=args.dataset_name , metrics=[] , ) , template_path=__a , model_name=__a , repo_name=__a , dataset_name=args.dataset_name if hasattr(__a , """dataset_name""" ) else None , learning_rate=args.learning_rate , train_batch_size=args.train_batch_size , eval_batch_size=args.eval_batch_size , gradient_accumulation_steps=( args.gradient_accumulation_steps if hasattr(__a , """gradient_accumulation_steps""" ) else None ) , adam_betaa=args.adam_betaa if 
hasattr(__a , """adam_beta1""" ) else None , adam_betaa=args.adam_betaa if hasattr(__a , """adam_beta2""" ) else None , adam_weight_decay=args.adam_weight_decay if hasattr(__a , """adam_weight_decay""" ) else None , adam_epsilon=args.adam_epsilon if hasattr(__a , """adam_epsilon""" ) else None , lr_scheduler=args.lr_scheduler if hasattr(__a , """lr_scheduler""" ) else None , lr_warmup_steps=args.lr_warmup_steps if hasattr(__a , """lr_warmup_steps""" ) else None , ema_inv_gamma=args.ema_inv_gamma if hasattr(__a , """ema_inv_gamma""" ) else None , ema_power=args.ema_power if hasattr(__a , """ema_power""" ) else None , ema_max_decay=args.ema_max_decay if hasattr(__a , """ema_max_decay""" ) else None , mixed_precision=args.mixed_precision , ) UpperCamelCase__ = os.path.join(args.output_dir , """README.md""" ) model_card.save(__a ) def __magic_name__ ( __a : Optional[str] , __a : Optional[str] = None ): '''simple docstring''' if resolved_file is None or commit_hash is not None: return commit_hash UpperCamelCase__ = str(Path(__a ).as_posix() ) UpperCamelCase__ = re.search(R"""snapshots/([^/]+)/""" , __a ) if search is None: return None UpperCamelCase__ = search.groups()[0] return commit_hash if REGEX_COMMIT_HASH.match(__a ) else None # Old default cache path, potentially to be migrated. # This logic was more or less taken from `transformers`, with the following differences: # - Diffusers doesn't use custom environment variables to specify the cache path. # - There is no need to migrate the cache format, just move the files to the new location. lowerCamelCase_ = os.path.expanduser( os.getenv('''HF_HOME''', os.path.join(os.getenv('''XDG_CACHE_HOME''', '''~/.cache'''), '''huggingface''')) ) lowerCamelCase_ = os.path.join(hf_cache_home, '''diffusers''') def __magic_name__ ( __a : Optional[str] = None , __a : Optional[str] = None ): '''simple docstring''' if new_cache_dir is None: UpperCamelCase__ = DIFFUSERS_CACHE if old_cache_dir is None: UpperCamelCase__ = old_diffusers_cache UpperCamelCase__ = Path(__a ).expanduser() UpperCamelCase__ = Path(__a ).expanduser() for old_blob_path in old_cache_dir.glob("""**/blobs/*""" ): if old_blob_path.is_file() and not old_blob_path.is_symlink(): UpperCamelCase__ = new_cache_dir / old_blob_path.relative_to(__a ) new_blob_path.parent.mkdir(parents=__a , exist_ok=__a ) os.replace(__a , __a ) try: os.symlink(__a , __a ) except OSError: logger.warning( """Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded.""" ) # At this point, old_cache_dir contains symlinks to the new cache (it can still be used). lowerCamelCase_ = os.path.join(DIFFUSERS_CACHE, '''version_diffusers_cache.txt''') if not os.path.isfile(cache_version_file): lowerCamelCase_ = 0 else: with open(cache_version_file) as f: try: lowerCamelCase_ = int(f.read()) except ValueError: lowerCamelCase_ = 0 if cache_version < 1: lowerCamelCase_ = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0 if old_cache_is_not_empty: logger.warning( '''The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your ''' '''existing cached models. 
This is a one-time operation, you can interrupt it or run it ''' '''later by calling `diffusers.utils.hub_utils.move_cache()`.''' ) try: move_cache() except Exception as e: lowerCamelCase_ = '''\n'''.join(traceback.format_tb(e.__traceback__)) logger.error( f'There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease ' '''file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole ''' '''message and we will do our best to help.''' ) if cache_version < 1: try: os.makedirs(DIFFUSERS_CACHE, exist_ok=True) with open(cache_version_file, '''w''') as f: f.write('''1''') except Exception: logger.warning( f'There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure ' '''the directory exists and can be written to.''' ) def __magic_name__ ( __a : str , __a : Optional[str] = None ): '''simple docstring''' if variant is not None: UpperCamelCase__ = weights_name.split(""".""" ) UpperCamelCase__ = splits[:-1] + [variant] + splits[-1:] UpperCamelCase__ = """.""".join(__a ) return weights_name def __magic_name__ ( __a : Optional[int] , *, __a : List[str] , __a : List[str] , __a : List[str] , __a : Tuple , __a : Any , __a : int , __a : Tuple , __a : List[Any] , __a : List[Any] , __a : List[Any] , __a : List[Any]=None , ): '''simple docstring''' UpperCamelCase__ = str(__a ) if os.path.isfile(__a ): return pretrained_model_name_or_path elif os.path.isdir(__a ): if os.path.isfile(os.path.join(__a , __a ) ): # Load from a PyTorch checkpoint UpperCamelCase__ = os.path.join(__a , __a ) return model_file elif subfolder is not None and os.path.isfile( os.path.join(__a , __a , __a ) ): UpperCamelCase__ = os.path.join(__a , __a , __a ) return model_file else: raise EnvironmentError( f"Error no file named {weights_name} found in directory {pretrained_model_name_or_path}." ) else: # 1. First check if deprecated way of loading from branches is used if ( revision in DEPRECATED_REVISION_ARGS and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME) and version.parse(version.parse(__a ).base_version ) >= version.parse("""0.20.0""" ) ): try: UpperCamelCase__ = hf_hub_download( __a , filename=_add_variant(__a , __a ) , cache_dir=__a , force_download=__a , proxies=__a , resume_download=__a , local_files_only=__a , use_auth_token=__a , user_agent=__a , subfolder=__a , revision=revision or commit_hash , ) warnings.warn( f"Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead." , __a , ) return model_file except: # noqa: E722 warnings.warn( f"You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(__a , __a )} file in the 'main' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(__a , __a )}' so that the correct variant file can be added." , __a , ) try: # 2. 
Load model file as usual UpperCamelCase__ = hf_hub_download( __a , filename=__a , cache_dir=__a , force_download=__a , proxies=__a , resume_download=__a , local_files_only=__a , use_auth_token=__a , user_agent=__a , subfolder=__a , revision=revision or commit_hash , ) return model_file except RepositoryNotFoundError: raise EnvironmentError( f"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier " """listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a """ """token having permission to this repo with `use_auth_token` or log in with `huggingface-cli """ """login`.""" ) except RevisionNotFoundError: raise EnvironmentError( f"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for " """this model name. Check the model page at """ f"'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions." ) except EntryNotFoundError: raise EnvironmentError( f"{pretrained_model_name_or_path} does not appear to have a file named {weights_name}." ) except HTTPError as err: raise EnvironmentError( f"There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}" ) except ValueError: raise EnvironmentError( f"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it" f" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a" f" directory containing a file named {weights_name} or" """ \nCheckout your internet connection or see how to run the library in""" """ offline mode at 'https://huggingface.co/docs/diffusers/installation#offline-mode'.""" ) except EnvironmentError: raise EnvironmentError( f"Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from " """'https://huggingface.co/models', make sure you don't have a local directory with the same name. """ f"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory " f"containing a file named {weights_name}" )
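The `_add_variant` helper above simply splices the variant name before the file extension. A quick sketch of that behavior, using the usual diffusers weight filenames as example inputs:

# Sketch of the variant-filename splicing done by _add_variant above.
def add_variant(weights_name, variant=None):
    if variant is not None:
        splits = weights_name.split(".")
        splits = splits[:-1] + [variant] + splits[-1:]
        weights_name = ".".join(splits)
    return weights_name

print(add_variant("diffusion_pytorch_model.bin", "fp16"))
# diffusion_pytorch_model.fp16.bin
print(add_variant("model.safetensors"))
# model.safetensors (unchanged when variant is None)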
86
import inspect from typing import Callable, List, Optional, Union import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from diffusers import DiffusionPipeline from diffusers.models import AutoencoderKL, UNetaDConditionModel from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler from diffusers.utils import logging lowerCamelCase_ = logging.get_logger(__name__) # pylint: disable=invalid-name class __A( __lowerCamelCase ): """simple docstring""" def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ): super().__init__() self.register_modules( vae=SCREAMING_SNAKE_CASE_ , text_encoder=SCREAMING_SNAKE_CASE_ , tokenizer=SCREAMING_SNAKE_CASE_ , unet=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ , safety_checker=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , ) def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ = "auto" ): if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory UpperCamelCase__ = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self ): self.enable_attention_slicing(SCREAMING_SNAKE_CASE_ ) @torch.no_grad() def __call__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 5_12 , SCREAMING_SNAKE_CASE_ = 5_12 , SCREAMING_SNAKE_CASE_ = 50 , SCREAMING_SNAKE_CASE_ = 7.5 , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 1 , SCREAMING_SNAKE_CASE_ = 0.0 , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = "pil" , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 1 , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , ): if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = 1 elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = len(SCREAMING_SNAKE_CASE_ ) else: raise ValueError(F"`prompt` has to be of type `str` or `list` but is {type(SCREAMING_SNAKE_CASE_ )}" ) if height % 8 != 0 or width % 8 != 0: raise ValueError(F"`height` and `width` have to be divisible by 8 but are {height} and {width}." ) if (callback_steps is None) or ( callback_steps is not None and (not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) or callback_steps <= 0) ): raise ValueError( F"`callback_steps` has to be a positive integer but is {callback_steps} of type" F" {type(SCREAMING_SNAKE_CASE_ )}." 
) # get prompt text embeddings UpperCamelCase__ = self.tokenizer( SCREAMING_SNAKE_CASE_ , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , ) UpperCamelCase__ = text_inputs.input_ids if text_input_ids.shape[-1] > self.tokenizer.model_max_length: UpperCamelCase__ = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] ) logger.warning( """The following part of your input was truncated because CLIP can only handle sequences up to""" F" {self.tokenizer.model_max_length} tokens: {removed_text}" ) UpperCamelCase__ = text_input_ids[:, : self.tokenizer.model_max_length] if text_embeddings is None: UpperCamelCase__ = self.text_encoder(text_input_ids.to(self.device ) )[0] # duplicate text embeddings for each generation per prompt, using mps friendly method UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = text_embeddings.shape UpperCamelCase__ = text_embeddings.repeat(1 , SCREAMING_SNAKE_CASE_ , 1 ) UpperCamelCase__ = text_embeddings.view(bs_embed * num_images_per_prompt , SCREAMING_SNAKE_CASE_ , -1 ) # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. UpperCamelCase__ = guidance_scale > 1.0 # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: UpperCamelCase__ = 42 if negative_prompt is None: UpperCamelCase__ = [""""""] elif type(SCREAMING_SNAKE_CASE_ ) is not type(SCREAMING_SNAKE_CASE_ ): raise TypeError( F"`negative_prompt` should be the same type to `prompt`, but got {type(SCREAMING_SNAKE_CASE_ )} !=" F" {type(SCREAMING_SNAKE_CASE_ )}." ) elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = [negative_prompt] elif batch_size != len(SCREAMING_SNAKE_CASE_ ): raise ValueError( F"`negative_prompt`: {negative_prompt} has batch size {len(SCREAMING_SNAKE_CASE_ )}, but `prompt`:" F" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" """ the batch size of `prompt`.""" ) else: UpperCamelCase__ = negative_prompt UpperCamelCase__ = text_input_ids.shape[-1] UpperCamelCase__ = self.tokenizer( SCREAMING_SNAKE_CASE_ , padding="""max_length""" , max_length=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , return_tensors="""pt""" , ) UpperCamelCase__ = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0] # duplicate unconditional embeddings for each generation per prompt, using mps friendly method UpperCamelCase__ = uncond_embeddings.shape[1] UpperCamelCase__ = uncond_embeddings.repeat(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , 1 ) UpperCamelCase__ = uncond_embeddings.view(batch_size * num_images_per_prompt , SCREAMING_SNAKE_CASE_ , -1 ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes UpperCamelCase__ = torch.cat([uncond_embeddings, text_embeddings] ) # get the initial random noise unless the user supplied it # Unlike in other pipelines, latents need to be generated in the target device # for 1-to-1 results reproducibility with the CompVis implementation. # However this currently doesn't work in `mps`. 
UpperCamelCase__ = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8) UpperCamelCase__ = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64) UpperCamelCase__ = text_embeddings.dtype if latents is None: if self.device.type == "mps": # randn does not exist on mps UpperCamelCase__ = torch.randn( SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , device="""cpu""" , dtype=SCREAMING_SNAKE_CASE_ ).to(self.device ) UpperCamelCase__ = torch.randn(SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , device="""cpu""" , dtype=SCREAMING_SNAKE_CASE_ ).to( self.device ) else: UpperCamelCase__ = torch.randn( SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , device=self.device , dtype=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = torch.randn(SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , device=self.device , dtype=SCREAMING_SNAKE_CASE_ ) else: if latents_reference.shape != latents_shape: raise ValueError(F"Unexpected latents shape, got {latents.shape}, expected {latents_shape}" ) UpperCamelCase__ = latents_reference.to(self.device ) UpperCamelCase__ = latents.to(self.device ) # This is the key part of the pipeline where we # try to ensure that the generated images w/ the same seed # but different sizes actually result in similar images UpperCamelCase__ = (latents_shape[3] - latents_shape_reference[3]) // 2 UpperCamelCase__ = (latents_shape[2] - latents_shape_reference[2]) // 2 UpperCamelCase__ = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx UpperCamelCase__ = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy UpperCamelCase__ = 0 if dx < 0 else dx UpperCamelCase__ = 0 if dy < 0 else dy UpperCamelCase__ = max(-dx , 0 ) UpperCamelCase__ = max(-dy , 0 ) # import pdb # pdb.set_trace() UpperCamelCase__ = latents_reference[:, :, dy : dy + h, dx : dx + w] # set timesteps self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ ) # Some schedulers like PNDM have timesteps as arrays # It's more optimized to move all timesteps to correct device beforehand UpperCamelCase__ = self.scheduler.timesteps.to(self.device ) # scale the initial noise by the standard deviation required by the scheduler UpperCamelCase__ = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] UpperCamelCase__ = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() ) UpperCamelCase__ = {} if accepts_eta: UpperCamelCase__ = eta for i, t in enumerate(self.progress_bar(SCREAMING_SNAKE_CASE_ ) ): # expand the latents if we are doing classifier free guidance UpperCamelCase__ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents UpperCamelCase__ = self.scheduler.scale_model_input(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # predict the noise residual UpperCamelCase__ = self.unet(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , encoder_hidden_states=SCREAMING_SNAKE_CASE_ ).sample # perform guidance if do_classifier_free_guidance: UpperCamelCase__ , UpperCamelCase__ = noise_pred.chunk(2 ) UpperCamelCase__ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 UpperCamelCase__ = self.scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ).prev_sample # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = 1 / 0.1_8215 * latents UpperCamelCase__ = self.vae.decode(SCREAMING_SNAKE_CASE_ ).sample UpperCamelCase__ = (image / 2 + 0.5).clamp(0 , 1 ) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 UpperCamelCase__ = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if self.safety_checker is not None: UpperCamelCase__ = self.feature_extractor(self.numpy_to_pil(SCREAMING_SNAKE_CASE_ ) , return_tensors="""pt""" ).to( self.device ) UpperCamelCase__ , UpperCamelCase__ = self.safety_checker( images=SCREAMING_SNAKE_CASE_ , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) ) else: UpperCamelCase__ = None if output_type == "pil": UpperCamelCase__ = self.numpy_to_pil(SCREAMING_SNAKE_CASE_ ) if not return_dict: return (image, has_nsfw_concept) return StableDiffusionPipelineOutput(images=SCREAMING_SNAKE_CASE_ , nsfw_content_detected=SCREAMING_SNAKE_CASE_ )
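The guidance step in the denoising loop above reduces to one line of tensor arithmetic. A toy sketch, with random tensors standing in for the two UNet predictions:

# Classifier-free guidance combination, as in the loop above (toy tensors).
import torch

noise_pred_uncond = torch.randn(1, 4, 64, 64)
noise_pred_text = torch.randn(1, 4, 64, 64)
guidance_scale = 7.5

noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# guidance_scale == 1.0 recovers the text-conditional prediction exactly:
assert torch.allclose(
    noise_pred_uncond + 1.0 * (noise_pred_text - noise_pred_uncond), noise_pred_text
)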
86
1
from typing import TYPE_CHECKING from ...utils import _LazyModule lowerCamelCase_ = {'''processing_wav2vec2_with_lm''': ['''Wav2Vec2ProcessorWithLM''']} if TYPE_CHECKING: from .processing_wavaveca_with_lm import WavaVecaProcessorWithLM else: import sys lowerCamelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
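The `_LazyModule` pattern above defers the heavy import until an attribute is first accessed. A generic analogue using module-level `__getattr__` (PEP 562); this is a sketch, not the transformers implementation:

# Generic lazy-import analogue of _LazyModule (sketch; goes in a package __init__.py).
import importlib

_import_structure = {"processing_wav2vec2_with_lm": ["Wav2Vec2ProcessorWithLM"]}

def __getattr__(name):
    for module, names in _import_structure.items():
        if name in names:
            return getattr(importlib.import_module(f".{module}", __name__), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")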
86
from ..utils import DummyObject, requires_backends class __A( metaclass=__lowerCamelCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = ["""torch""", """torchsde"""] def __init__(self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ): requires_backends(self , ["""torch""", """torchsde"""] ) @classmethod def UpperCAmelCase_ (cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ): requires_backends(cls , ["""torch""", """torchsde"""] ) @classmethod def UpperCAmelCase_ (cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ): requires_backends(cls , ["""torch""", """torchsde"""] )
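A minimal version of the `requires_backends` guard these dummy objects rely on. This is a simplified sketch; the real diffusers helper also checks versions and formats friendlier install messages.

# Sketch of a requires_backends-style guard (simplified).
import importlib.util

def requires_backends(obj, backends):
    name = obj.__name__ if hasattr(obj, "__name__") else type(obj).__name__
    missing = [b for b in backends if importlib.util.find_spec(b) is None]
    if missing:
        raise ImportError(f"{name} requires the missing backends: {missing}")

class BrownianTreeNoiseSampler:  # dummy stand-in, mirroring the class above
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "torchsde"])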
86
1
import argparse import logging import pickle import random import time import numpy as np from transformers import BertTokenizer, GPTaTokenizer, RobertaTokenizer logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO ) lowerCamelCase_ = logging.getLogger(__name__) def __magic_name__ ( ): '''simple docstring''' UpperCamelCase__ = argparse.ArgumentParser( description="""Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids).""" ) parser.add_argument("""--file_path""" , type=__a , default="""data/dump.txt""" , help="""The path to the data.""" ) parser.add_argument("""--tokenizer_type""" , type=__a , default="""bert""" , choices=["""bert""", """roberta""", """gpt2"""] ) parser.add_argument("""--tokenizer_name""" , type=__a , default="""bert-base-uncased""" , help="""The tokenizer to use.""" ) parser.add_argument("""--dump_file""" , type=__a , default="""data/dump""" , help="""The dump file prefix.""" ) UpperCamelCase__ = parser.parse_args() logger.info(f"Loading Tokenizer ({args.tokenizer_name})" ) if args.tokenizer_type == "bert": UpperCamelCase__ = BertTokenizer.from_pretrained(args.tokenizer_name ) UpperCamelCase__ = tokenizer.special_tokens_map["""cls_token"""] # `[CLS]` UpperCamelCase__ = tokenizer.special_tokens_map["""sep_token"""] # `[SEP]` elif args.tokenizer_type == "roberta": UpperCamelCase__ = RobertaTokenizer.from_pretrained(args.tokenizer_name ) UpperCamelCase__ = tokenizer.special_tokens_map["""cls_token"""] # `<s>` UpperCamelCase__ = tokenizer.special_tokens_map["""sep_token"""] # `</s>` elif args.tokenizer_type == "gpt2": UpperCamelCase__ = GPTaTokenizer.from_pretrained(args.tokenizer_name ) UpperCamelCase__ = tokenizer.special_tokens_map["""bos_token"""] # `<|endoftext|>` UpperCamelCase__ = tokenizer.special_tokens_map["""eos_token"""] # `<|endoftext|>` logger.info(f"Loading text from {args.file_path}" ) with open(args.file_path , """r""" , encoding="""utf8""" ) as fp: UpperCamelCase__ = fp.readlines() logger.info("""Start encoding""" ) logger.info(f"{len(__a )} examples to process." ) UpperCamelCase__ = [] UpperCamelCase__ = 0 UpperCamelCase__ = 10_000 UpperCamelCase__ = time.time() for text in data: UpperCamelCase__ = f"{bos} {text.strip()} {sep}" UpperCamelCase__ = tokenizer.encode(__a , add_special_tokens=__a ) rslt.append(__a ) iter += 1 if iter % interval == 0: UpperCamelCase__ = time.time() logger.info(f"{iter} examples processed. - {(end-start):.2f}s/{interval}expl" ) UpperCamelCase__ = time.time() logger.info("""Finished binarization""" ) logger.info(f"{len(__a )} examples processed." ) UpperCamelCase__ = f"{args.dump_file}.{args.tokenizer_name}.pickle" UpperCamelCase__ = tokenizer.vocab_size if vocab_size < (1 << 16): UpperCamelCase__ = [np.uintaa(__a ) for d in rslt] else: UpperCamelCase__ = [np.intaa(__a ) for d in rslt] random.shuffle(rslt_ ) logger.info(f"Dump to {dp_file}" ) with open(__a , """wb""" ) as handle: pickle.dump(rslt_ , __a , protocol=pickle.HIGHEST_PROTOCOL ) if __name__ == "__main__": main()
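The dtype choice at the end of the script above is just a range check: token ids fit in an unsigned 16-bit integer only when vocab_size < 2**16. A sketch, where the 100_000-entry vocabulary is a hypothetical example:

# Sketch of the dtype selection used before pickling the token-id arrays.
import numpy as np

def pack_ids(sequences, vocab_size):
    dtype = np.uint16 if vocab_size < (1 << 16) else np.int32
    return [np.asarray(seq, dtype=dtype) for seq in sequences]

bert_like = pack_ids([[101, 2023, 102]], vocab_size=30_522)   # BERT-sized vocab
big_vocab = pack_ids([[70_000, 3]], vocab_size=100_000)       # hypothetical large vocab
print(bert_like[0].dtype, big_vocab[0].dtype)  # uint16 int32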
86
from __future__ import annotations from typing import TypedDict class __A( __lowerCamelCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = 42 SCREAMING_SNAKE_CASE__ = 42 def __magic_name__ ( __a : str ): '''simple docstring''' if not isinstance(__a , __a ): raise TypeError("""The parameter s type must be str.""" ) return [s[i:] + s[:i] for i in range(len(__a ) )] def __magic_name__ ( __a : str ): '''simple docstring''' if not isinstance(__a , __a ): raise TypeError("""The parameter s type must be str.""" ) if not s: raise ValueError("""The parameter s must not be empty.""" ) UpperCamelCase__ = all_rotations(__a ) rotations.sort() # sort the list of rotations in alphabetically order # make a string composed of the last char of each rotation UpperCamelCase__ = { "bwt_string": "".join([word[-1] for word in rotations] ), "idx_original_string": rotations.index(__a ), } return response def __magic_name__ ( __a : str , __a : int ): '''simple docstring''' if not isinstance(__a , __a ): raise TypeError("""The parameter bwt_string type must be str.""" ) if not bwt_string: raise ValueError("""The parameter bwt_string must not be empty.""" ) try: UpperCamelCase__ = int(__a ) except ValueError: raise TypeError( """The parameter idx_original_string type must be int or passive""" """ of cast to int.""" ) if idx_original_string < 0: raise ValueError("""The parameter idx_original_string must not be lower than 0.""" ) if idx_original_string >= len(__a ): raise ValueError( """The parameter idx_original_string must be lower than""" """ len(bwt_string).""" ) UpperCamelCase__ = [""""""] * len(__a ) for _ in range(len(__a ) ): for i in range(len(__a ) ): UpperCamelCase__ = bwt_string[i] + ordered_rotations[i] ordered_rotations.sort() return ordered_rotations[idx_original_string] if __name__ == "__main__": lowerCamelCase_ = '''Provide a string that I will generate its BWT transform: ''' lowerCamelCase_ = input(entry_msg).strip() lowerCamelCase_ = bwt_transform(s) print( f'Burrows Wheeler transform for string \'{s}\' results ' f'in \'{result["bwt_string"]}\'' ) lowerCamelCase_ = reverse_bwt(result['''bwt_string'''], result['''idx_original_string''']) print( f'Reversing Burrows Wheeler transform for entry \'{result["bwt_string"]}\' ' f'we get original string \'{original_string}\'' )
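A worked example of the transform defined above: for s = "banana" the sorted rotations end in "nnbaaa" and the original string sits at index 3.

# Worked example for bwt_transform / reverse_bwt above.
s = "banana"
rotations = sorted(s[i:] + s[:i] for i in range(len(s)))
# ['abanan', 'anaban', 'ananab', 'banana', 'nabana', 'nanaba']
bwt_string = "".join(word[-1] for word in rotations)  # 'nnbaaa'
idx_original_string = rotations.index(s)              # 3
print(bwt_string, idx_original_string)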
86
1
from typing import Optional, Union import torch from torch import nn from ...configuration_utils import ConfigMixin, register_to_config from ...models.modeling_utils import ModelMixin class __A( __lowerCamelCase , __lowerCamelCase ): """simple docstring""" @register_to_config def __init__(self , SCREAMING_SNAKE_CASE_ = 7_68 , ): super().__init__() UpperCamelCase__ = nn.Parameter(torch.zeros(1 , SCREAMING_SNAKE_CASE_ ) ) UpperCamelCase__ = nn.Parameter(torch.ones(1 , SCREAMING_SNAKE_CASE_ ) ) def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , ): UpperCamelCase__ = nn.Parameter(self.mean.to(SCREAMING_SNAKE_CASE_ ).to(SCREAMING_SNAKE_CASE_ ) ) UpperCamelCase__ = nn.Parameter(self.std.to(SCREAMING_SNAKE_CASE_ ).to(SCREAMING_SNAKE_CASE_ ) ) return self def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = (embeds - self.mean) * 1.0 / self.std return embeds def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = (embeds * self.std) + self.mean return embeds
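The scale/unscale pair above is an exact inverse of itself. A toy round trip, with the embedding dimension shrunk for the example:

# Round-trip check of the (x - mean) / std scaling implemented above.
import torch

mean, std = torch.zeros(1, 4), torch.ones(1, 4) * 2.0  # toy statistics
embeds = torch.randn(3, 4)

scaled = (embeds - mean) / std
restored = scaled * std + mean
assert torch.allclose(restored, embeds, atol=1e-6)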
86
import os from datetime import datetime as dt from github import Github lowerCamelCase_ = [ '''good first issue''', '''good second issue''', '''good difficult issue''', '''enhancement''', '''new pipeline/model''', '''new scheduler''', '''wip''', ] def __magic_name__ ( ): '''simple docstring''' UpperCamelCase__ = Github(os.environ["""GITHUB_TOKEN"""] ) UpperCamelCase__ = g.get_repo("""huggingface/diffusers""" ) UpperCamelCase__ = repo.get_issues(state="""open""" ) for issue in open_issues: UpperCamelCase__ = sorted(issue.get_comments() , key=lambda __a : i.created_at , reverse=__a ) UpperCamelCase__ = comments[0] if len(__a ) > 0 else None if ( last_comment is not None and last_comment.user.login == "github-actions[bot]" and (dt.utcnow() - issue.updated_at).days > 7 and (dt.utcnow() - issue.created_at).days >= 30 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() ) ): # Closes the issue after 7 days of inactivity since the Stalebot notification. issue.edit(state="""closed""" ) elif ( "stale" in issue.get_labels() and last_comment is not None and last_comment.user.login != "github-actions[bot]" ): # Opens the issue if someone other than Stalebot commented. issue.edit(state="""open""" ) issue.remove_from_labels("""stale""" ) elif ( (dt.utcnow() - issue.updated_at).days > 23 and (dt.utcnow() - issue.created_at).days >= 30 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() ) ): # Post a Stalebot notification after 23 days of inactivity. issue.create_comment( """This issue has been automatically marked as stale because it has not had """ """recent activity. If you think this still needs to be addressed """ """please comment on this thread.\n\nPlease note that issues that do not follow the """ """[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) """ """are likely to be ignored.""" ) issue.add_to_labels("""stale""" ) if __name__ == "__main__": main()
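The bot's closing rule above combines three day counts. Extracted as a pure predicate it is easy to test; the thresholds are copied from the script, the dates are toy values, and the last-comment-by-bot condition is omitted for brevity.

# Staleness predicate distilled from the bot logic above (toy dates).
from datetime import datetime, timedelta

def should_close(now, updated_at, created_at, exempt):
    return (not exempt
            and (now - updated_at).days > 7
            and (now - created_at).days >= 30)

now = datetime(2023, 6, 1)
print(should_close(now, now - timedelta(days=10), now - timedelta(days=40), False))  # True
print(should_close(now, now - timedelta(days=2), now - timedelta(days=40), False))   # False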
86
1
import inspect import unittest from transformers import RegNetConfig, is_flax_available from transformers.testing_utils import require_flax, slow from transformers.utils import cached_property, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor if is_flax_available(): import jax import jax.numpy as jnp from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class __A( unittest.TestCase ): """simple docstring""" def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=10 , SCREAMING_SNAKE_CASE_=[10, 20, 30, 40] , SCREAMING_SNAKE_CASE_=[1, 1, 2, 1] , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_="relu" , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=None , ): UpperCamelCase__ = parent UpperCamelCase__ = batch_size UpperCamelCase__ = image_size UpperCamelCase__ = num_channels UpperCamelCase__ = embeddings_size UpperCamelCase__ = hidden_sizes UpperCamelCase__ = depths UpperCamelCase__ = is_training UpperCamelCase__ = use_labels UpperCamelCase__ = hidden_act UpperCamelCase__ = num_labels UpperCamelCase__ = scope UpperCamelCase__ = len(SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self ): UpperCamelCase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCamelCase__ = self.get_config() return config, pixel_values def UpperCAmelCase_ (self ): return RegNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , ) def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = FlaxRegNetModel(config=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ ) # Output shape (b, c, h, w) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = self.num_labels UpperCamelCase__ = FlaxRegNetForImageClassification(config=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCAmelCase_ (self ): UpperCamelCase__ = self.prepare_config_and_inputs() UpperCamelCase__ , UpperCamelCase__ = config_and_inputs UpperCamelCase__ = {"""pixel_values""": pixel_values} return config, inputs_dict @require_flax class __A( __lowerCamelCase , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else () SCREAMING_SNAKE_CASE__ = False SCREAMING_SNAKE_CASE__ = False SCREAMING_SNAKE_CASE__ = False def UpperCAmelCase_ (self ): UpperCamelCase__ = FlaxRegNetModelTester(self ) UpperCamelCase__ = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , has_text_modality=SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self ): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() 
self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def UpperCAmelCase_ (self ): return def UpperCAmelCase_ (self ): UpperCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self ): UpperCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE_ ) @unittest.skip(reason="""RegNet does not use inputs_embeds""" ) def UpperCAmelCase_ (self ): pass @unittest.skip(reason="""RegNet does not support input and output embeddings""" ) def UpperCAmelCase_ (self ): pass def UpperCAmelCase_ (self ): UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase__ = model_class(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = inspect.signature(model.__call__ ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCamelCase__ = [*signature.parameters.keys()] UpperCamelCase__ = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self ): def check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = model_class(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) UpperCamelCase__ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states UpperCamelCase__ = self.model_tester.num_stages self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , expected_num_stages + 1 ) UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase__ = True check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] UpperCamelCase__ = True check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self ): UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): UpperCamelCase__ = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = model_class(SCREAMING_SNAKE_CASE_ ) @jax.jit def model_jitted(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ): return model(pixel_values=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) with self.subTest("""JIT Enabled""" ): UpperCamelCase__ = model_jitted(**SCREAMING_SNAKE_CASE_ ).to_tuple() with self.subTest("""JIT Disabled""" ): with jax.disable_jit(): UpperCamelCase__ = model_jitted(**SCREAMING_SNAKE_CASE_ ).to_tuple() self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , len(SCREAMING_SNAKE_CASE_ ) ) for jitted_output, output in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): self.assertEqual(jitted_output.shape , output.shape ) def __magic_name__ ( ): '''simple docstring''' UpperCamelCase__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_flax class __A( unittest.TestCase ): """simple docstring""" @cached_property def UpperCAmelCase_ 
(self ): return AutoImageProcessor.from_pretrained("""facebook/regnet-y-040""" ) if is_vision_available() else None @slow def UpperCAmelCase_ (self ): UpperCamelCase__ = FlaxRegNetForImageClassification.from_pretrained("""facebook/regnet-y-040""" ) UpperCamelCase__ = self.default_image_processor UpperCamelCase__ = prepare_img() UpperCamelCase__ = image_processor(images=SCREAMING_SNAKE_CASE_ , return_tensors="""np""" ) UpperCamelCase__ = model(**SCREAMING_SNAKE_CASE_ ) # verify the logits UpperCamelCase__ = (1, 10_00) self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = jnp.array([-0.4180, -1.5051, -3.4836] ) self.assertTrue(jnp.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE_ , atol=1E-4 ) )
86
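The Flax test above compiles the model's forward pass with jax.jit and asserts the traced outputs match eager execution. A minimal, self-contained sketch of that JIT-consistency pattern, using a toy Flax module instead of RegNet so it runs without any checkpoint:

import jax
import jax.numpy as jnp
import flax.linen as nn


class TinyNet(nn.Module):
    # stand-in for the real model; one dense layer is enough to exercise tracing
    @nn.compact
    def __call__(self, x):
        return nn.Dense(4)(x)


model = TinyNet()
x = jnp.ones((2, 8))
params = model.init(jax.random.PRNGKey(0), x)


@jax.jit
def forward(params, x):
    return model.apply(params, x)


with jax.disable_jit():
    eager_out = forward(params, x)  # interpreted, op by op
jit_out = forward(params, x)  # compiled through XLA

assert jit_out.shape == eager_out.shape
assert jnp.allclose(jit_out, eager_out, atol=1e-6)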
import inspect from typing import List, Optional, Tuple, Union import numpy as np import PIL import torch import torch.utils.checkpoint from ...models import UNetaDModel, VQModel from ...schedulers import ( DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, ) from ...utils import PIL_INTERPOLATION, randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput def __magic_name__ ( __a : Optional[Any] ): '''simple docstring''' UpperCamelCase__ , UpperCamelCase__ = image.size UpperCamelCase__ , UpperCamelCase__ = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32 UpperCamelCase__ = image.resize((w, h) , resample=PIL_INTERPOLATION["""lanczos"""] ) UpperCamelCase__ = np.array(__a ).astype(np.floataa ) / 255.0 UpperCamelCase__ = image[None].transpose(0 , 3 , 1 , 2 ) UpperCamelCase__ = torch.from_numpy(__a ) return 2.0 * image - 1.0 class __A( __lowerCamelCase ): """simple docstring""" def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ): super().__init__() self.register_modules(vqvae=SCREAMING_SNAKE_CASE_ , unet=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ ) @torch.no_grad() def __call__(self , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 1 , SCREAMING_SNAKE_CASE_ = 1_00 , SCREAMING_SNAKE_CASE_ = 0.0 , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = "pil" , SCREAMING_SNAKE_CASE_ = True , ): if isinstance(SCREAMING_SNAKE_CASE_ , PIL.Image.Image ): UpperCamelCase__ = 1 elif isinstance(SCREAMING_SNAKE_CASE_ , torch.Tensor ): UpperCamelCase__ = image.shape[0] else: raise ValueError(F"`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(SCREAMING_SNAKE_CASE_ )}" ) if isinstance(SCREAMING_SNAKE_CASE_ , PIL.Image.Image ): UpperCamelCase__ = preprocess(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ , UpperCamelCase__ = image.shape[-2:] # in_channels should be 6: 3 for latents, 3 for low resolution image UpperCamelCase__ = (batch_size, self.unet.config.in_channels // 2, height, width) UpperCamelCase__ = next(self.unet.parameters() ).dtype UpperCamelCase__ = randn_tensor(SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , device=self.device , dtype=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = image.to(device=self.device , dtype=SCREAMING_SNAKE_CASE_ ) # set timesteps and move to the correct device self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ , device=self.device ) UpperCamelCase__ = self.scheduler.timesteps # scale the initial noise by the standard deviation required by the scheduler UpperCamelCase__ = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature. # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] UpperCamelCase__ = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() ) UpperCamelCase__ = {} if accepts_eta: UpperCamelCase__ = eta for t in self.progress_bar(SCREAMING_SNAKE_CASE_ ): # concat latents and low resolution image in the channel dimension. 
UpperCamelCase__ = torch.cat([latents, image] , dim=1 ) UpperCamelCase__ = self.scheduler.scale_model_input(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # predict the noise residual UpperCamelCase__ = self.unet(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).sample # compute the previous noisy sample x_t -> x_t-1 UpperCamelCase__ = self.scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ).prev_sample # decode the image latents with the VQVAE UpperCamelCase__ = self.vqvae.decode(SCREAMING_SNAKE_CASE_ ).sample UpperCamelCase__ = torch.clamp(SCREAMING_SNAKE_CASE_ , -1.0 , 1.0 ) UpperCamelCase__ = image / 2 + 0.5 UpperCamelCase__ = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": UpperCamelCase__ = self.numpy_to_pil(SCREAMING_SNAKE_CASE_ ) if not return_dict: return (image,) return ImagePipelineOutput(images=SCREAMING_SNAKE_CASE_ )
86
1
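The pipeline's preprocess helper above follows a standard recipe: snap the PIL image down to a multiple of 32 on each side, scale pixel values to [0, 1], move channels first, and map into [-1, 1] for the VAE. A readable sketch of the same steps (assumes Pillow >= 9.1 for Image.Resampling):

import numpy as np
import torch
from PIL import Image


def preprocess(image: Image.Image) -> torch.Tensor:
    w, h = image.size
    w, h = (x - x % 32 for x in (w, h))  # snap down to a multiple of 32
    image = image.resize((w, h), resample=Image.Resampling.LANCZOS)
    arr = np.array(image).astype(np.float32) / 255.0  # [0, 255] -> [0, 1]
    arr = arr[None].transpose(0, 3, 1, 2)  # HWC -> NCHW with a batch dim
    return 2.0 * torch.from_numpy(arr) - 1.0  # [0, 1] -> [-1, 1]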
import json import os from dataclasses import dataclass from functools import partial from typing import Callable import flax.linen as nn import jax import jax.numpy as jnp import joblib import optax import wandb from flax import jax_utils, struct, traverse_util from flax.serialization import from_bytes, to_bytes from flax.training import train_state from flax.training.common_utils import shard from tqdm.auto import tqdm from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule class __A( __lowerCamelCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = 42 SCREAMING_SNAKE_CASE__ = jnp.floataa SCREAMING_SNAKE_CASE__ = True def UpperCAmelCase_ (self ): super().setup() UpperCamelCase__ = nn.Dense(5 , dtype=self.dtype ) def __call__(self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = super().__call__(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = self.cls(outputs[2] ) return outputs[:2] + (cls_out,) class __A( __lowerCamelCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = FlaxBigBirdForNaturalQuestionsModule def __magic_name__ ( __a : Union[str, Any] , __a : Optional[Any] , __a : Union[str, Any] , __a : Dict , __a : Optional[Any] , __a : Optional[int] ): '''simple docstring''' def cross_entropy(__a : Optional[Any] , __a : List[str] , __a : str=None ): UpperCamelCase__ = logits.shape[-1] UpperCamelCase__ = (labels[..., None] == jnp.arange(__a )[None]).astype("""f4""" ) UpperCamelCase__ = jax.nn.log_softmax(__a , axis=-1 ) UpperCamelCase__ = -jnp.sum(labels * logits , axis=-1 ) if reduction is not None: UpperCamelCase__ = reduction(__a ) return loss UpperCamelCase__ = partial(__a , reduction=jnp.mean ) UpperCamelCase__ = cross_entropy(__a , __a ) UpperCamelCase__ = cross_entropy(__a , __a ) UpperCamelCase__ = cross_entropy(__a , __a ) return (start_loss + end_loss + pooled_loss) / 3 @dataclass class __A: """simple docstring""" SCREAMING_SNAKE_CASE__ = "google/bigbird-roberta-base" SCREAMING_SNAKE_CASE__ = 3000 SCREAMING_SNAKE_CASE__ = 10500 SCREAMING_SNAKE_CASE__ = 128 SCREAMING_SNAKE_CASE__ = 3 SCREAMING_SNAKE_CASE__ = 1 SCREAMING_SNAKE_CASE__ = 5 # tx_args SCREAMING_SNAKE_CASE__ = 3e-5 SCREAMING_SNAKE_CASE__ = 0.0 SCREAMING_SNAKE_CASE__ = 20000 SCREAMING_SNAKE_CASE__ = 0.0095 SCREAMING_SNAKE_CASE__ = "bigbird-roberta-natural-questions" SCREAMING_SNAKE_CASE__ = "training-expt" SCREAMING_SNAKE_CASE__ = "data/nq-training.jsonl" SCREAMING_SNAKE_CASE__ = "data/nq-validation.jsonl" def UpperCAmelCase_ (self ): os.makedirs(self.base_dir , exist_ok=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = os.path.join(self.base_dir , self.save_dir ) UpperCamelCase__ = self.batch_size_per_device * jax.device_count() @dataclass class __A: """simple docstring""" SCREAMING_SNAKE_CASE__ = 42 SCREAMING_SNAKE_CASE__ = 4096 # no dynamic padding on TPUs def __call__(self , SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = self.collate_fn(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = jax.tree_util.tree_map(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) return batch def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ , UpperCamelCase__ = self.fetch_inputs(features["""input_ids"""] ) UpperCamelCase__ = { """input_ids""": jnp.array(SCREAMING_SNAKE_CASE_ , dtype=jnp.intaa ), """attention_mask""": jnp.array(SCREAMING_SNAKE_CASE_ , dtype=jnp.intaa ), """start_labels""": jnp.array(features["""start_token"""] , dtype=jnp.intaa ), """end_labels""": 
jnp.array(features["""end_token"""] , dtype=jnp.intaa ), """pooled_labels""": jnp.array(features["""category"""] , dtype=jnp.intaa ), } return batch def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = [self._fetch_inputs(SCREAMING_SNAKE_CASE_ ) for ids in input_ids] return zip(*SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = [1 for _ in range(len(SCREAMING_SNAKE_CASE_ ) )] while len(SCREAMING_SNAKE_CASE_ ) < self.max_length: input_ids.append(self.pad_id ) attention_mask.append(0 ) return input_ids, attention_mask def __magic_name__ ( __a : List[Any] , __a : str , __a : Tuple=None ): '''simple docstring''' if seed is not None: UpperCamelCase__ = dataset.shuffle(seed=__a ) for i in range(len(__a ) // batch_size ): UpperCamelCase__ = dataset[i * batch_size : (i + 1) * batch_size] yield dict(__a ) @partial(jax.pmap , axis_name="""batch""" ) def __magic_name__ ( __a : Any , __a : Dict , **__a : Union[str, Any] ): '''simple docstring''' def loss_fn(__a : Optional[Any] ): UpperCamelCase__ = model_inputs.pop("""start_labels""" ) UpperCamelCase__ = model_inputs.pop("""end_labels""" ) UpperCamelCase__ = model_inputs.pop("""pooled_labels""" ) UpperCamelCase__ = state.apply_fn(**__a , params=__a , dropout_rng=__a , train=__a ) UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = outputs return state.loss_fn( __a , __a , __a , __a , __a , __a , ) UpperCamelCase__ , UpperCamelCase__ = jax.random.split(__a ) UpperCamelCase__ = jax.value_and_grad(__a ) UpperCamelCase__ , UpperCamelCase__ = grad_fn(state.params ) UpperCamelCase__ = jax.lax.pmean({"""loss""": loss} , axis_name="""batch""" ) UpperCamelCase__ = jax.lax.pmean(__a , """batch""" ) UpperCamelCase__ = state.apply_gradients(grads=__a ) return state, metrics, new_drp_rng @partial(jax.pmap , axis_name="""batch""" ) def __magic_name__ ( __a : int , **__a : List[str] ): '''simple docstring''' UpperCamelCase__ = model_inputs.pop("""start_labels""" ) UpperCamelCase__ = model_inputs.pop("""end_labels""" ) UpperCamelCase__ = model_inputs.pop("""pooled_labels""" ) UpperCamelCase__ = state.apply_fn(**__a , params=state.params , train=__a ) UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = outputs UpperCamelCase__ = state.loss_fn(__a , __a , __a , __a , __a , __a ) UpperCamelCase__ = jax.lax.pmean({"""loss""": loss} , axis_name="""batch""" ) return metrics class __A( train_state.TrainState ): """simple docstring""" SCREAMING_SNAKE_CASE__ = struct.field(pytree_node=__lowerCamelCase ) @dataclass class __A: """simple docstring""" SCREAMING_SNAKE_CASE__ = 42 SCREAMING_SNAKE_CASE__ = 42 SCREAMING_SNAKE_CASE__ = 42 SCREAMING_SNAKE_CASE__ = 42 SCREAMING_SNAKE_CASE__ = 42 SCREAMING_SNAKE_CASE__ = 42 SCREAMING_SNAKE_CASE__ = None def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None ): UpperCamelCase__ = model.params UpperCamelCase__ = TrainState.create( apply_fn=model.__call__ , params=SCREAMING_SNAKE_CASE_ , tx=SCREAMING_SNAKE_CASE_ , loss_fn=SCREAMING_SNAKE_CASE_ , ) if ckpt_dir is not None: UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = restore_checkpoint(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = { """lr""": args.lr, """init_lr""": args.init_lr, """warmup_steps""": args.warmup_steps, """num_train_steps""": num_train_steps, """weight_decay""": args.weight_decay, } UpperCamelCase__ , UpperCamelCase__ = build_tx(**SCREAMING_SNAKE_CASE_ ) 
UpperCamelCase__ = train_state.TrainState( step=SCREAMING_SNAKE_CASE_ , apply_fn=model.__call__ , params=SCREAMING_SNAKE_CASE_ , tx=SCREAMING_SNAKE_CASE_ , opt_state=SCREAMING_SNAKE_CASE_ , ) UpperCamelCase__ = args UpperCamelCase__ = data_collator UpperCamelCase__ = lr UpperCamelCase__ = params UpperCamelCase__ = jax_utils.replicate(SCREAMING_SNAKE_CASE_ ) return state def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = self.args UpperCamelCase__ = len(SCREAMING_SNAKE_CASE_ ) // args.batch_size UpperCamelCase__ = jax.random.PRNGKey(0 ) UpperCamelCase__ = jax.random.split(SCREAMING_SNAKE_CASE_ , jax.device_count() ) for epoch in range(args.max_epochs ): UpperCamelCase__ = jnp.array(0 , dtype=jnp.floataa ) UpperCamelCase__ = get_batched_dataset(SCREAMING_SNAKE_CASE_ , args.batch_size , seed=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = 0 for batch in tqdm(SCREAMING_SNAKE_CASE_ , total=SCREAMING_SNAKE_CASE_ , desc=F"Running EPOCH-{epoch}" ): UpperCamelCase__ = self.data_collator(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = self.train_step_fn(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) running_loss += jax_utils.unreplicate(metrics["""loss"""] ) i += 1 if i % args.logging_steps == 0: UpperCamelCase__ = jax_utils.unreplicate(state.step ) UpperCamelCase__ = running_loss.item() / i UpperCamelCase__ = self.scheduler_fn(state_step - 1 ) UpperCamelCase__ = self.evaluate(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = { """step""": state_step.item(), """eval_loss""": eval_loss.item(), """tr_loss""": tr_loss, """lr""": lr.item(), } tqdm.write(str(SCREAMING_SNAKE_CASE_ ) ) self.logger.log(SCREAMING_SNAKE_CASE_ , commit=SCREAMING_SNAKE_CASE_ ) if i % args.save_steps == 0: self.save_checkpoint(args.save_dir + F"-e{epoch}-s{i}" , state=SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = get_batched_dataset(SCREAMING_SNAKE_CASE_ , self.args.batch_size ) UpperCamelCase__ = len(SCREAMING_SNAKE_CASE_ ) // self.args.batch_size UpperCamelCase__ = jnp.array(0 , dtype=jnp.floataa ) UpperCamelCase__ = 0 for batch in tqdm(SCREAMING_SNAKE_CASE_ , total=SCREAMING_SNAKE_CASE_ , desc="""Evaluating ... """ ): UpperCamelCase__ = self.data_collator(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = self.val_step_fn(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) running_loss += jax_utils.unreplicate(metrics["""loss"""] ) i += 1 return running_loss / i def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = jax_utils.unreplicate(SCREAMING_SNAKE_CASE_ ) print(F"SAVING CHECKPOINT IN {save_dir}" , end=""" ... """ ) self.model_save_fn(SCREAMING_SNAKE_CASE_ , params=state.params ) with open(os.path.join(SCREAMING_SNAKE_CASE_ , """opt_state.msgpack""" ) , """wb""" ) as f: f.write(to_bytes(state.opt_state ) ) joblib.dump(self.args , os.path.join(SCREAMING_SNAKE_CASE_ , """args.joblib""" ) ) joblib.dump(self.data_collator , os.path.join(SCREAMING_SNAKE_CASE_ , """data_collator.joblib""" ) ) with open(os.path.join(SCREAMING_SNAKE_CASE_ , """training_state.json""" ) , """w""" ) as f: json.dump({"""step""": state.step.item()} , SCREAMING_SNAKE_CASE_ ) print("""DONE""" ) def __magic_name__ ( __a : List[str] , __a : str ): '''simple docstring''' print(f"RESTORING CHECKPOINT FROM {save_dir}" , end=""" ... 
""" ) with open(os.path.join(__a , """flax_model.msgpack""" ) , """rb""" ) as f: UpperCamelCase__ = from_bytes(state.params , f.read() ) with open(os.path.join(__a , """opt_state.msgpack""" ) , """rb""" ) as f: UpperCamelCase__ = from_bytes(state.opt_state , f.read() ) UpperCamelCase__ = joblib.load(os.path.join(__a , """args.joblib""" ) ) UpperCamelCase__ = joblib.load(os.path.join(__a , """data_collator.joblib""" ) ) with open(os.path.join(__a , """training_state.json""" ) , """r""" ) as f: UpperCamelCase__ = json.load(__a ) UpperCamelCase__ = training_state["""step"""] print("""DONE""" ) return params, opt_state, step, args, data_collator def __magic_name__ ( __a : Dict , __a : int , __a : Tuple , __a : Tuple ): '''simple docstring''' UpperCamelCase__ = num_train_steps - warmup_steps UpperCamelCase__ = optax.linear_schedule(init_value=__a , end_value=__a , transition_steps=__a ) UpperCamelCase__ = optax.linear_schedule(init_value=__a , end_value=1E-7 , transition_steps=__a ) UpperCamelCase__ = optax.join_schedules(schedules=[warmup_fn, decay_fn] , boundaries=[warmup_steps] ) return lr def __magic_name__ ( __a : int , __a : List[Any] , __a : int , __a : Optional[int] , __a : Optional[Any] ): '''simple docstring''' def weight_decay_mask(__a : Tuple ): UpperCamelCase__ = traverse_util.flatten_dict(__a ) UpperCamelCase__ = {k: (v[-1] != """bias""" and v[-2:] != ("""LayerNorm""", """scale""")) for k, v in params.items()} return traverse_util.unflatten_dict(__a ) UpperCamelCase__ = scheduler_fn(__a , __a , __a , __a ) UpperCamelCase__ = optax.adamw(learning_rate=__a , weight_decay=__a , mask=__a ) return tx, lr
86
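Under the renamed helpers, the training script's loss is one-hot cross-entropy applied three times (start logits, end logits, pooled category logits) and averaged. A minimal JAX sketch of that loss; the shapes below are illustrative, not taken from the script:

import jax
import jax.numpy as jnp


def cross_entropy(logits, labels, reduction=None):
    vocab = logits.shape[-1]
    one_hot = (labels[..., None] == jnp.arange(vocab)[None]).astype("f4")
    loss = -jnp.sum(one_hot * jax.nn.log_softmax(logits, axis=-1), axis=-1)
    return reduction(loss) if reduction is not None else loss


# illustrative shapes: batch of 2, sequence length 128, 5 answer categories
start_logits, end_logits = jnp.zeros((2, 128)), jnp.zeros((2, 128))
pooled_logits = jnp.zeros((2, 5))
start_labels, end_labels = jnp.array([3, 7]), jnp.array([10, 12])
pooled_labels = jnp.array([0, 4])

mean_ce = lambda lg, lb: cross_entropy(lg, lb, reduction=jnp.mean)
loss = (
    mean_ce(start_logits, start_labels)
    + mean_ce(end_logits, end_labels)
    + mean_ce(pooled_logits, pooled_labels)
) / 3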
def abbr(a: str, b: str) -> bool:
    '''simple docstring'''
    n = len(a)
    m = len(b)
    # dp[i][j]: can the first i chars of a be abbreviated to the first j chars of b?
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i][j + 1] = True  # capitalize a[i] so it matches b[j]
                if a[i].islower():
                    dp[i + 1][j] = True  # delete the lowercase a[i]
    return dp[n][m]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
86
1
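For reference, the classic example this DP answers: "daBcd" can be abbreviated to "ABC" (capitalize a and c, delete the leftover lowercase d), while "dBcd" cannot:

print(abbr("daBcd", "ABC"))  # True
print(abbr("dBcd", "ABC"))  # False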
import argparse import json from tqdm import tqdm def __magic_name__ ( ): '''simple docstring''' UpperCamelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--src_path""" , type=__a , default="""biencoder-nq-dev.json""" , help="""Path to raw DPR training data""" , ) parser.add_argument( """--evaluation_set""" , type=__a , help="""where to store parsed evaluation_set file""" , ) parser.add_argument( """--gold_data_path""" , type=__a , help="""where to store parsed gold_data_path file""" , ) UpperCamelCase__ = parser.parse_args() with open(args.src_path , """r""" ) as src_file, open(args.evaluation_set , """w""" ) as eval_file, open( args.gold_data_path , """w""" ) as gold_file: UpperCamelCase__ = json.load(__a ) for dpr_record in tqdm(__a ): UpperCamelCase__ = dpr_record["""question"""] UpperCamelCase__ = [context["""title"""] for context in dpr_record["""positive_ctxs"""]] eval_file.write(question + """\n""" ) gold_file.write("""\t""".join(__a ) + """\n""" ) if __name__ == "__main__": main()
86
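Stripped of the renamed variables, the script's body is a straight transformation: load the DPR JSON, write each question to the evaluation set, and write the titles of its positive contexts, tab-separated, to the gold file. A readable sketch with illustrative output file names:

import json

with open("biencoder-nq-dev.json") as src, open("eval_questions.txt", "w") as eval_file, open(
    "gold_titles.tsv", "w"
) as gold_file:
    for record in json.load(src):
        eval_file.write(record["question"] + "\n")
        titles = [ctx["title"] for ctx in record["positive_ctxs"]]
        gold_file.write("\t".join(titles) + "\n")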
from __future__ import annotations

END = '''#'''


class Trie:
    """simple docstring"""

    def __init__(self):
        self._trie: dict = {}

    def insert_word(self, text: str) -> None:
        trie = self._trie
        for char in text:
            if char not in trie:
                trie[char] = {}
            trie = trie[char]
        trie[END] = True  # mark the end of a complete word

    def find_word(self, prefix: str) -> tuple | list:
        trie = self._trie
        for char in prefix:
            if char in trie:
                trie = trie[char]
            else:
                return []
        return self._elements(trie)

    def _elements(self, d: dict) -> tuple:
        result = []
        for c, v in d.items():
            sub_result = [''' '''] if c == END else [(c + s) for s in self._elements(v)]
            result.extend(sub_result)
        return tuple(result)


trie = Trie()
words = ('''depart''', '''detergent''', '''daring''', '''dog''', '''deer''', '''deal''')
for word in words:
    trie.insert_word(word)


def autocomplete_using_trie(string: str) -> tuple:
    '''simple docstring'''
    suffixes = trie.find_word(string)
    return tuple(string + word for word in suffixes)


def main() -> None:
    '''simple docstring'''
    print(autocomplete_using_trie('''de'''))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
86
1
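With the six words registered above, a quick check of the completion behaviour; the trailing space comes from the end-of-word marker:

print(autocomplete_using_trie("de"))
# ('depart ', 'detergent ', 'deer ', 'deal ')  # order follows insertion order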
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available lowerCamelCase_ = { '''configuration_vivit''': ['''VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''VivitConfig'''], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase_ = ['''VivitImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase_ = [ '''VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''VivitModel''', '''VivitPreTrainedModel''', '''VivitForVideoClassification''', ] if TYPE_CHECKING: from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_vivit import VivitImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vivit import ( VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST, VivitForVideoClassification, VivitModel, VivitPreTrainedModel, ) else: import sys lowerCamelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
86
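The module above is HuggingFace's standard lazy-import layout: declare the import structure up front, guard optional backends, then hand everything to _LazyModule so heavy submodules are only imported on first attribute access. A stripped-down sketch of that mechanism, independent of the transformers internals and with hypothetical submodule names:

import importlib
import types


class LazyModule(types.ModuleType):
    # resolve exported names to their submodules on first access, then cache

    def __init__(self, name, import_structure):
        super().__init__(name)
        self._name_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._name_to_module:
            raise AttributeError(f"module {self.__name__} has no attribute {attr}")
        submodule = importlib.import_module("." + self._name_to_module[attr], self.__name__)
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so __getattr__ is not hit again
        return value


# hypothetical usage inside a package __init__.py:
# import sys
# sys.modules[__name__] = LazyModule(
#     __name__, {"configuration": ["VivitConfig"], "modeling": ["VivitModel"]}
# )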
import math import unittest from transformers import BioGptConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification, BioGptModel, BioGptTokenizer, ) from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST class __A: """simple docstring""" def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=13 , SCREAMING_SNAKE_CASE_=7 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=99 , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=5 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=37 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=5_12 , SCREAMING_SNAKE_CASE_=16 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=0.02 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=None , ): UpperCamelCase__ = parent UpperCamelCase__ = batch_size UpperCamelCase__ = seq_length UpperCamelCase__ = is_training UpperCamelCase__ = use_input_mask UpperCamelCase__ = use_token_type_ids UpperCamelCase__ = use_labels UpperCamelCase__ = vocab_size UpperCamelCase__ = hidden_size UpperCamelCase__ = num_hidden_layers UpperCamelCase__ = num_attention_heads UpperCamelCase__ = intermediate_size UpperCamelCase__ = hidden_act UpperCamelCase__ = hidden_dropout_prob UpperCamelCase__ = attention_probs_dropout_prob UpperCamelCase__ = max_position_embeddings UpperCamelCase__ = type_vocab_size UpperCamelCase__ = type_sequence_label_size UpperCamelCase__ = initializer_range UpperCamelCase__ = num_labels UpperCamelCase__ = num_choices UpperCamelCase__ = scope def UpperCAmelCase_ (self ): UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCamelCase__ = None if self.use_input_mask: UpperCamelCase__ = random_attention_mask([self.batch_size, self.seq_length] ) UpperCamelCase__ = None if self.use_token_type_ids: UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) UpperCamelCase__ = None UpperCamelCase__ = None UpperCamelCase__ = None if self.use_labels: UpperCamelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCamelCase__ = ids_tensor([self.batch_size] , self.num_choices ) UpperCamelCase__ = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCAmelCase_ (self ): return BioGptConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=SCREAMING_SNAKE_CASE_ , initializer_range=self.initializer_range , ) def UpperCAmelCase_ (self , 
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = BioGptModel(config=SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ): UpperCamelCase__ = BioGptForCausalLM(config=SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = BioGptModel(config=SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() # create attention mask UpperCamelCase__ = torch.ones(input_ids.shape , dtype=torch.long , device=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = self.seq_length // 2 UpperCamelCase__ = 0 # first forward pass UpperCamelCase__ , UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ ).to_tuple() # create hypothetical next token and extent to next_input_ids UpperCamelCase__ = ids_tensor((self.batch_size, 1) , config.vocab_size ) # change a random masked slice from input_ids UpperCamelCase__ = ids_tensor((1,) , SCREAMING_SNAKE_CASE_ ).item() + 1 UpperCamelCase__ = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 ) UpperCamelCase__ = random_other_next_tokens # append to next input_ids and attn_mask UpperCamelCase__ = torch.cat([input_ids, next_tokens] , dim=-1 ) UpperCamelCase__ = torch.cat( [attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=SCREAMING_SNAKE_CASE_ )] , dim=1 , ) # get two different outputs UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ )["""last_hidden_state"""] UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , past_key_values=SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ )["""last_hidden_state"""] # select random slice UpperCamelCase__ = ids_tensor((1,) , output_from_past.shape[-1] ).item() UpperCamelCase__ = output_from_no_past[:, -1, random_slice_idx].detach() UpperCamelCase__ = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1E-3 ) ) def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = BioGptModel(config=SCREAMING_SNAKE_CASE_ ).to(SCREAMING_SNAKE_CASE_ ).eval() UpperCamelCase__ = torch.ones(input_ids.shape , dtype=torch.long , device=SCREAMING_SNAKE_CASE_ ) # first forward pass UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , 
use_cache=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ , UpperCamelCase__ = outputs.to_tuple() # create hypothetical multiple next token and extent to next_input_ids UpperCamelCase__ = ids_tensor((self.batch_size, 3) , config.vocab_size ) UpperCamelCase__ = ids_tensor((self.batch_size, 3) , 2 ) # append to next input_ids and UpperCamelCase__ = torch.cat([input_ids, next_tokens] , dim=-1 ) UpperCamelCase__ = torch.cat([attention_mask, next_attn_mask] , dim=-1 ) UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ )["""last_hidden_state"""] UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , past_key_values=SCREAMING_SNAKE_CASE_ )[ """last_hidden_state""" ] # select random slice UpperCamelCase__ = ids_tensor((1,) , output_from_past.shape[-1] ).item() UpperCamelCase__ = output_from_no_past[:, -3:, random_slice_idx].detach() UpperCamelCase__ = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1E-3 ) ) def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=False ): UpperCamelCase__ = BioGptForCausalLM(SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) if gradient_checkpointing: model.gradient_checkpointing_enable() UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) result.loss.backward() def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = BioGptModel(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers ) for key in model.state_dict().keys(): if "c_proj" in key and "weight" in key: self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.001 ) self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.01 ) def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = self.num_labels UpperCamelCase__ = BioGptForTokenClassification(SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCAmelCase_ (self ): UpperCamelCase__ = self.prepare_config_and_inputs() ( ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ) = config_and_inputs UpperCamelCase__ = {"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class __A( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = ( (BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification) if is_torch_available() else () ) SCREAMING_SNAKE_CASE__ = (BioGptForCausalLM,) if 
is_torch_available() else () SCREAMING_SNAKE_CASE__ = ( { """feature-extraction""": BioGptModel, """text-classification""": BioGptForSequenceClassification, """text-generation""": BioGptForCausalLM, """token-classification""": BioGptForTokenClassification, """zero-shot""": BioGptForSequenceClassification, } if is_torch_available() else {} ) SCREAMING_SNAKE_CASE__ = False def UpperCAmelCase_ (self ): UpperCamelCase__ = BioGptModelTester(self ) UpperCamelCase__ = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , hidden_size=37 ) def UpperCAmelCase_ (self ): self.config_tester.run_common_tests() def UpperCAmelCase_ (self ): UpperCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self ): UpperCamelCase__ = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: UpperCamelCase__ = type self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self ): UpperCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_model_attention_mask_past(*SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self ): UpperCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_forward_and_backwards(*SCREAMING_SNAKE_CASE_ , gradient_checkpointing=SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self ): UpperCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_model_past_large_inputs(*SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self ): UpperCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_weight_initialization(*SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self ): UpperCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_for_token_classification(*SCREAMING_SNAKE_CASE_ ) @slow def UpperCAmelCase_ (self ): UpperCamelCase__ = BioGptForCausalLM.from_pretrained("""microsoft/biogpt""" ) model.to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = BioGptTokenizer.from_pretrained("""microsoft/biogpt""" ) UpperCamelCase__ = """left""" # Define PAD Token = EOS Token = 50256 UpperCamelCase__ = tokenizer.eos_token UpperCamelCase__ = model.config.eos_token_id # use different length sentences to test batching UpperCamelCase__ = [ """Hello, my dog is a little""", """Today, I""", ] UpperCamelCase__ = tokenizer(SCREAMING_SNAKE_CASE_ , return_tensors="""pt""" , padding=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = inputs["""input_ids"""].to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = model.generate( input_ids=SCREAMING_SNAKE_CASE_ , attention_mask=inputs["""attention_mask"""].to(SCREAMING_SNAKE_CASE_ ) , ) UpperCamelCase__ = tokenizer(sentences[0] , return_tensors="""pt""" ).input_ids.to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = model.generate(input_ids=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = inputs_non_padded.shape[-1] - inputs["""attention_mask"""][-1].long().sum().cpu().item() UpperCamelCase__ = tokenizer(sentences[1] , return_tensors="""pt""" ).input_ids.to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = model.generate(input_ids=SCREAMING_SNAKE_CASE_ , max_length=model.config.max_length - num_paddings ) UpperCamelCase__ = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ , skip_special_tokens=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = tokenizer.decode(output_non_padded[0] , skip_special_tokens=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = 
tokenizer.decode(output_padded[0] , skip_special_tokens=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = [ """Hello, my dog is a little bit bigger than a little bit.""", """Today, I have a good idea of how to use the information""", ] self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) self.assertListEqual(SCREAMING_SNAKE_CASE_ , [non_padded_sentence, padded_sentence] ) @slow def UpperCAmelCase_ (self ): for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCamelCase__ = BioGptModel.from_pretrained(SCREAMING_SNAKE_CASE_ ) self.assertIsNotNone(SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self ): UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() UpperCamelCase__ = 3 UpperCamelCase__ = input_dict["""input_ids"""] UpperCamelCase__ = input_ids.ne(1 ).to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) UpperCamelCase__ = BioGptForSequenceClassification(SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def UpperCAmelCase_ (self ): UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() UpperCamelCase__ = 3 UpperCamelCase__ = """multi_label_classification""" UpperCamelCase__ = input_dict["""input_ids"""] UpperCamelCase__ = input_ids.ne(1 ).to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = ids_tensor( [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float ) UpperCamelCase__ = BioGptForSequenceClassification(SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) @require_torch class __A( unittest.TestCase ): """simple docstring""" @slow def UpperCAmelCase_ (self ): UpperCamelCase__ = BioGptForCausalLM.from_pretrained("""microsoft/biogpt""" ) UpperCamelCase__ = torch.tensor([[2, 48_05, 9, 6_56, 21]] ) UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ )[0] UpperCamelCase__ = 4_23_84 UpperCamelCase__ = torch.Size((1, 5, vocab_size) ) self.assertEqual(output.shape , SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = torch.tensor( [[[-9.5236, -9.8918, 10.4557], [-11.0469, -9.6423, 8.1022], [-8.8664, -7.8826, 5.5325]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=1E-4 ) ) @slow def UpperCAmelCase_ (self ): UpperCamelCase__ = BioGptTokenizer.from_pretrained("""microsoft/biogpt""" ) UpperCamelCase__ = BioGptForCausalLM.from_pretrained("""microsoft/biogpt""" ) model.to(SCREAMING_SNAKE_CASE_ ) torch.manual_seed(0 ) UpperCamelCase__ = tokenizer("""COVID-19 is""" , return_tensors="""pt""" ).to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = model.generate( **SCREAMING_SNAKE_CASE_ , min_length=1_00 , max_length=10_24 , num_beams=5 , early_stopping=SCREAMING_SNAKE_CASE_ , ) UpperCamelCase__ = tokenizer.decode(output_ids[0] , skip_special_tokens=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = ( """COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the""" """ causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 
200 countries and""" """ territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),""" """ and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and""" """ more than 800,000 deaths.""" ) self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
86
1
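The most instructive check in the suite above is the KV-cache consistency test: run the full sequence once, then run only the new tokens with past_key_values, and demand the overlapping logits agree. The same pattern sketched against a tiny public GPT-2 checkpoint (assumes network access to download it):

import torch
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained("sshleifer/tiny-gpt2").eval()
input_ids = torch.randint(0, model.config.vocab_size, (1, 8))
next_tokens = torch.randint(0, model.config.vocab_size, (1, 3))

with torch.no_grad():
    # pass 1: the whole sequence in one go
    full = model(torch.cat([input_ids, next_tokens], dim=-1)).logits
    # pass 2: prefix first, then reuse the cache for the three new tokens
    past = model(input_ids, use_cache=True).past_key_values
    cached = model(next_tokens, past_key_values=past).logits

assert torch.allclose(full[:, -3:], cached, atol=1e-3)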
from typing import Any, Dict, List, Union from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, ChunkPipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): import torch from transformers.modeling_outputs import BaseModelOutput from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING lowerCamelCase_ = logging.get_logger(__name__) @add_end_docstrings(__lowerCamelCase ) class __A( __lowerCamelCase ): """simple docstring""" def __init__(self , **SCREAMING_SNAKE_CASE_ ): super().__init__(**SCREAMING_SNAKE_CASE_ ) if self.framework == "tf": raise ValueError(F"The {self.__class__} is only available in PyTorch." ) requires_backends(self , """vision""" ) self.check_model_type(SCREAMING_SNAKE_CASE_ ) def __call__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , ): if "text_queries" in kwargs: UpperCamelCase__ = kwargs.pop("""text_queries""" ) if isinstance(SCREAMING_SNAKE_CASE_ , (str, Image.Image) ): UpperCamelCase__ = {"""image""": image, """candidate_labels""": candidate_labels} else: UpperCamelCase__ = image UpperCamelCase__ = super().__call__(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) return results def UpperCAmelCase_ (self , **SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = {} if "threshold" in kwargs: UpperCamelCase__ = kwargs["""threshold"""] if "top_k" in kwargs: UpperCamelCase__ = kwargs["""top_k"""] return {}, {}, postprocess_params def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = load_image(inputs["""image"""] ) UpperCamelCase__ = inputs["""candidate_labels"""] if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = candidate_labels.split(""",""" ) UpperCamelCase__ = torch.tensor([[image.height, image.width]] , dtype=torch.intaa ) for i, candidate_label in enumerate(SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = self.tokenizer(SCREAMING_SNAKE_CASE_ , return_tensors=self.framework ) UpperCamelCase__ = self.image_processor(SCREAMING_SNAKE_CASE_ , return_tensors=self.framework ) yield { "is_last": i == len(SCREAMING_SNAKE_CASE_ ) - 1, "target_size": target_size, "candidate_label": candidate_label, **text_inputs, **image_features, } def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = model_inputs.pop("""target_size""" ) UpperCamelCase__ = model_inputs.pop("""candidate_label""" ) UpperCamelCase__ = model_inputs.pop("""is_last""" ) UpperCamelCase__ = self.model(**SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = {"""target_size""": target_size, """candidate_label""": candidate_label, """is_last""": is_last, **outputs} return model_outputs def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=None ): UpperCamelCase__ = [] for model_output in model_outputs: UpperCamelCase__ = model_output["""candidate_label"""] UpperCamelCase__ = BaseModelOutput(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = self.image_processor.post_process_object_detection( outputs=SCREAMING_SNAKE_CASE_ , threshold=SCREAMING_SNAKE_CASE_ , target_sizes=model_output["""target_size"""] )[0] for index in outputs["scores"].nonzero(): UpperCamelCase__ = outputs["""scores"""][index].item() UpperCamelCase__ = self._get_bounding_box(outputs["""boxes"""][index][0] ) UpperCamelCase__ = {"""score""": score, """label""": label, """box""": box} results.append(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = 
sorted(SCREAMING_SNAKE_CASE_ , key=lambda SCREAMING_SNAKE_CASE_ : x["score"] , reverse=SCREAMING_SNAKE_CASE_ ) if top_k: UpperCamelCase__ = results[:top_k] return results def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ ): if self.framework != "pt": raise ValueError("""The ZeroShotObjectDetectionPipeline is only available in PyTorch.""" ) UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = box.int().tolist() UpperCamelCase__ = { """xmin""": xmin, """ymin""": ymin, """xmax""": xmax, """ymax""": ymax, } return bbox
86
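In practice this pipeline is rarely built by hand; the high-level entry point performs the per-label tokenization and box post-processing shown above. A usage sketch with the OWL-ViT checkpoint (downloaded on first run):

from transformers import pipeline

detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")
results = detector(
    "http://images.cocodataset.org/val2017/000000039769.jpg",
    candidate_labels=["cat", "remote control", "couch"],
    threshold=0.1,
)
for r in results:
    print(r["label"], round(r["score"], 3), r["box"])  # box is xmin/ymin/xmax/ymax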
from PIL import Image


def change_brightness(img: Image, level: float) -> Image:
    '''simple docstring'''

    def brightness(c: int) -> float:
        # shift every channel value by `level`, anchored at mid-gray
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError('''level must be between -255.0 (black) and 255.0 (white)''')
    return img.point(brightness)


if __name__ == "__main__":
    # Load image
    with Image.open('''image_data/lena.jpg''') as img:
        # Change brightness to 100
        bright_img = change_brightness(img, 1_00)
        bright_img.save('''image_data/lena_brightness.png''', format='''png''')
86
1
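Pillow also ships a multiplicative brightness control, which is usually what image editors mean by the term; the additive offset above and the enhancer below diverge at the extremes, so pick deliberately:

from PIL import Image, ImageEnhance

with Image.open("image_data/lena.jpg") as img:
    brighter = ImageEnhance.Brightness(img).enhance(1.4)  # factor 1.0 leaves the image unchanged
    brighter.save("image_data/lena_brightness_x1.4.png")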
import warnings from typing import List, Optional, Tuple, Union import numpy as np import PIL import torch from ...models import UNetaDModel from ...schedulers import RePaintScheduler from ...utils import PIL_INTERPOLATION, logging, randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput lowerCamelCase_ = logging.get_logger(__name__) # pylint: disable=invalid-name def __magic_name__ ( __a : Union[List, PIL.Image.Image, torch.Tensor] ): '''simple docstring''' warnings.warn( """The preprocess method is deprecated and will be removed in a future version. Please""" """ use VaeImageProcessor.preprocess instead""" , __a , ) if isinstance(__a , torch.Tensor ): return image elif isinstance(__a , PIL.Image.Image ): UpperCamelCase__ = [image] if isinstance(image[0] , PIL.Image.Image ): UpperCamelCase__ , UpperCamelCase__ = image[0].size UpperCamelCase__ , UpperCamelCase__ = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8 UpperCamelCase__ = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION["""lanczos"""] ) )[None, :] for i in image] UpperCamelCase__ = np.concatenate(__a , axis=0 ) UpperCamelCase__ = np.array(__a ).astype(np.floataa ) / 255.0 UpperCamelCase__ = image.transpose(0 , 3 , 1 , 2 ) UpperCamelCase__ = 2.0 * image - 1.0 UpperCamelCase__ = torch.from_numpy(__a ) elif isinstance(image[0] , torch.Tensor ): UpperCamelCase__ = torch.cat(__a , dim=0 ) return image def __magic_name__ ( __a : Union[List, PIL.Image.Image, torch.Tensor] ): '''simple docstring''' if isinstance(__a , torch.Tensor ): return mask elif isinstance(__a , PIL.Image.Image ): UpperCamelCase__ = [mask] if isinstance(mask[0] , PIL.Image.Image ): UpperCamelCase__ , UpperCamelCase__ = mask[0].size UpperCamelCase__ , UpperCamelCase__ = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32 UpperCamelCase__ = [np.array(m.convert("""L""" ).resize((w, h) , resample=PIL_INTERPOLATION["""nearest"""] ) )[None, :] for m in mask] UpperCamelCase__ = np.concatenate(__a , axis=0 ) UpperCamelCase__ = mask.astype(np.floataa ) / 255.0 UpperCamelCase__ = 0 UpperCamelCase__ = 1 UpperCamelCase__ = torch.from_numpy(__a ) elif isinstance(mask[0] , torch.Tensor ): UpperCamelCase__ = torch.cat(__a , dim=0 ) return mask class __A( __lowerCamelCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = 42 SCREAMING_SNAKE_CASE__ = 42 def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): super().__init__() self.register_modules(unet=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ ) @torch.no_grad() def __call__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 2_50 , SCREAMING_SNAKE_CASE_ = 0.0 , SCREAMING_SNAKE_CASE_ = 10 , SCREAMING_SNAKE_CASE_ = 10 , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = "pil" , SCREAMING_SNAKE_CASE_ = True , ): UpperCamelCase__ = image UpperCamelCase__ = _preprocess_image(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = original_image.to(device=self.device , dtype=self.unet.dtype ) UpperCamelCase__ = _preprocess_mask(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = mask_image.to(device=self.device , dtype=self.unet.dtype ) UpperCamelCase__ = original_image.shape[0] # sample gaussian noise to begin the loop if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) and len(SCREAMING_SNAKE_CASE_ ) != batch_size: raise ValueError( F"You have passed a list of generators of length {len(SCREAMING_SNAKE_CASE_ )}, but requested an effective batch" F" size of {batch_size}. Make sure the batch size matches the length of the generators." 
) UpperCamelCase__ = original_image.shape UpperCamelCase__ = randn_tensor(SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , device=self.device , dtype=self.unet.dtype ) # set step values self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , self.device ) UpperCamelCase__ = eta UpperCamelCase__ = self.scheduler.timesteps[0] + 1 UpperCamelCase__ = generator[0] if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else generator for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ): if t < t_last: # predict the noise residual UpperCamelCase__ = self.unet(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).sample # compute previous image: x_t -> x_t-1 UpperCamelCase__ = self.scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).prev_sample else: # compute the reverse: x_t-1 -> x_t UpperCamelCase__ = self.scheduler.undo_step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = t UpperCamelCase__ = (image / 2 + 0.5).clamp(0 , 1 ) UpperCamelCase__ = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": UpperCamelCase__ = self.numpy_to_pil(SCREAMING_SNAKE_CASE_ ) if not return_dict: return (image,) return ImagePipelineOutput(images=SCREAMING_SNAKE_CASE_ )
86
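The pipeline above is RePaint-style inpainting: at each denoising step the known pixels are re-injected from the noised original, with resampling jumps (jump_length, jump_n_sample) to harmonize the boundary. A usage sketch following the diffusers documentation example, with placeholder input files:

import torch
from diffusers import RePaintPipeline, RePaintScheduler
from PIL import Image

scheduler = RePaintScheduler.from_pretrained("google/ddpm-ema-celebahq-256")
pipe = RePaintPipeline.from_pretrained("google/ddpm-ema-celebahq-256", scheduler=scheduler)

original = Image.open("face.png").convert("RGB")  # 256x256 image to inpaint
mask = Image.open("mask.png").convert("L")  # white = keep, black = repaint
generator = torch.Generator("cpu").manual_seed(0)
out = pipe(
    image=original,
    mask_image=mask,
    num_inference_steps=250,
    jump_length=10,
    jump_n_sample=10,
    generator=generator,
).images[0]
out.save("inpainted.png")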
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(10_00_00)]


def next_number(number: int) -> int:
    '''simple docstring'''
    sum_of_digits_squared = 0
    while number:
        # Increased Speed Slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 100_000]
        number //= 100_000
    return sum_of_digits_squared


# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
CHAINS = [None] * 10_00_00_00
CHAINS[0] = True  # the chain of 1 ends in 1
CHAINS[57] = False  # the chain of 58 ends in 89


def chain(number: int) -> bool:
    '''simple docstring'''
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore
    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain
    while number < 10_000_000:
        CHAINS[number - 1] = number_chain
        number *= 10
    return number_chain


def solution(number: int = 10_000_000) -> int:
    '''simple docstring'''
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)
    return CHAINS[:number].count(False)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f'{solution() = }')
86
1
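A slow but obviously-correct cross-check for small limits, handy when modifying the memoized version; both sides count how many of 1..10_000 end in 89:

def ends_in_89(n: int) -> bool:
    # every digit-square chain provably terminates at 1 or at 89
    while n != 1 and n != 89:
        n = sum(int(d) ** 2 for d in str(n))
    return n == 89


assert sum(ends_in_89(i) for i in range(1, 10_001)) == solution(10_000)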
from unittest import TestCase from datasets import Dataset from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters def __magic_name__ ( ): '''simple docstring''' UpperCamelCase__ = { """repo_name""": ["""test_repo1""", """test_repo2""", """test_repo3"""], """path""": ["""test_1.py""", """test_2.py""", """unit_test.py"""], """content""": ["""a """ * 20, """a """ * 30, """b """ * 7], } UpperCamelCase__ = Dataset.from_dict(__a ) return dataset class __A( __lowerCamelCase ): """simple docstring""" def UpperCAmelCase_ (self ): UpperCamelCase__ = get_dataset() UpperCamelCase__ = make_duplicate_clusters(SCREAMING_SNAKE_CASE_ , 0.85 ) self.assertEqual(len(duplicate_clusters[0] ) , 2 ) def UpperCAmelCase_ (self ): UpperCamelCase__ = get_dataset() UpperCamelCase__ , UpperCamelCase__ = deduplicate_dataset(SCREAMING_SNAKE_CASE_ ) self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , 2 ) print(SCREAMING_SNAKE_CASE_ ) self.assertEqual(duplicate_clusters[0][0]["""copies"""] , 2 ) self.assertEqual(duplicate_clusters[0][0]["""is_extreme"""] , SCREAMING_SNAKE_CASE_ )
86
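The helpers under test wrap datasketch's MinHash and MinHashLSH: near-identical files hash to similar signatures and land in one LSH bucket. A minimal sketch of that machinery; the whitespace shingling and the 0.85 threshold are illustrative, not the example's exact settings:

from datasketch import MinHash, MinHashLSH


def minhash(text: str, num_perm: int = 128) -> MinHash:
    m = MinHash(num_perm=num_perm)
    for token in set(text.split()):
        m.update(token.encode("utf-8"))
    return m


docs = {"test_1.py": "a " * 20, "test_2.py": "a " * 30, "unit_test.py": "b " * 7}
hashes = {name: minhash(body) for name, body in docs.items()}

lsh = MinHashLSH(threshold=0.85, num_perm=128)
for name, h in hashes.items():
    lsh.insert(name, h)

print(lsh.query(hashes["test_1.py"]))  # expected: test_1.py and test_2.py together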
import argparse import hashlib import os import urllib import warnings import torch from torch import nn from tqdm import tqdm from transformers import WhisperConfig, WhisperForConditionalGeneration lowerCamelCase_ = { '''tiny.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt''', '''tiny''': '''https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt''', '''base.en''': '''https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt''', '''base''': '''https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt''', '''small.en''': '''https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt''', '''small''': '''https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt''', '''medium.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt''', '''medium''': '''https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt''', '''large''': '''https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt''', '''large-v2''': '''https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt''', } def __magic_name__ ( __a : List[str] ): '''simple docstring''' UpperCamelCase__ = ["""layers""", """blocks"""] for k in ignore_keys: state_dict.pop(__a , __a ) lowerCamelCase_ = { '''blocks''': '''layers''', '''mlp.0''': '''fc1''', '''mlp.2''': '''fc2''', '''mlp_ln''': '''final_layer_norm''', '''.attn.query''': '''.self_attn.q_proj''', '''.attn.key''': '''.self_attn.k_proj''', '''.attn.value''': '''.self_attn.v_proj''', '''.attn_ln''': '''.self_attn_layer_norm''', '''.attn.out''': '''.self_attn.out_proj''', '''.cross_attn.query''': '''.encoder_attn.q_proj''', '''.cross_attn.key''': '''.encoder_attn.k_proj''', '''.cross_attn.value''': '''.encoder_attn.v_proj''', '''.cross_attn_ln''': '''.encoder_attn_layer_norm''', '''.cross_attn.out''': '''.encoder_attn.out_proj''', '''decoder.ln.''': '''decoder.layer_norm.''', '''encoder.ln.''': '''encoder.layer_norm.''', '''token_embedding''': '''embed_tokens''', '''encoder.positional_embedding''': '''encoder.embed_positions.weight''', '''decoder.positional_embedding''': '''decoder.embed_positions.weight''', '''ln_post''': '''layer_norm''', } def __magic_name__ ( __a : Dict ): '''simple docstring''' UpperCamelCase__ = list(s_dict.keys() ) for key in keys: UpperCamelCase__ = key for k, v in WHISPER_MAPPING.items(): if k in key: UpperCamelCase__ = new_key.replace(__a , __a ) print(f"{key} -> {new_key}" ) UpperCamelCase__ = s_dict.pop(__a ) return s_dict def __magic_name__ ( __a : Optional[Any] ): '''simple docstring''' UpperCamelCase__ , UpperCamelCase__ = emb.weight.shape UpperCamelCase__ = nn.Linear(__a , __a , bias=__a ) UpperCamelCase__ = emb.weight.data return lin_layer def __magic_name__ ( __a : str , __a : str ): '''simple docstring''' os.makedirs(__a , exist_ok=__a ) UpperCamelCase__ = os.path.basename(__a ) UpperCamelCase__ = url.split("""/""" 
)[-2] UpperCamelCase__ = os.path.join(__a , __a ) if os.path.exists(__a ) and not os.path.isfile(__a ): raise RuntimeError(f"{download_target} exists and is not a regular file" ) if os.path.isfile(__a ): UpperCamelCase__ = open(__a , """rb""" ).read() if hashlib.shaaaa(__a ).hexdigest() == expected_shaaaa: return model_bytes else: warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file" ) with urllib.request.urlopen(__a ) as source, open(__a , """wb""" ) as output: with tqdm( total=int(source.info().get("""Content-Length""" ) ) , ncols=80 , unit="""iB""" , unit_scale=__a , unit_divisor=1_024 ) as loop: while True: UpperCamelCase__ = source.read(8_192 ) if not buffer: break output.write(__a ) loop.update(len(__a ) ) UpperCamelCase__ = open(__a , """rb""" ).read() if hashlib.shaaaa(__a ).hexdigest() != expected_shaaaa: raise RuntimeError( """Model has been downloaded but the SHA256 checksum does not not match. Please retry loading the model.""" ) return model_bytes def __magic_name__ ( __a : Union[str, Any] , __a : Optional[int] ): '''simple docstring''' if ".pt" not in checkpoint_path: UpperCamelCase__ = _download(_MODELS[checkpoint_path] ) else: UpperCamelCase__ = torch.load(__a , map_location="""cpu""" ) UpperCamelCase__ = original_checkpoint["""dims"""] UpperCamelCase__ = original_checkpoint["""model_state_dict"""] UpperCamelCase__ = state_dict["""decoder.token_embedding.weight"""] remove_ignore_keys_(__a ) rename_keys(__a ) UpperCamelCase__ = True UpperCamelCase__ = state_dict["""decoder.layers.0.fc1.weight"""].shape[0] UpperCamelCase__ = WhisperConfig( vocab_size=dimensions["""n_vocab"""] , encoder_ffn_dim=__a , decoder_ffn_dim=__a , num_mel_bins=dimensions["""n_mels"""] , d_model=dimensions["""n_audio_state"""] , max_target_positions=dimensions["""n_text_ctx"""] , encoder_layers=dimensions["""n_audio_layer"""] , encoder_attention_heads=dimensions["""n_audio_head"""] , decoder_layers=dimensions["""n_text_layer"""] , decoder_attention_heads=dimensions["""n_text_state"""] , max_source_positions=dimensions["""n_audio_ctx"""] , ) UpperCamelCase__ = WhisperForConditionalGeneration(__a ) UpperCamelCase__ , UpperCamelCase__ = model.model.load_state_dict(__a , strict=__a ) if len(__a ) > 0 and not set(__a ) <= { "encoder.embed_positions.weights", "decoder.embed_positions.weights", }: raise ValueError( """Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,""" f" but all the following weights are missing {missing}" ) if tie_embeds: UpperCamelCase__ = make_linear_from_emb(model.model.decoder.embed_tokens ) else: UpperCamelCase__ = proj_out_weights model.save_pretrained(__a ) if __name__ == "__main__": lowerCamelCase_ = argparse.ArgumentParser() # # Required parameters parser.add_argument('''--checkpoint_path''', type=str, help='''Patht to the downloaded checkpoints''') parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') lowerCamelCase_ = parser.parse_args() convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
86
1
import json import os from pathlib import Path import pytest from datasets.download.download_config import DownloadConfig from datasets.download.download_manager import DownloadManager from datasets.utils.file_utils import hash_url_to_filename lowerCamelCase_ = '''http://www.mocksite.com/file1.txt''' lowerCamelCase_ = '''"text": ["foo", "foo"]''' lowerCamelCase_ = '''6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8''' class __A: """simple docstring""" SCREAMING_SNAKE_CASE__ = 200 SCREAMING_SNAKE_CASE__ = {"""Content-Length""": """100"""} SCREAMING_SNAKE_CASE__ = {} def UpperCAmelCase_ (self , **SCREAMING_SNAKE_CASE_ ): return [bytes(SCREAMING_SNAKE_CASE_ , """utf-8""" )] def __magic_name__ ( *__a : Dict , **__a : Optional[int] ): '''simple docstring''' return MockResponse() @pytest.mark.parametrize("""urls_type""" , [str, list, dict] ) def __magic_name__ ( __a : str , __a : Optional[Any] , __a : List[str] ): '''simple docstring''' import requests monkeypatch.setattr(__a , """request""" , __a ) UpperCamelCase__ = URL if issubclass(__a , __a ): UpperCamelCase__ = url elif issubclass(__a , __a ): UpperCamelCase__ = [url] elif issubclass(__a , __a ): UpperCamelCase__ = {"""train""": url} UpperCamelCase__ = """dummy""" UpperCamelCase__ = """downloads""" UpperCamelCase__ = tmp_path UpperCamelCase__ = DownloadConfig( cache_dir=os.path.join(__a , __a ) , use_etag=__a , ) UpperCamelCase__ = DownloadManager(dataset_name=__a , download_config=__a ) UpperCamelCase__ = dl_manager.download(__a ) UpperCamelCase__ = urls for downloaded_paths in [downloaded_paths]: if isinstance(__a , __a ): UpperCamelCase__ = [downloaded_paths] UpperCamelCase__ = [urls] elif isinstance(__a , __a ): assert "train" in downloaded_paths.keys() UpperCamelCase__ = downloaded_paths.values() UpperCamelCase__ = urls.values() assert downloaded_paths for downloaded_path, input_url in zip(__a , __a ): assert downloaded_path == dl_manager.downloaded_paths[input_url] UpperCamelCase__ = Path(__a ) UpperCamelCase__ = downloaded_path.parts assert parts[-1] == HASH assert parts[-2] == cache_subdir assert downloaded_path.exists() UpperCamelCase__ = downloaded_path.read_text() assert content == CONTENT UpperCamelCase__ = downloaded_path.with_suffix(""".json""" ) assert metadata_downloaded_path.exists() UpperCamelCase__ = json.loads(metadata_downloaded_path.read_text() ) assert metadata_content == {"url": URL, "etag": None} @pytest.mark.parametrize("""paths_type""" , [str, list, dict] ) def __magic_name__ ( __a : Optional[Any] , __a : int , __a : str ): '''simple docstring''' UpperCamelCase__ = str(__a ) if issubclass(__a , __a ): UpperCamelCase__ = filename elif issubclass(__a , __a ): UpperCamelCase__ = [filename] elif issubclass(__a , __a ): UpperCamelCase__ = {"""train""": filename} UpperCamelCase__ = """dummy""" UpperCamelCase__ = xz_file.parent UpperCamelCase__ = """extracted""" UpperCamelCase__ = DownloadConfig( cache_dir=__a , use_etag=__a , ) UpperCamelCase__ = DownloadManager(dataset_name=__a , download_config=__a ) UpperCamelCase__ = dl_manager.extract(__a ) UpperCamelCase__ = paths for extracted_paths in [extracted_paths]: if isinstance(__a , __a ): UpperCamelCase__ = [extracted_paths] UpperCamelCase__ = [paths] elif isinstance(__a , __a ): assert "train" in extracted_paths.keys() UpperCamelCase__ = extracted_paths.values() UpperCamelCase__ = paths.values() assert extracted_paths for extracted_path, input_path in zip(__a , __a ): assert extracted_path == dl_manager.extracted_paths[input_path] UpperCamelCase__ = 
Path(__a ) UpperCamelCase__ = extracted_path.parts assert parts[-1] == hash_url_to_filename(__a , etag=__a ) assert parts[-2] == extracted_subdir assert extracted_path.exists() UpperCamelCase__ = extracted_path.read_text() UpperCamelCase__ = text_file.read_text() assert extracted_file_content == expected_file_content def __magic_name__ ( __a : Tuple , __a : List[Any] ): '''simple docstring''' assert path.endswith(""".jsonl""" ) for num_items, line in enumerate(__a , start=1 ): UpperCamelCase__ = json.loads(line.decode("""utf-8""" ) ) assert item.keys() == {"col_1", "col_2", "col_3"} assert num_items == 4 @pytest.mark.parametrize("""archive_jsonl""" , ["""tar_jsonl_path""", """zip_jsonl_path"""] ) def __magic_name__ ( __a : List[Any] , __a : Any ): '''simple docstring''' UpperCamelCase__ = request.getfixturevalue(__a ) UpperCamelCase__ = DownloadManager() for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(__a ) , start=1 ): _test_jsonl(__a , __a ) assert num_jsonl == 2 @pytest.mark.parametrize("""archive_nested_jsonl""" , ["""tar_nested_jsonl_path""", """zip_nested_jsonl_path"""] ) def __magic_name__ ( __a : Any , __a : Union[str, Any] ): '''simple docstring''' UpperCamelCase__ = request.getfixturevalue(__a ) UpperCamelCase__ = DownloadManager() for num_tar, (path, file) in enumerate(dl_manager.iter_archive(__a ) , start=1 ): for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(__a ) , start=1 ): _test_jsonl(__a , __a ) assert num_tar == 1 assert num_jsonl == 2 def __magic_name__ ( __a : Any ): '''simple docstring''' UpperCamelCase__ = DownloadManager() for num_file, file in enumerate(dl_manager.iter_files(__a ) , start=1 ): assert os.path.basename(__a ) == ("test.txt" if num_file == 1 else "train.txt") assert num_file == 2
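# A short sketch of the DownloadManager flow exercised by the tests above,
# assuming a reachable URL; the dataset name and cache directory are
# hypothetical placeholders.
import os

from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager


def fetch_and_extract(url: str, cache_dir: str = "./hf_cache") -> str:
    download_config = DownloadConfig(cache_dir=os.path.join(cache_dir, "downloads"), use_etag=False)
    dl_manager = DownloadManager(dataset_name="dummy", download_config=download_config)
    downloaded_path = dl_manager.download(url)  # cached file plus a .json metadata sidecar
    return dl_manager.extract(downloaded_path)  # decompressed copy under the "extracted" subdir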
86
def __magic_name__ ( __a : int ):
    '''simple docstring'''
    UpperCamelCase__ = [[0 for _ in range(__a )] for _ in range(m + 1 )]
    for i in range(m + 1 ):
        UpperCamelCase__ = 1
    for n in range(m + 1 ):
        for k in range(1 , __a ):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]
    return memo[m][m - 1]


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        try:
            lowerCamelCase_ = int(input('''Enter a number: ''').strip())
            print(partition(n))
        except ValueError:
            print('''Please enter a number.''')
    else:
        try:
            lowerCamelCase_ = int(sys.argv[1])
            print(partition(n))
        except ValueError:
            print('''Please pass a number.''')
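# A brute-force reference one can compare the memoized count against on small
# inputs; this helper is an addition for illustration, not part of the module.
from typing import Optional


def partitions_brute_force(n: int, max_part: Optional[int] = None) -> int:
    # Count the partitions of n whose parts are all <= max_part.
    if max_part is None:
        max_part = n
    if n == 0:
        return 1
    return sum(partitions_brute_force(n - part, part) for part in range(min(n, max_part), 0, -1))


# Known small partition numbers: p(1..5) = 1, 2, 3, 5, 7.
assert [partitions_brute_force(k) for k in range(1, 6)] == [1, 2, 3, 5, 7]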
86
1
import unittest from parameterized import parameterized from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( GPTNeoXForCausalLM, GPTNeoXForQuestionAnswering, GPTNeoXForSequenceClassification, GPTNeoXForTokenClassification, GPTNeoXModel, ) class __A: """simple docstring""" def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=13 , SCREAMING_SNAKE_CASE_=7 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=99 , SCREAMING_SNAKE_CASE_=64 , SCREAMING_SNAKE_CASE_=5 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=37 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=5_12 , SCREAMING_SNAKE_CASE_=16 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=0.02 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=None , ): UpperCamelCase__ = parent UpperCamelCase__ = batch_size UpperCamelCase__ = seq_length UpperCamelCase__ = is_training UpperCamelCase__ = use_input_mask UpperCamelCase__ = use_token_type_ids UpperCamelCase__ = use_labels UpperCamelCase__ = vocab_size UpperCamelCase__ = hidden_size UpperCamelCase__ = num_hidden_layers UpperCamelCase__ = num_attention_heads UpperCamelCase__ = intermediate_size UpperCamelCase__ = hidden_act UpperCamelCase__ = hidden_dropout_prob UpperCamelCase__ = attention_probs_dropout_prob UpperCamelCase__ = max_position_embeddings UpperCamelCase__ = type_vocab_size UpperCamelCase__ = type_sequence_label_size UpperCamelCase__ = initializer_range UpperCamelCase__ = num_labels UpperCamelCase__ = num_choices UpperCamelCase__ = scope UpperCamelCase__ = vocab_size - 1 def UpperCAmelCase_ (self ): UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCamelCase__ = None if self.use_input_mask: UpperCamelCase__ = random_attention_mask([self.batch_size, self.seq_length] ) UpperCamelCase__ = None if self.use_labels: UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCamelCase__ = self.get_config() return config, input_ids, input_mask, token_labels def UpperCAmelCase_ (self ): return GPTNeoXConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=SCREAMING_SNAKE_CASE_ , initializer_range=self.initializer_range , pad_token_id=self.pad_token_id , ) def UpperCAmelCase_ (self ): UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = self.prepare_config_and_inputs() UpperCamelCase__ = True return config, input_ids, input_mask, token_labels def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = GPTNeoXModel(config=SCREAMING_SNAKE_CASE_ ) 
model.to(SCREAMING_SNAKE_CASE_ ) model.eval() UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = True UpperCamelCase__ = GPTNeoXModel(SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = GPTNeoXForCausalLM(config=SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = self.num_labels UpperCamelCase__ = GPTNeoXForQuestionAnswering(SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = self.num_labels UpperCamelCase__ = GPTNeoXForSequenceClassification(SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() UpperCamelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = self.num_labels UpperCamelCase__ = GPTNeoXForTokenClassification(SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = True UpperCamelCase__ = GPTNeoXForCausalLM(config=SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() # first forward pass UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , use_cache=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids UpperCamelCase__ = ids_tensor((self.batch_size, 3) , config.vocab_size ) UpperCamelCase__ = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and UpperCamelCase__ = torch.cat([input_ids, next_tokens] , dim=-1 ) UpperCamelCase__ = 
torch.cat([input_mask, next_mask] , dim=-1 ) UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , output_hidden_states=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = output_from_no_past["""hidden_states"""][0] UpperCamelCase__ = model( SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , past_key_values=SCREAMING_SNAKE_CASE_ , output_hidden_states=SCREAMING_SNAKE_CASE_ , )["""hidden_states"""][0] # select random slice UpperCamelCase__ = ids_tensor((1,) , output_from_past.shape[-1] ).item() UpperCamelCase__ = output_from_no_past[:, -3:, random_slice_idx].detach() UpperCamelCase__ = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1E-3 ) ) def UpperCAmelCase_ (self ): UpperCamelCase__ = self.prepare_config_and_inputs() UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = config_and_inputs UpperCamelCase__ = {"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class __A( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = ( ( GPTNeoXModel, GPTNeoXForCausalLM, GPTNeoXForQuestionAnswering, GPTNeoXForSequenceClassification, GPTNeoXForTokenClassification, ) if is_torch_available() else () ) SCREAMING_SNAKE_CASE__ = (GPTNeoXForCausalLM,) if is_torch_available() else () SCREAMING_SNAKE_CASE__ = ( { """feature-extraction""": GPTNeoXModel, """question-answering""": GPTNeoXForQuestionAnswering, """text-classification""": GPTNeoXForSequenceClassification, """text-generation""": GPTNeoXForCausalLM, """token-classification""": GPTNeoXForTokenClassification, """zero-shot""": GPTNeoXForSequenceClassification, } if is_torch_available() else {} ) SCREAMING_SNAKE_CASE__ = False SCREAMING_SNAKE_CASE__ = False SCREAMING_SNAKE_CASE__ = False SCREAMING_SNAKE_CASE__ = False def UpperCAmelCase_ (self ): UpperCamelCase__ = GPTNeoXModelTester(self ) UpperCamelCase__ = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , hidden_size=64 , num_attention_heads=8 ) def UpperCAmelCase_ (self ): self.config_tester.run_common_tests() def UpperCAmelCase_ (self ): UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self ): UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self ): # This regression test was failing with PyTorch < 1.3 UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_decoder() UpperCamelCase__ = None self.model_tester.create_and_check_model_as_decoder(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self ): UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def 
UpperCAmelCase_ (self ): UpperCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_causal_lm(*SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self ): UpperCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self ): UpperCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self ): UpperCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*SCREAMING_SNAKE_CASE_ ) @unittest.skip(reason="""Feed forward chunking is not implemented""" ) def UpperCAmelCase_ (self ): pass @parameterized.expand([("""linear""",), ("""dynamic""",)] ) def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() UpperCamelCase__ = ids_tensor([1, 10] , config.vocab_size ) UpperCamelCase__ = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size ) set_seed(42 ) # Fixed seed at init time so the two models get the same random weights UpperCamelCase__ = GPTNeoXModel(SCREAMING_SNAKE_CASE_ ) original_model.to(SCREAMING_SNAKE_CASE_ ) original_model.eval() UpperCamelCase__ = original_model(SCREAMING_SNAKE_CASE_ ).last_hidden_state UpperCamelCase__ = original_model(SCREAMING_SNAKE_CASE_ ).last_hidden_state set_seed(42 ) # Fixed seed at init time so the two models get the same random weights UpperCamelCase__ = {"""type""": scaling_type, """factor""": 10.0} UpperCamelCase__ = GPTNeoXModel(SCREAMING_SNAKE_CASE_ ) scaled_model.to(SCREAMING_SNAKE_CASE_ ) scaled_model.eval() UpperCamelCase__ = scaled_model(SCREAMING_SNAKE_CASE_ ).last_hidden_state UpperCamelCase__ = scaled_model(SCREAMING_SNAKE_CASE_ ).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should match. if scaling_type == "dynamic": self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1E-5 ) ) else: self.assertFalse(torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1E-5 ) ) # The output should be different for long inputs self.assertFalse(torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1E-5 ) ) @require_torch class __A( unittest.TestCase ): """simple docstring""" @slow def UpperCAmelCase_ (self ): UpperCamelCase__ = AutoTokenizer.from_pretrained("""EleutherAI/pythia-410m-deduped""" ) for checkpointing in [True, False]: UpperCamelCase__ = GPTNeoXForCausalLM.from_pretrained("""EleutherAI/pythia-410m-deduped""" ) if checkpointing: model.gradient_checkpointing_enable() else: model.gradient_checkpointing_disable() model.to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = tokenizer("""My favorite food is""" , return_tensors="""pt""" ).to(SCREAMING_SNAKE_CASE_ ) # The hub repo. is updated on 2023-04-04, resulting in poor outputs. # See: https://github.com/huggingface/transformers/pull/24193 UpperCamelCase__ = """My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI'm not sure""" UpperCamelCase__ = model.generate(**SCREAMING_SNAKE_CASE_ , do_sample=SCREAMING_SNAKE_CASE_ , max_new_tokens=20 ) UpperCamelCase__ = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ )[0] self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
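# A standalone sketch of the greedy-generation check from the slow test above,
# assuming the Pythia checkpoint can be downloaded; it is not part of the test
# suite itself.
from transformers import AutoTokenizer, GPTNeoXForCausalLM


def greedy_continuation(prompt: str = "My favorite food is") -> str:
    tokenizer = AutoTokenizer.from_pretrained("EleutherAI/pythia-410m-deduped")
    model = GPTNeoXForCausalLM.from_pretrained("EleutherAI/pythia-410m-deduped")
    inputs = tokenizer(prompt, return_tensors="pt")
    output_ids = model.generate(**inputs, do_sample=False, max_new_tokens=20)
    return tokenizer.batch_decode(output_ids)[0]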
86
class __A: """simple docstring""" def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = None UpperCamelCase__ = None UpperCamelCase__ = graph self._normalize_graph(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = len(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = None def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): if isinstance(sources , int ): UpperCamelCase__ = [sources] if isinstance(sinks , int ): UpperCamelCase__ = [sinks] if len(SCREAMING_SNAKE_CASE_ ) == 0 or len(SCREAMING_SNAKE_CASE_ ) == 0: return UpperCamelCase__ = sources[0] UpperCamelCase__ = sinks[0] # make fake vertex if there are more # than one source or sink if len(SCREAMING_SNAKE_CASE_ ) > 1 or len(SCREAMING_SNAKE_CASE_ ) > 1: UpperCamelCase__ = 0 for i in sources: max_input_flow += sum(self.graph[i] ) UpperCamelCase__ = len(self.graph ) + 1 for room in self.graph: room.insert(0 , 0 ) self.graph.insert(0 , [0] * size ) for i in sources: UpperCamelCase__ = max_input_flow UpperCamelCase__ = 0 UpperCamelCase__ = len(self.graph ) + 1 for room in self.graph: room.append(0 ) self.graph.append([0] * size ) for i in sinks: UpperCamelCase__ = max_input_flow UpperCamelCase__ = size - 1 def UpperCAmelCase_ (self ): if self.maximum_flow_algorithm is None: raise Exception("""You need to set maximum flow algorithm before.""" ) if self.source_index is None or self.sink_index is None: return 0 self.maximum_flow_algorithm.execute() return self.maximum_flow_algorithm.getMaximumFlow() def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = algorithm(self ) class __A: """simple docstring""" def __init__(self , SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = flow_network UpperCamelCase__ = flow_network.verticesCount UpperCamelCase__ = flow_network.sourceIndex UpperCamelCase__ = flow_network.sinkIndex # it's just a reference, so you shouldn't change # it in your algorithms, use deep copy before doing that UpperCamelCase__ = flow_network.graph UpperCamelCase__ = False def UpperCAmelCase_ (self ): if not self.executed: self._algorithm() UpperCamelCase__ = True def UpperCAmelCase_ (self ): pass class __A( __lowerCamelCase ): """simple docstring""" def __init__(self , SCREAMING_SNAKE_CASE_ ): super().__init__(SCREAMING_SNAKE_CASE_ ) # use this to save your result UpperCamelCase__ = -1 def UpperCAmelCase_ (self ): if not self.executed: raise Exception("""You should execute algorithm before using its result!""" ) return self.maximum_flow class __A( __lowerCamelCase ): """simple docstring""" def __init__(self , SCREAMING_SNAKE_CASE_ ): super().__init__(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = [[0] * self.verticies_count for i in range(self.verticies_count )] UpperCamelCase__ = [0] * self.verticies_count UpperCamelCase__ = [0] * self.verticies_count def UpperCAmelCase_ (self ): UpperCamelCase__ = self.verticies_count # push some substance to graph for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ): self.preflow[self.source_index][nextvertex_index] += bandwidth self.preflow[nextvertex_index][self.source_index] -= bandwidth self.excesses[nextvertex_index] += bandwidth # Relabel-to-front selection rule UpperCamelCase__ = [ i for i in range(self.verticies_count ) if i != self.source_index and i != self.sink_index ] # move through list UpperCamelCase__ = 0 while i < len(SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = vertices_list[i] UpperCamelCase__ = self.heights[vertex_index] self.process_vertex(SCREAMING_SNAKE_CASE_ ) if
self.heights[vertex_index] > previous_height: # if it was relabeled, swap elements # and start from 0 index vertices_list.insert(0 , vertices_list.pop(SCREAMING_SNAKE_CASE_ ) ) UpperCamelCase__ = 0 else: i += 1 UpperCamelCase__ = sum(self.preflow[self.source_index] ) def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ ): while self.excesses[vertex_index] > 0: for neighbour_index in range(self.verticies_count ): # if it's neighbour and current vertex is higher if ( self.graph[vertex_index][neighbour_index] - self.preflow[vertex_index][neighbour_index] > 0 and self.heights[vertex_index] > self.heights[neighbour_index] ): self.push(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) self.relabel(SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = min( self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , ) self.preflow[from_index][to_index] += preflow_delta self.preflow[to_index][from_index] -= preflow_delta self.excesses[from_index] -= preflow_delta self.excesses[to_index] += preflow_delta def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = None for to_index in range(self.verticies_count ): if ( self.graph[vertex_index][to_index] - self.preflow[vertex_index][to_index] > 0 ) and (min_height is None or self.heights[to_index] < min_height): UpperCamelCase__ = self.heights[to_index] if min_height is not None: UpperCamelCase__ = min_height + 1 if __name__ == "__main__": lowerCamelCase_ = [0] lowerCamelCase_ = [3] # graph = [ # [0, 0, 4, 6, 0, 0], # [0, 0, 5, 2, 0, 0], # [0, 0, 0, 0, 4, 4], # [0, 0, 0, 0, 6, 6], # [0, 0, 0, 0, 0, 0], # [0, 0, 0, 0, 0, 0], # ] lowerCamelCase_ = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]] # prepare our network lowerCamelCase_ = FlowNetwork(graph, entrances, exits) # set algorithm flow_network.set_maximum_flow_algorithm(PushRelabelExecutor) # and calculate lowerCamelCase_ = flow_network.find_maximum_flow() print(f'maximum flow is {maximum_flow}')
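# Worked expectation for the demo graph above: from source 0 the only path to
# sink 3 is 0 -> 1 -> 2 -> 3 with capacities 7, 6 and 8, so the bottleneck edge
# 1 -> 2 caps the maximum flow at 6; the 9-capacity edge 3 -> 0 points back
# into the source and cannot carry additional flow.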
86
1
import argparse import io import requests import torch from omegaconf import OmegaConf from diffusers import AutoencoderKL from diffusers.pipelines.stable_diffusion.convert_from_ckpt import ( assign_to_checkpoint, conv_attn_to_linear, create_vae_diffusers_config, renew_vae_attention_paths, renew_vae_resnet_paths, ) def __magic_name__ ( __a : List[str] , __a : List[Any] ): '''simple docstring''' UpperCamelCase__ = checkpoint UpperCamelCase__ = {} UpperCamelCase__ = vae_state_dict["""encoder.conv_in.weight"""] UpperCamelCase__ = vae_state_dict["""encoder.conv_in.bias"""] UpperCamelCase__ = vae_state_dict["""encoder.conv_out.weight"""] UpperCamelCase__ = vae_state_dict["""encoder.conv_out.bias"""] UpperCamelCase__ = vae_state_dict["""encoder.norm_out.weight"""] UpperCamelCase__ = vae_state_dict["""encoder.norm_out.bias"""] UpperCamelCase__ = vae_state_dict["""decoder.conv_in.weight"""] UpperCamelCase__ = vae_state_dict["""decoder.conv_in.bias"""] UpperCamelCase__ = vae_state_dict["""decoder.conv_out.weight"""] UpperCamelCase__ = vae_state_dict["""decoder.conv_out.bias"""] UpperCamelCase__ = vae_state_dict["""decoder.norm_out.weight"""] UpperCamelCase__ = vae_state_dict["""decoder.norm_out.bias"""] UpperCamelCase__ = vae_state_dict["""quant_conv.weight"""] UpperCamelCase__ = vae_state_dict["""quant_conv.bias"""] UpperCamelCase__ = vae_state_dict["""post_quant_conv.weight"""] UpperCamelCase__ = vae_state_dict["""post_quant_conv.bias"""] # Retrieves the keys for the encoder down blocks only UpperCamelCase__ = len({""".""".join(layer.split(""".""" )[:3] ) for layer in vae_state_dict if """encoder.down""" in layer} ) UpperCamelCase__ = { layer_id: [key for key in vae_state_dict if f"down.{layer_id}" in key] for layer_id in range(__a ) } # Retrieves the keys for the decoder up blocks only UpperCamelCase__ = len({""".""".join(layer.split(""".""" )[:3] ) for layer in vae_state_dict if """decoder.up""" in layer} ) UpperCamelCase__ = { layer_id: [key for key in vae_state_dict if f"up.{layer_id}" in key] for layer_id in range(__a ) } for i in range(__a ): UpperCamelCase__ = [key for key in down_blocks[i] if f"down.{i}" in key and f"down.{i}.downsample" not in key] if f"encoder.down.{i}.downsample.conv.weight" in vae_state_dict: UpperCamelCase__ = vae_state_dict.pop( f"encoder.down.{i}.downsample.conv.weight" ) UpperCamelCase__ = vae_state_dict.pop( f"encoder.down.{i}.downsample.conv.bias" ) UpperCamelCase__ = renew_vae_resnet_paths(__a ) UpperCamelCase__ = {"""old""": f"down.{i}.block", """new""": f"down_blocks.{i}.resnets"} assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a ) UpperCamelCase__ = [key for key in vae_state_dict if """encoder.mid.block""" in key] UpperCamelCase__ = 2 for i in range(1 , num_mid_res_blocks + 1 ): UpperCamelCase__ = [key for key in mid_resnets if f"encoder.mid.block_{i}" in key] UpperCamelCase__ = renew_vae_resnet_paths(__a ) UpperCamelCase__ = {"""old""": f"mid.block_{i}", """new""": f"mid_block.resnets.{i - 1}"} assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a ) UpperCamelCase__ = [key for key in vae_state_dict if """encoder.mid.attn""" in key] UpperCamelCase__ = renew_vae_attention_paths(__a ) UpperCamelCase__ = {"""old""": """mid.attn_1""", """new""": """mid_block.attentions.0"""} assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a ) conv_attn_to_linear(__a ) for i in range(__a ): UpperCamelCase__ = num_up_blocks - 1 - i UpperCamelCase__ = [ key for key in 
up_blocks[block_id] if f"up.{block_id}" in key and f"up.{block_id}.upsample" not in key ] if f"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict: UpperCamelCase__ = vae_state_dict[ f"decoder.up.{block_id}.upsample.conv.weight" ] UpperCamelCase__ = vae_state_dict[ f"decoder.up.{block_id}.upsample.conv.bias" ] UpperCamelCase__ = renew_vae_resnet_paths(__a ) UpperCamelCase__ = {"""old""": f"up.{block_id}.block", """new""": f"up_blocks.{i}.resnets"} assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a ) UpperCamelCase__ = [key for key in vae_state_dict if """decoder.mid.block""" in key] UpperCamelCase__ = 2 for i in range(1 , num_mid_res_blocks + 1 ): UpperCamelCase__ = [key for key in mid_resnets if f"decoder.mid.block_{i}" in key] UpperCamelCase__ = renew_vae_resnet_paths(__a ) UpperCamelCase__ = {"""old""": f"mid.block_{i}", """new""": f"mid_block.resnets.{i - 1}"} assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a ) UpperCamelCase__ = [key for key in vae_state_dict if """decoder.mid.attn""" in key] UpperCamelCase__ = renew_vae_attention_paths(__a ) UpperCamelCase__ = {"""old""": """mid.attn_1""", """new""": """mid_block.attentions.0"""} assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a ) conv_attn_to_linear(__a ) return new_checkpoint def __magic_name__ ( __a : str , __a : str , ): '''simple docstring''' UpperCamelCase__ = requests.get( """https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml""" ) UpperCamelCase__ = io.BytesIO(r.content ) UpperCamelCase__ = OmegaConf.load(__a ) UpperCamelCase__ = 512 UpperCamelCase__ = """cuda""" if torch.cuda.is_available() else """cpu""" if checkpoint_path.endswith("""safetensors""" ): from safetensors import safe_open UpperCamelCase__ = {} with safe_open(__a , framework="""pt""" , device="""cpu""" ) as f: for key in f.keys(): UpperCamelCase__ = f.get_tensor(__a ) else: UpperCamelCase__ = torch.load(__a , map_location=__a )["""state_dict"""] # Convert the VAE model. UpperCamelCase__ = create_vae_diffusers_config(__a , image_size=__a ) UpperCamelCase__ = custom_convert_ldm_vae_checkpoint(__a , __a ) UpperCamelCase__ = AutoencoderKL(**__a ) vae.load_state_dict(__a ) vae.save_pretrained(__a ) if __name__ == "__main__": lowerCamelCase_ = argparse.ArgumentParser() parser.add_argument('''--vae_pt_path''', default=None, type=str, required=True, help='''Path to the VAE.pt to convert.''') parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to store the converted VAE.''') lowerCamelCase_ = parser.parse_args() vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
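# A short sketch of loading the converted weights back, assuming the conversion
# above succeeded; the dump path is a hypothetical placeholder.
from diffusers import AutoencoderKL


def load_converted_vae(dump_path: str = "./vae-diffusers") -> AutoencoderKL:
    # `save_pretrained` above writes a diffusers-format folder that
    # `from_pretrained` can read back.
    return AutoencoderKL.from_pretrained(dump_path)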
86
from timeit import timeit


def __magic_name__ ( __a : int ):
    '''simple docstring'''
    if number < 0:
        raise ValueError("""the value of input must not be negative""" )
    UpperCamelCase__ = 0
    while number:
        number &= number - 1
        result += 1
    return result


def __magic_name__ ( __a : int ):
    '''simple docstring'''
    if number < 0:
        raise ValueError("""the value of input must not be negative""" )
    UpperCamelCase__ = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result


def __magic_name__ ( ):
    '''simple docstring'''

    def do_benchmark(__a : int ) -> None:
        UpperCamelCase__ = """import __main__ as z"""
        print(f"Benchmark when {number = }:" )
        print(f"{get_set_bits_count_using_modulo_operator(__a ) = }" )
        UpperCamelCase__ = timeit("""z.get_set_bits_count_using_modulo_operator(25)""" , setup=__a )
        print(f"timeit() runs in {timing} seconds" )
        print(f"{get_set_bits_count_using_brian_kernighans_algorithm(__a ) = }" )
        UpperCamelCase__ = timeit(
            """z.get_set_bits_count_using_brian_kernighans_algorithm(25)""" ,
            setup=__a ,
        )
        print(f"timeit() runs in {timing} seconds" )

    for number in (25, 37, 58, 0):
        do_benchmark(__a )
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
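# Worked example: 25 = 0b11001 has three set bits. Kernighan's update
# `number &= number - 1` clears the lowest set bit on every pass
# (25 -> 24 -> 16 -> 0), so it loops once per set bit, while the modulo
# variant loops once per bit position (five times for 25).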
86
1
import argparse import json import os import pickle import shutil import numpy as np import torch from distiller import Distiller from lm_seqs_dataset import LmSeqsDataset from transformers import ( BertConfig, BertForMaskedLM, BertTokenizer, DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer, GPT2Config, GPT2LMHeadModel, GPT2Tokenizer, RobertaConfig, RobertaForMaskedLM, RobertaTokenizer, ) from utils import git_log, init_gpu_params, logger, set_seed lowerCamelCase_ = { '''distilbert''': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer), '''roberta''': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer), '''bert''': (BertConfig, BertForMaskedLM, BertTokenizer), '''gpt2''': (GPT2Config, GPT2LMHeadModel, GPT2Tokenizer), } def __magic_name__ ( __a : Any ): '''simple docstring''' assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0) assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0) if args.mlm: assert os.path.isfile(args.token_counts ) assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"]) else: assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"]) assert args.teacher_type == args.student_type or ( args.student_type == "distilbert" and args.teacher_type == "bert" ) assert os.path.isfile(args.student_config ) if args.student_pretrained_weights is not None: assert os.path.isfile(args.student_pretrained_weights ) if args.freeze_token_type_embds: assert args.student_type in ["roberta"] assert args.alpha_ce >= 0.0 assert args.alpha_mlm >= 0.0 assert args.alpha_clm >= 0.0 assert args.alpha_mse >= 0.0 assert args.alpha_cos >= 0.0 assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0 def __magic_name__ ( __a : List[Any] , __a : Any ): '''simple docstring''' if args.student_type == "roberta": UpperCamelCase__ = False elif args.student_type == "gpt2": UpperCamelCase__ = False def __magic_name__ ( __a : int , __a : Dict ): '''simple docstring''' if args.student_type == "roberta": UpperCamelCase__ = False def __magic_name__ ( ): '''simple docstring''' UpperCamelCase__ = argparse.ArgumentParser(description="""Training""" ) parser.add_argument("""--force""" , action="""store_true""" , help="""Overwrite dump_path if it already exists.""" ) parser.add_argument( """--dump_path""" , type=__a , required=__a , help="""The output directory (log, checkpoints, parameters, etc.)""" ) parser.add_argument( """--data_file""" , type=__a , required=__a , help="""The binarized file (tokenized + tokens_to_ids) and grouped by sequence.""" , ) parser.add_argument( """--student_type""" , type=__a , choices=["""distilbert""", """roberta""", """gpt2"""] , required=__a , help="""The student type (DistilBERT, RoBERTa).""" , ) parser.add_argument("""--student_config""" , type=__a , required=__a , help="""Path to the student configuration.""" ) parser.add_argument( """--student_pretrained_weights""" , default=__a , type=__a , help="""Load student initialization checkpoint.""" ) parser.add_argument( """--teacher_type""" , choices=["""bert""", """roberta""", """gpt2"""] , required=__a , help="""Teacher type (BERT, RoBERTa).""" ) parser.add_argument("""--teacher_name""" , type=__a , required=__a , help="""The teacher model.""" ) parser.add_argument("""--temperature""" , default=2.0 , type=__a , help="""Temperature for the softmax temperature.""" ) parser.add_argument( """--alpha_ce""" , default=0.5 , type=__a ,
help="""Linear weight for the distillation loss. Must be >=0.""" ) parser.add_argument( """--alpha_mlm""" , default=0.0 , type=__a , help="""Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.""" , ) parser.add_argument("""--alpha_clm""" , default=0.5 , type=__a , help="""Linear weight for the CLM loss. Must be >=0.""" ) parser.add_argument("""--alpha_mse""" , default=0.0 , type=__a , help="""Linear weight of the MSE loss. Must be >=0.""" ) parser.add_argument( """--alpha_cos""" , default=0.0 , type=__a , help="""Linear weight of the cosine embedding loss. Must be >=0.""" ) parser.add_argument( """--mlm""" , action="""store_true""" , help="""The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.""" ) parser.add_argument( """--mlm_mask_prop""" , default=0.15 , type=__a , help="""Proportion of tokens for which we need to make a prediction.""" , ) parser.add_argument("""--word_mask""" , default=0.8 , type=__a , help="""Proportion of tokens to mask out.""" ) parser.add_argument("""--word_keep""" , default=0.1 , type=__a , help="""Proportion of tokens to keep.""" ) parser.add_argument("""--word_rand""" , default=0.1 , type=__a , help="""Proportion of tokens to randomly replace.""" ) parser.add_argument( """--mlm_smoothing""" , default=0.7 , type=__a , help="""Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).""" , ) parser.add_argument("""--token_counts""" , type=__a , help="""The token counts in the data_file for MLM.""" ) parser.add_argument( """--restrict_ce_to_mask""" , action="""store_true""" , help="""If true, compute the distillation loss only the [MLM] prediction distribution.""" , ) parser.add_argument( """--freeze_pos_embs""" , action="""store_true""" , help="""Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only.""" , ) parser.add_argument( """--freeze_token_type_embds""" , action="""store_true""" , help="""Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only.""" , ) parser.add_argument("""--n_epoch""" , type=__a , default=3 , help="""Number of pass on the whole dataset.""" ) parser.add_argument("""--batch_size""" , type=__a , default=5 , help="""Batch size (for each process).""" ) parser.add_argument( """--group_by_size""" , action="""store_false""" , help="""If true, group sequences that have similar length into the same batch. 
Default is true.""" , ) parser.add_argument( """--gradient_accumulation_steps""" , type=__a , default=50 , help="""Gradient accumulation for larger training batches.""" , ) parser.add_argument("""--warmup_prop""" , default=0.05 , type=__a , help="""Linear warmup proportion.""" ) parser.add_argument("""--weight_decay""" , default=0.0 , type=__a , help="""Weight decay if we apply some.""" ) parser.add_argument("""--learning_rate""" , default=5E-4 , type=__a , help="""The initial learning rate for Adam.""" ) parser.add_argument("""--adam_epsilon""" , default=1E-6 , type=__a , help="""Epsilon for Adam optimizer.""" ) parser.add_argument("""--max_grad_norm""" , default=5.0 , type=__a , help="""Max gradient norm.""" ) parser.add_argument("""--initializer_range""" , default=0.02 , type=__a , help="""Random initialization range.""" ) parser.add_argument( """--fp16""" , action="""store_true""" , help="""Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit""" , ) parser.add_argument( """--fp16_opt_level""" , type=__a , default="""O1""" , help=( """For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3'].""" """ See details at https://nvidia.github.io/apex/amp.html""" ) , ) parser.add_argument("""--n_gpu""" , type=__a , default=1 , help="""Number of GPUs in the node.""" ) parser.add_argument("""--local_rank""" , type=__a , default=-1 , help="""Distributed training - Local rank""" ) parser.add_argument("""--seed""" , type=__a , default=56 , help="""Random seed""" ) parser.add_argument("""--log_interval""" , type=__a , default=500 , help="""Tensorboard logging interval.""" ) parser.add_argument("""--checkpoint_interval""" , type=__a , default=4_000 , help="""Checkpoint interval.""" ) UpperCamelCase__ = parser.parse_args() sanity_checks(__a ) # ARGS # init_gpu_params(__a ) set_seed(__a ) if args.is_master: if os.path.exists(args.dump_path ): if not args.force: raise ValueError( f"Serialization dir {args.dump_path} already exists, but you have not specified whether to overwrite" """ it. Use `--force` if you want to overwrite it""" ) else: shutil.rmtree(args.dump_path ) if not os.path.exists(args.dump_path ): os.makedirs(args.dump_path ) logger.info(f"Experiment will be dumped and logged in {args.dump_path}" ) # SAVE PARAMS # logger.info(f"Param: {args}" ) with open(os.path.join(args.dump_path , """parameters.json""" ) , """w""" ) as f: json.dump(vars(__a ) , __a , indent=4 ) git_log(args.dump_path ) UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = MODEL_CLASSES[args.student_type] UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = MODEL_CLASSES[args.teacher_type] # TOKENIZER # UpperCamelCase__ = teacher_tokenizer_class.from_pretrained(args.teacher_name ) UpperCamelCase__ = {} for tok_name, tok_symbol in tokenizer.special_tokens_map.items(): UpperCamelCase__ = tokenizer.all_special_tokens.index(__a ) UpperCamelCase__ = tokenizer.all_special_ids[idx] logger.info(f"Special tokens {special_tok_ids}" ) UpperCamelCase__ = special_tok_ids UpperCamelCase__ = tokenizer.max_model_input_sizes[args.teacher_name] # DATA LOADER # logger.info(f"Loading data from {args.data_file}" ) with open(args.data_file , """rb""" ) as fp: UpperCamelCase__ = pickle.load(__a ) if args.mlm: logger.info(f"Loading token counts from {args.token_counts} (already pre-computed)" ) with open(args.token_counts , """rb""" ) as fp: UpperCamelCase__ = pickle.load(__a ) UpperCamelCase__ = np.maximum(__a , 1 ) ** -args.mlm_smoothing for idx in special_tok_ids.values(): UpperCamelCase__ = 0.0
# do not predict special tokens UpperCamelCase__ = torch.from_numpy(__a ) else: UpperCamelCase__ = None UpperCamelCase__ = LmSeqsDataset(params=__a , data=__a ) logger.info("""Data loader created.""" ) # STUDENT # logger.info(f"Loading student config from {args.student_config}" ) UpperCamelCase__ = student_config_class.from_pretrained(args.student_config ) UpperCamelCase__ = True if args.student_pretrained_weights is not None: logger.info(f"Loading pretrained weights from {args.student_pretrained_weights}" ) UpperCamelCase__ = student_model_class.from_pretrained(args.student_pretrained_weights , config=__a ) else: UpperCamelCase__ = student_model_class(__a ) if args.n_gpu > 0: student.to(f"cuda:{args.local_rank}" ) logger.info("""Student loaded.""" ) # TEACHER # UpperCamelCase__ = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=__a ) if args.n_gpu > 0: teacher.to(f"cuda:{args.local_rank}" ) logger.info(f"Teacher loaded from {args.teacher_name}." ) # FREEZING # if args.freeze_pos_embs: freeze_pos_embeddings(__a , __a ) if args.freeze_token_type_embds: freeze_token_type_embeddings(__a , __a ) # SANITY CHECKS # assert student.config.vocab_size == teacher.config.vocab_size assert student.config.hidden_size == teacher.config.hidden_size assert student.config.max_position_embeddings == teacher.config.max_position_embeddings if args.mlm: assert token_probs.size(0 ) == stu_architecture_config.vocab_size # DISTILLER # torch.cuda.empty_cache() UpperCamelCase__ = Distiller( params=__a , dataset=__a , token_probs=__a , student=__a , teacher=__a ) distiller.train() logger.info("""Let's go get some drinks.""" ) if __name__ == "__main__": main()
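# A hedged example invocation for the trainer above (the script file name and
# all paths are hypothetical); per `sanity_checks`, exactly one of the MLM/CLM
# weights must be non-zero and `--token_counts` is required when `--mlm` is set:
#
#   python train.py --force --dump_path ./serialization_dir \
#       --data_file ./binarized_data.pickle --token_counts ./token_counts.pickle \
#       --mlm --alpha_mlm 0.5 --alpha_clm 0.0 --alpha_ce 0.5 \
#       --student_type distilbert --student_config ./distilbert.json \
#       --teacher_type bert --teacher_name bert-base-uncased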
86
import copy import os import tempfile from unittest import TestCase from unittest.mock import patch import numpy as np import pyarrow as pa import pyarrow.parquet as pq import pytest from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence from datasets.features import ArrayaD, ClassLabel, Features, Image, Value from datasets.features.features import ArrayaDExtensionType, cast_to_python_objects from datasets.keyhash import DuplicatedKeysError, InvalidKeyError from .utils import require_pil class __A( __lowerCamelCase ): """simple docstring""" def UpperCAmelCase_ (self ): UpperCamelCase__ = pa.array(TypedSequence([1, 2, 3] ) ) self.assertEqual(arr.type , pa.intaa() ) def UpperCAmelCase_ (self ): with self.assertRaises(SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = pa.array(TypedSequence([1, 2, 3] ) , type=pa.intaa() ) def UpperCAmelCase_ (self ): with self.assertRaises(SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = pa.array(TypedSequence([1, 2, 3] , try_type=Value("""bool""" ) , type=Value("""int64""" ) ) ) def UpperCAmelCase_ (self ): UpperCamelCase__ = pa.array(TypedSequence([1, 2, 3] , type=Value("""int32""" ) ) ) self.assertEqual(arr.type , pa.intaa() ) def UpperCAmelCase_ (self ): with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ): UpperCamelCase__ = pa.array(TypedSequence(["""foo""", """bar"""] , type=Value("""int64""" ) ) ) def UpperCAmelCase_ (self ): UpperCamelCase__ = pa.array(TypedSequence([1, 2, 3] , try_type=Value("""int32""" ) ) ) self.assertEqual(arr.type , pa.intaa() ) def UpperCAmelCase_ (self ): UpperCamelCase__ = pa.array(TypedSequence(["""foo""", """bar"""] , try_type=Value("""int64""" ) ) ) self.assertEqual(arr.type , pa.string() ) def UpperCAmelCase_ (self ): UpperCamelCase__ = pa.array(TypedSequence([[[1, 2, 3]]] , type=ArrayaD((1, 3) , """int64""" ) ) ) self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , """int64""" ) ) def UpperCAmelCase_ (self ): with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ): UpperCamelCase__ = pa.array(TypedSequence(["""foo""", """bar"""] , type=ArrayaD((1, 3) , """int64""" ) ) ) def UpperCAmelCase_ (self ): UpperCamelCase__ = pa.array(TypedSequence([[[1, 2, 3]]] , try_type=ArrayaD((1, 3) , """int64""" ) ) ) self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , """int64""" ) ) def UpperCAmelCase_ (self ): UpperCamelCase__ = pa.array(TypedSequence(["""foo""", """bar"""] , try_type=ArrayaD((1, 3) , """int64""" ) ) ) self.assertEqual(arr.type , pa.string() ) @require_pil def UpperCAmelCase_ (self ): import PIL.Image UpperCamelCase__ = PIL.Image.fromarray(np.arange(10 , dtype=np.uinta ).reshape(2 , 5 ) ) with patch( """datasets.arrow_writer.cast_to_python_objects""" , side_effect=SCREAMING_SNAKE_CASE_ ) as mock_cast_to_python_objects: UpperCamelCase__ = pa.array(TypedSequence([{"""path""": None, """bytes""": b"""image_bytes"""}, pil_image] , type=Image() ) ) UpperCamelCase__ , UpperCamelCase__ = mock_cast_to_python_objects.call_args_list[-1] self.assertIn("""optimize_list_casting""" , SCREAMING_SNAKE_CASE_ ) self.assertFalse(kwargs["""optimize_list_casting"""] ) def __magic_name__ ( __a : List[Any] , __a : int ): '''simple docstring''' UpperCamelCase__ = pa.BufferReader(__a ) if isinstance(__a , pa.Buffer ) else pa.memory_map(__a ) UpperCamelCase__ = pa.ipc.open_stream(__a ) UpperCamelCase__ = f.read_all() assert len(pa_table.to_batches() ) == expected_num_chunks assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]} del pa_table 
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 10] ) @pytest.mark.parametrize( """fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] ) def __magic_name__ ( __a : Tuple , __a : int ): '''simple docstring''' UpperCamelCase__ = pa.BufferOutputStream() UpperCamelCase__ = pa.schema(__a ) if fields else None with ArrowWriter(stream=__a , schema=__a , writer_batch_size=__a ) as writer: writer.write({"""col_1""": """foo""", """col_2""": 1} ) writer.write({"""col_1""": """bar""", """col_2""": 2} ) UpperCamelCase__ , UpperCamelCase__ = writer.finalize() assert num_examples == 2 assert num_bytes > 0 if not fields: UpperCamelCase__ = {"""col_1""": pa.string(), """col_2""": pa.intaa()} assert writer._schema == pa.schema(__a , metadata=writer._schema.metadata ) _check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 ) def __magic_name__ ( ): '''simple docstring''' UpperCamelCase__ = pa.BufferOutputStream() UpperCamelCase__ = Features({"""labels""": ClassLabel(names=["""neg""", """pos"""] )} ) with ArrowWriter(stream=__a , features=__a ) as writer: writer.write({"""labels""": 0} ) writer.write({"""labels""": 1} ) UpperCamelCase__ , UpperCamelCase__ = writer.finalize() assert num_examples == 2 assert num_bytes > 0 assert writer._schema == features.arrow_schema assert writer._schema.metadata == features.arrow_schema.metadata UpperCamelCase__ = pa.BufferReader(output.getvalue() ) UpperCamelCase__ = pa.ipc.open_stream(__a ) UpperCamelCase__ = f.read_all() UpperCamelCase__ = pa_table.schema assert pa_table.num_rows == 2 assert schema == features.arrow_schema assert schema.metadata == features.arrow_schema.metadata assert features == Features.from_arrow_schema(__a ) @pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 10] ) def __magic_name__ ( __a : str ): '''simple docstring''' UpperCamelCase__ = pa.BufferOutputStream() with ArrowWriter( stream=__a , writer_batch_size=__a , hash_salt="""split_name""" , check_duplicates=__a , ) as writer: with pytest.raises(__a ): writer.write({"""col_1""": """foo""", """col_2""": 1} , key=[1, 2] ) UpperCamelCase__ , UpperCamelCase__ = writer.finalize() @pytest.mark.parametrize("""writer_batch_size""" , [None, 2, 10] ) def __magic_name__ ( __a : str ): '''simple docstring''' UpperCamelCase__ = pa.BufferOutputStream() with ArrowWriter( stream=__a , writer_batch_size=__a , hash_salt="""split_name""" , check_duplicates=__a , ) as writer: with pytest.raises(__a ): writer.write({"""col_1""": """foo""", """col_2""": 1} , key=10 ) writer.write({"""col_1""": """bar""", """col_2""": 2} , key=10 ) UpperCamelCase__ , UpperCamelCase__ = writer.finalize() @pytest.mark.parametrize("""writer_batch_size""" , [None, 2, 10] ) def __magic_name__ ( __a : Union[str, Any] ): '''simple docstring''' UpperCamelCase__ = pa.BufferOutputStream() with ArrowWriter( stream=__a , writer_batch_size=__a , hash_salt="""split_name""" , check_duplicates=__a , ) as writer: writer.write({"""col_1""": """foo""", """col_2""": 1} , key=1 ) writer.write({"""col_1""": """bar""", """col_2""": 2} , key=2 ) UpperCamelCase__ , UpperCamelCase__ = writer.finalize() assert num_examples == 2 assert num_bytes > 0 _check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 ) @pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 10] ) @pytest.mark.parametrize( """fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, 
{"""col_1""": pa.string(), """col_2""": pa.intaa()}] ) def __magic_name__ ( __a : List[Any] , __a : Optional[int] ): '''simple docstring''' UpperCamelCase__ = pa.BufferOutputStream() UpperCamelCase__ = pa.schema(__a ) if fields else None with ArrowWriter(stream=__a , schema=__a , writer_batch_size=__a ) as writer: writer.write_batch({"""col_1""": ["""foo""", """bar"""], """col_2""": [1, 2]} ) writer.write_batch({"""col_1""": [], """col_2""": []} ) UpperCamelCase__ , UpperCamelCase__ = writer.finalize() assert num_examples == 2 assert num_bytes > 0 if not fields: UpperCamelCase__ = {"""col_1""": pa.string(), """col_2""": pa.intaa()} assert writer._schema == pa.schema(__a , metadata=writer._schema.metadata ) _check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 ) @pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 10] ) @pytest.mark.parametrize( """fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] ) def __magic_name__ ( __a : Union[str, Any] , __a : Any ): '''simple docstring''' UpperCamelCase__ = pa.BufferOutputStream() UpperCamelCase__ = pa.schema(__a ) if fields else None with ArrowWriter(stream=__a , schema=__a , writer_batch_size=__a ) as writer: writer.write_table(pa.Table.from_pydict({"""col_1""": ["""foo""", """bar"""], """col_2""": [1, 2]} ) ) UpperCamelCase__ , UpperCamelCase__ = writer.finalize() assert num_examples == 2 assert num_bytes > 0 if not fields: UpperCamelCase__ = {"""col_1""": pa.string(), """col_2""": pa.intaa()} assert writer._schema == pa.schema(__a , metadata=writer._schema.metadata ) _check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 ) @pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 10] ) @pytest.mark.parametrize( """fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] ) def __magic_name__ ( __a : Optional[Any] , __a : int ): '''simple docstring''' UpperCamelCase__ = pa.BufferOutputStream() UpperCamelCase__ = pa.schema(__a ) if fields else None with ArrowWriter(stream=__a , schema=__a , writer_batch_size=__a ) as writer: writer.write_row(pa.Table.from_pydict({"""col_1""": ["""foo"""], """col_2""": [1]} ) ) writer.write_row(pa.Table.from_pydict({"""col_1""": ["""bar"""], """col_2""": [2]} ) ) UpperCamelCase__ , UpperCamelCase__ = writer.finalize() assert num_examples == 2 assert num_bytes > 0 if not fields: UpperCamelCase__ = {"""col_1""": pa.string(), """col_2""": pa.intaa()} assert writer._schema == pa.schema(__a , metadata=writer._schema.metadata ) _check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 ) def __magic_name__ ( ): '''simple docstring''' with tempfile.TemporaryDirectory() as tmp_dir: UpperCamelCase__ = {"""col_1""": pa.string(), """col_2""": pa.intaa()} UpperCamelCase__ = os.path.join(__a , """test.arrow""" ) with ArrowWriter(path=__a , schema=pa.schema(__a ) ) as writer: writer.write_batch({"""col_1""": ["""foo""", """bar"""], """col_2""": [1, 2]} ) UpperCamelCase__ , UpperCamelCase__ = writer.finalize() assert num_examples == 2 assert num_bytes > 0 assert writer._schema == pa.schema(__a , metadata=writer._schema.metadata ) _check_output(__a , 1 ) def __magic_name__ ( __a : Any ): '''simple docstring''' if pa.types.is_list(__a ): return get_base_dtype(arr_type.value_type ) else: return arr_type def __magic_name__ ( __a : Optional[int] , __a : 
Any ): '''simple docstring''' if isinstance(lst[0] , __a ): change_first_primitive_element_in_list(lst[0] , __a ) else: UpperCamelCase__ = value @pytest.mark.parametrize("""optimized_int_type, expected_dtype""" , [(None, pa.intaa()), (Value("""int32""" ), pa.intaa())] ) @pytest.mark.parametrize("""sequence""" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] ) def __magic_name__ ( __a : Union[str, Any] , __a : Optional[int] , __a : Tuple ): '''simple docstring''' UpperCamelCase__ = pa.array(TypedSequence(__a , optimized_int_type=__a ) ) assert get_base_dtype(arr.type ) == expected_dtype @pytest.mark.parametrize( """col, expected_dtype""" , [ ("""attention_mask""", pa.inta()), ("""special_tokens_mask""", pa.inta()), ("""token_type_ids""", pa.inta()), ("""input_ids""", pa.intaa()), ("""other""", pa.intaa()), ] , ) @pytest.mark.parametrize("""sequence""" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] ) def __magic_name__ ( __a : Optional[int] , __a : str , __a : Optional[Any] ): '''simple docstring''' UpperCamelCase__ = pa.array(OptimizedTypedSequence(__a , col=__a ) ) assert get_base_dtype(arr.type ) == expected_dtype # not in range if col != "other": # avoids errors due to in-place modifications UpperCamelCase__ = copy.deepcopy(__a ) UpperCamelCase__ = np.iinfo(expected_dtype.to_pandas_dtype() ).max + 1 change_first_primitive_element_in_list(__a , __a ) UpperCamelCase__ = pa.array(OptimizedTypedSequence(__a , col=__a ) ) assert get_base_dtype(arr.type ) == pa.intaa() @pytest.mark.parametrize("""raise_exception""" , [False, True] ) def __magic_name__ ( __a : List[str] , __a : List[str] ): '''simple docstring''' UpperCamelCase__ = str(tmp_path / """dataset-train.arrow""" ) try: with ArrowWriter(path=__a ) as writer: if raise_exception: raise pa.lib.ArrowInvalid() else: writer.stream.close() except pa.lib.ArrowInvalid: pass finally: assert writer.stream.closed def __magic_name__ ( __a : Tuple ): '''simple docstring''' UpperCamelCase__ = """mock://dataset-train.arrow""" with ArrowWriter(path=__a , storage_options=mockfs.storage_options ) as writer: assert isinstance(writer._fs , type(__a ) ) assert writer._fs.storage_options == mockfs.storage_options writer.write({"""col_1""": """foo""", """col_2""": 1} ) writer.write({"""col_1""": """bar""", """col_2""": 2} ) UpperCamelCase__ , UpperCamelCase__ = writer.finalize() assert num_examples == 2 assert num_bytes > 0 assert mockfs.exists(__a ) def __magic_name__ ( ): '''simple docstring''' UpperCamelCase__ = pa.BufferOutputStream() with ParquetWriter(stream=__a ) as writer: writer.write({"""col_1""": """foo""", """col_2""": 1} ) writer.write({"""col_1""": """bar""", """col_2""": 2} ) UpperCamelCase__ , UpperCamelCase__ = writer.finalize() assert num_examples == 2 assert num_bytes > 0 UpperCamelCase__ = pa.BufferReader(output.getvalue() ) UpperCamelCase__ = pq.read_table(__a ) assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]} @require_pil @pytest.mark.parametrize("""embed_local_files""" , [False, True] ) def __magic_name__ ( __a : str , __a : Any ): '''simple docstring''' import PIL.Image UpperCamelCase__ = str(tmp_path / """test_image_rgb.jpg""" ) PIL.Image.fromarray(np.zeros((5, 5) , dtype=np.uinta ) ).save(__a , format="""png""" ) UpperCamelCase__ = pa.BufferOutputStream() with ParquetWriter( stream=__a , features=Features({"""image""": Image()} ) , embed_local_files=__a ) as writer: writer.write({"""image""": image_path} ) writer.finalize() UpperCamelCase__ = pa.BufferReader(output.getvalue() ) UpperCamelCase__ = pq.read_table(__a ) 
UpperCamelCase__ = pa_table.to_pydict() if embed_local_files: assert isinstance(out["""image"""][0]["""path"""] , __a ) with open(__a , """rb""" ) as f: assert out["image"][0]["bytes"] == f.read() else: assert out["image"][0]["path"] == image_path assert out["image"][0]["bytes"] is None def __magic_name__ ( ): '''simple docstring''' UpperCamelCase__ = pa.schema([pa.field("""col_1""" , pa.string() , nullable=__a )] ) UpperCamelCase__ = pa.BufferOutputStream() with ArrowWriter(stream=__a ) as writer: writer._build_writer(inferred_schema=__a ) assert writer._schema == pa.schema([pa.field("""col_1""" , pa.string() )] )
86
1
from math import isqrt def __magic_name__ ( __a : int ): '''simple docstring''' return all(number % divisor != 0 for divisor in range(2 , isqrt(__a ) + 1 ) ) def __magic_name__ ( __a : int = 10**6 ): '''simple docstring''' UpperCamelCase__ = 0 UpperCamelCase__ = 1 UpperCamelCase__ = 7 while prime_candidate < max_prime: primes_count += is_prime(__a ) cube_index += 1 prime_candidate += 6 * cube_index return primes_count if __name__ == "__main__": print(f'{solution() = }')
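The candidate sequence above (7, 19, 37, 61, ...) is generated by adding 6 * cube_index at each step; these are exactly the differences of consecutive cubes, (n + 1)^3 - n^3 = 3n^2 + 3n + 1. A minimal sketch checking that identity for the first few candidates (helper name is illustrative, not from the sample):

# Verify that the "prime_candidate += 6 * cube_index" update walks through
# the differences of consecutive cubes, 3n^2 + 3n + 1.
def cube_difference(n: int) -> int:
    return (n + 1) ** 3 - n**3  # expands to 3n^2 + 3n + 1

candidate, index = 7, 1
for n in range(1, 6):
    assert candidate == cube_difference(n) == 3 * n * n + 3 * n + 1
    index += 1
    candidate += 6 * index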
86
from sklearn.metrics import matthews_corrcoef import datasets lowerCamelCase_ = ''' Compute the Matthews correlation coefficient (MCC) The Matthews correlation coefficient is used in machine learning as a measure of the quality of binary and multiclass classifications. It takes into account true and false positives and negatives and is generally regarded as a balanced measure which can be used even if the classes are of very different sizes. The MCC is in essence a correlation coefficient value between -1 and +1. A coefficient of +1 represents a perfect prediction, 0 an average random prediction and -1 an inverse prediction. The statistic is also known as the phi coefficient. [source: Wikipedia] ''' lowerCamelCase_ = ''' Args: predictions (list of int): Predicted labels, as returned by a model. references (list of int): Ground truth labels. sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`. Returns: matthews_correlation (dict containing float): Matthews correlation. Examples: Example 1, a basic example with only predictions and references as inputs: >>> matthews_metric = datasets.load_metric("matthews_correlation") >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2], ... predictions=[1, 2, 2, 0, 3, 3]) >>> print(round(results[\'matthews_correlation\'], 2)) 0.54 Example 2, the same example as above, but also including sample weights: >>> matthews_metric = datasets.load_metric("matthews_correlation") >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2], ... predictions=[1, 2, 2, 0, 3, 3], ... sample_weight=[0.5, 3, 1, 1, 1, 2]) >>> print(round(results[\'matthews_correlation\'], 2)) 0.1 Example 3, the same example as above, but with sample weights that cause a negative correlation: >>> matthews_metric = datasets.load_metric("matthews_correlation") >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2], ... predictions=[1, 2, 2, 0, 3, 3], ... sample_weight=[0.5, 1, 0, 0, 0, 1]) >>> print(round(results[\'matthews_correlation\'], 2)) -0.25 ''' lowerCamelCase_ = '''\ @article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011} } ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __A( datasets.Metric ): """simple docstring""" def UpperCAmelCase_ (self ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Value("""int32""" ), """references""": datasets.Value("""int32""" ), } ) , reference_urls=[ """https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html""" ] , ) def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None ): return { "matthews_correlation": float(matthews_corrcoef(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , sample_weight=SCREAMING_SNAKE_CASE_ ) ), }
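For the binary case described in the docstring, MCC has the closed form (TP*TN - FP*FN) / sqrt((TP+FP)(TP+FN)(TN+FP)(TN+FN)). A small sketch cross-checking that formula against sklearn's matthews_corrcoef (variable and function names here are illustrative):

from math import sqrt
from sklearn.metrics import matthews_corrcoef

def mcc_from_counts(tp: int, tn: int, fp: int, fn: int) -> float:
    # MCC = (TP*TN - FP*FN) / sqrt((TP+FP)(TP+FN)(TN+FP)(TN+FN));
    # conventionally 0.0 when any marginal count is zero.
    denom = sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
    return (tp * tn - fp * fn) / denom if denom else 0.0

y_true = [1, 1, 1, 0, 0, 0]
y_pred = [1, 1, 0, 0, 0, 1]
tp = sum(t == p == 1 for t, p in zip(y_true, y_pred))        # 2
tn = sum(t == p == 0 for t, p in zip(y_true, y_pred))        # 2
fp = sum(t == 0 and p == 1 for t, p in zip(y_true, y_pred))  # 1
fn = sum(t == 1 and p == 0 for t, p in zip(y_true, y_pred))  # 1
assert abs(mcc_from_counts(tp, tn, fp, fn) - matthews_corrcoef(y_true, y_pred)) < 1e-12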
86
1
from __future__ import annotations def __magic_name__ ( __a : str ): '''simple docstring''' return [ord(__a ) - 96 for elem in plain] def __magic_name__ ( __a : list[int] ): '''simple docstring''' return "".join(chr(elem + 96 ) for elem in encoded ) def __magic_name__ ( ): '''simple docstring''' UpperCamelCase__ = encode(input("""-> """ ).strip().lower() ) print("""Encoded: """ , __a ) print("""Decoded:""" , decode(__a ) ) if __name__ == "__main__": main()
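Since the mapping a=1 ... z=26 is a bijection on lowercase letters, decoding an encoding returns the original string. A self-contained restatement with a round-trip check (function names here are illustrative, not from the sample):

def a1z26_encode(plain: str) -> list[int]:
    # 'a' -> 1, ..., 'z' -> 26 (ord('a') == 97)
    return [ord(ch) - 96 for ch in plain]

def a1z26_decode(encoded: list[int]) -> str:
    return "".join(chr(n + 96) for n in encoded)

assert a1z26_encode("abc") == [1, 2, 3]
assert a1z26_decode(a1z26_encode("hello")) == "hello"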
86
def __magic_name__ ( __a : str ): '''simple docstring''' return credit_card_number.startswith(("""34""", """35""", """37""", """4""", """5""", """6""") ) def __magic_name__ ( __a : str ): '''simple docstring''' UpperCamelCase__ = credit_card_number UpperCamelCase__ = 0 UpperCamelCase__ = len(__a ) - 2 for i in range(__a , -1 , -2 ): # double the value of every second digit UpperCamelCase__ = int(cc_number[i] ) digit *= 2 # If doubling a digit results in a two-digit number, # i.e. greater than 9 (e.g., 6 × 2 = 12), # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6), # to get a single-digit number. if digit > 9: digit %= 10 digit += 1 UpperCamelCase__ = cc_number[:i] + str(__a ) + cc_number[i + 1 :] total += digit # Sum up the remaining digits for i in range(len(__a ) - 1 , -1 , -2 ): total += int(cc_number[i] ) return total % 10 == 0 def __magic_name__ ( __a : str ): '''simple docstring''' UpperCamelCase__ = f"{credit_card_number} is an invalid credit card number because" if not credit_card_number.isdigit(): print(f"{error_message} it has non-numerical characters." ) return False if not 13 <= len(__a ) <= 16: print(f"{error_message} of its length." ) return False if not validate_initial_digits(__a ): print(f"{error_message} of its first two digits." ) return False if not luhn_validation(__a ): print(f"{error_message} it fails the Luhn check." ) return False print(f"{credit_card_number} is a valid credit card number." ) return True if __name__ == "__main__": import doctest doctest.testmod() validate_credit_card_number('''4111111111111111''') validate_credit_card_number('''32323''')
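A worked pass of the Luhn rule described in the comments above, using the standard test number 79927398713: every second digit from the right is doubled, and doubled values above 9 are reduced by summing their digits (equivalently, subtracting 9):

# Luhn checksum walk-through; 79927398713 is a standard Luhn test number.
number = "79927398713"
total = 0
for i, ch in enumerate(reversed(number)):
    d = int(ch)
    if i % 2 == 1:  # every second digit from the right
        d *= 2
        if d > 9:
            d -= 9  # same as adding the two digits of d (e.g. 16 -> 1 + 6 = 7)
    total += d
assert total % 10 == 0  # valid numbers sum to a multiple of 10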
86
1
import copy import os import tempfile from unittest import TestCase from unittest.mock import patch import numpy as np import pyarrow as pa import pyarrow.parquet as pq import pytest from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence from datasets.features import ArrayaD, ClassLabel, Features, Image, Value from datasets.features.features import ArrayaDExtensionType, cast_to_python_objects from datasets.keyhash import DuplicatedKeysError, InvalidKeyError from .utils import require_pil class __A( __lowerCamelCase ): """simple docstring""" def UpperCAmelCase_ (self ): UpperCamelCase__ = pa.array(TypedSequence([1, 2, 3] ) ) self.assertEqual(arr.type , pa.intaa() ) def UpperCAmelCase_ (self ): with self.assertRaises(SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = pa.array(TypedSequence([1, 2, 3] ) , type=pa.intaa() ) def UpperCAmelCase_ (self ): with self.assertRaises(SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = pa.array(TypedSequence([1, 2, 3] , try_type=Value("""bool""" ) , type=Value("""int64""" ) ) ) def UpperCAmelCase_ (self ): UpperCamelCase__ = pa.array(TypedSequence([1, 2, 3] , type=Value("""int32""" ) ) ) self.assertEqual(arr.type , pa.intaa() ) def UpperCAmelCase_ (self ): with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ): UpperCamelCase__ = pa.array(TypedSequence(["""foo""", """bar"""] , type=Value("""int64""" ) ) ) def UpperCAmelCase_ (self ): UpperCamelCase__ = pa.array(TypedSequence([1, 2, 3] , try_type=Value("""int32""" ) ) ) self.assertEqual(arr.type , pa.intaa() ) def UpperCAmelCase_ (self ): UpperCamelCase__ = pa.array(TypedSequence(["""foo""", """bar"""] , try_type=Value("""int64""" ) ) ) self.assertEqual(arr.type , pa.string() ) def UpperCAmelCase_ (self ): UpperCamelCase__ = pa.array(TypedSequence([[[1, 2, 3]]] , type=ArrayaD((1, 3) , """int64""" ) ) ) self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , """int64""" ) ) def UpperCAmelCase_ (self ): with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ): UpperCamelCase__ = pa.array(TypedSequence(["""foo""", """bar"""] , type=ArrayaD((1, 3) , """int64""" ) ) ) def UpperCAmelCase_ (self ): UpperCamelCase__ = pa.array(TypedSequence([[[1, 2, 3]]] , try_type=ArrayaD((1, 3) , """int64""" ) ) ) self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , """int64""" ) ) def UpperCAmelCase_ (self ): UpperCamelCase__ = pa.array(TypedSequence(["""foo""", """bar"""] , try_type=ArrayaD((1, 3) , """int64""" ) ) ) self.assertEqual(arr.type , pa.string() ) @require_pil def UpperCAmelCase_ (self ): import PIL.Image UpperCamelCase__ = PIL.Image.fromarray(np.arange(10 , dtype=np.uinta ).reshape(2 , 5 ) ) with patch( """datasets.arrow_writer.cast_to_python_objects""" , side_effect=SCREAMING_SNAKE_CASE_ ) as mock_cast_to_python_objects: UpperCamelCase__ = pa.array(TypedSequence([{"""path""": None, """bytes""": b"""image_bytes"""}, pil_image] , type=Image() ) ) UpperCamelCase__ , UpperCamelCase__ = mock_cast_to_python_objects.call_args_list[-1] self.assertIn("""optimize_list_casting""" , SCREAMING_SNAKE_CASE_ ) self.assertFalse(kwargs["""optimize_list_casting"""] ) def __magic_name__ ( __a : List[Any] , __a : int ): '''simple docstring''' UpperCamelCase__ = pa.BufferReader(__a ) if isinstance(__a , pa.Buffer ) else pa.memory_map(__a ) UpperCamelCase__ = pa.ipc.open_stream(__a ) UpperCamelCase__ = f.read_all() assert len(pa_table.to_batches() ) == expected_num_chunks assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]} del pa_table 
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 10] ) @pytest.mark.parametrize( """fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] ) def __magic_name__ ( __a : Tuple , __a : int ): '''simple docstring''' UpperCamelCase__ = pa.BufferOutputStream() UpperCamelCase__ = pa.schema(__a ) if fields else None with ArrowWriter(stream=__a , schema=__a , writer_batch_size=__a ) as writer: writer.write({"""col_1""": """foo""", """col_2""": 1} ) writer.write({"""col_1""": """bar""", """col_2""": 2} ) UpperCamelCase__ , UpperCamelCase__ = writer.finalize() assert num_examples == 2 assert num_bytes > 0 if not fields: UpperCamelCase__ = {"""col_1""": pa.string(), """col_2""": pa.intaa()} assert writer._schema == pa.schema(__a , metadata=writer._schema.metadata ) _check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 ) def __magic_name__ ( ): '''simple docstring''' UpperCamelCase__ = pa.BufferOutputStream() UpperCamelCase__ = Features({"""labels""": ClassLabel(names=["""neg""", """pos"""] )} ) with ArrowWriter(stream=__a , features=__a ) as writer: writer.write({"""labels""": 0} ) writer.write({"""labels""": 1} ) UpperCamelCase__ , UpperCamelCase__ = writer.finalize() assert num_examples == 2 assert num_bytes > 0 assert writer._schema == features.arrow_schema assert writer._schema.metadata == features.arrow_schema.metadata UpperCamelCase__ = pa.BufferReader(output.getvalue() ) UpperCamelCase__ = pa.ipc.open_stream(__a ) UpperCamelCase__ = f.read_all() UpperCamelCase__ = pa_table.schema assert pa_table.num_rows == 2 assert schema == features.arrow_schema assert schema.metadata == features.arrow_schema.metadata assert features == Features.from_arrow_schema(__a ) @pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 10] ) def __magic_name__ ( __a : str ): '''simple docstring''' UpperCamelCase__ = pa.BufferOutputStream() with ArrowWriter( stream=__a , writer_batch_size=__a , hash_salt="""split_name""" , check_duplicates=__a , ) as writer: with pytest.raises(__a ): writer.write({"""col_1""": """foo""", """col_2""": 1} , key=[1, 2] ) UpperCamelCase__ , UpperCamelCase__ = writer.finalize() @pytest.mark.parametrize("""writer_batch_size""" , [None, 2, 10] ) def __magic_name__ ( __a : str ): '''simple docstring''' UpperCamelCase__ = pa.BufferOutputStream() with ArrowWriter( stream=__a , writer_batch_size=__a , hash_salt="""split_name""" , check_duplicates=__a , ) as writer: with pytest.raises(__a ): writer.write({"""col_1""": """foo""", """col_2""": 1} , key=10 ) writer.write({"""col_1""": """bar""", """col_2""": 2} , key=10 ) UpperCamelCase__ , UpperCamelCase__ = writer.finalize() @pytest.mark.parametrize("""writer_batch_size""" , [None, 2, 10] ) def __magic_name__ ( __a : Union[str, Any] ): '''simple docstring''' UpperCamelCase__ = pa.BufferOutputStream() with ArrowWriter( stream=__a , writer_batch_size=__a , hash_salt="""split_name""" , check_duplicates=__a , ) as writer: writer.write({"""col_1""": """foo""", """col_2""": 1} , key=1 ) writer.write({"""col_1""": """bar""", """col_2""": 2} , key=2 ) UpperCamelCase__ , UpperCamelCase__ = writer.finalize() assert num_examples == 2 assert num_bytes > 0 _check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 ) @pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 10] ) @pytest.mark.parametrize( """fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, 
{"""col_1""": pa.string(), """col_2""": pa.intaa()}] ) def __magic_name__ ( __a : List[Any] , __a : Optional[int] ): '''simple docstring''' UpperCamelCase__ = pa.BufferOutputStream() UpperCamelCase__ = pa.schema(__a ) if fields else None with ArrowWriter(stream=__a , schema=__a , writer_batch_size=__a ) as writer: writer.write_batch({"""col_1""": ["""foo""", """bar"""], """col_2""": [1, 2]} ) writer.write_batch({"""col_1""": [], """col_2""": []} ) UpperCamelCase__ , UpperCamelCase__ = writer.finalize() assert num_examples == 2 assert num_bytes > 0 if not fields: UpperCamelCase__ = {"""col_1""": pa.string(), """col_2""": pa.intaa()} assert writer._schema == pa.schema(__a , metadata=writer._schema.metadata ) _check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 ) @pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 10] ) @pytest.mark.parametrize( """fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] ) def __magic_name__ ( __a : Union[str, Any] , __a : Any ): '''simple docstring''' UpperCamelCase__ = pa.BufferOutputStream() UpperCamelCase__ = pa.schema(__a ) if fields else None with ArrowWriter(stream=__a , schema=__a , writer_batch_size=__a ) as writer: writer.write_table(pa.Table.from_pydict({"""col_1""": ["""foo""", """bar"""], """col_2""": [1, 2]} ) ) UpperCamelCase__ , UpperCamelCase__ = writer.finalize() assert num_examples == 2 assert num_bytes > 0 if not fields: UpperCamelCase__ = {"""col_1""": pa.string(), """col_2""": pa.intaa()} assert writer._schema == pa.schema(__a , metadata=writer._schema.metadata ) _check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 ) @pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 10] ) @pytest.mark.parametrize( """fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] ) def __magic_name__ ( __a : Optional[Any] , __a : int ): '''simple docstring''' UpperCamelCase__ = pa.BufferOutputStream() UpperCamelCase__ = pa.schema(__a ) if fields else None with ArrowWriter(stream=__a , schema=__a , writer_batch_size=__a ) as writer: writer.write_row(pa.Table.from_pydict({"""col_1""": ["""foo"""], """col_2""": [1]} ) ) writer.write_row(pa.Table.from_pydict({"""col_1""": ["""bar"""], """col_2""": [2]} ) ) UpperCamelCase__ , UpperCamelCase__ = writer.finalize() assert num_examples == 2 assert num_bytes > 0 if not fields: UpperCamelCase__ = {"""col_1""": pa.string(), """col_2""": pa.intaa()} assert writer._schema == pa.schema(__a , metadata=writer._schema.metadata ) _check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 ) def __magic_name__ ( ): '''simple docstring''' with tempfile.TemporaryDirectory() as tmp_dir: UpperCamelCase__ = {"""col_1""": pa.string(), """col_2""": pa.intaa()} UpperCamelCase__ = os.path.join(__a , """test.arrow""" ) with ArrowWriter(path=__a , schema=pa.schema(__a ) ) as writer: writer.write_batch({"""col_1""": ["""foo""", """bar"""], """col_2""": [1, 2]} ) UpperCamelCase__ , UpperCamelCase__ = writer.finalize() assert num_examples == 2 assert num_bytes > 0 assert writer._schema == pa.schema(__a , metadata=writer._schema.metadata ) _check_output(__a , 1 ) def __magic_name__ ( __a : Any ): '''simple docstring''' if pa.types.is_list(__a ): return get_base_dtype(arr_type.value_type ) else: return arr_type def __magic_name__ ( __a : Optional[int] , __a : 
Any ): '''simple docstring''' if isinstance(lst[0] , __a ): change_first_primitive_element_in_list(lst[0] , __a ) else: UpperCamelCase__ = value @pytest.mark.parametrize("""optimized_int_type, expected_dtype""" , [(None, pa.intaa()), (Value("""int32""" ), pa.intaa())] ) @pytest.mark.parametrize("""sequence""" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] ) def __magic_name__ ( __a : Union[str, Any] , __a : Optional[int] , __a : Tuple ): '''simple docstring''' UpperCamelCase__ = pa.array(TypedSequence(__a , optimized_int_type=__a ) ) assert get_base_dtype(arr.type ) == expected_dtype @pytest.mark.parametrize( """col, expected_dtype""" , [ ("""attention_mask""", pa.inta()), ("""special_tokens_mask""", pa.inta()), ("""token_type_ids""", pa.inta()), ("""input_ids""", pa.intaa()), ("""other""", pa.intaa()), ] , ) @pytest.mark.parametrize("""sequence""" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] ) def __magic_name__ ( __a : Optional[int] , __a : str , __a : Optional[Any] ): '''simple docstring''' UpperCamelCase__ = pa.array(OptimizedTypedSequence(__a , col=__a ) ) assert get_base_dtype(arr.type ) == expected_dtype # not in range if col != "other": # avoids errors due to in-place modifications UpperCamelCase__ = copy.deepcopy(__a ) UpperCamelCase__ = np.iinfo(expected_dtype.to_pandas_dtype() ).max + 1 change_first_primitive_element_in_list(__a , __a ) UpperCamelCase__ = pa.array(OptimizedTypedSequence(__a , col=__a ) ) assert get_base_dtype(arr.type ) == pa.intaa() @pytest.mark.parametrize("""raise_exception""" , [False, True] ) def __magic_name__ ( __a : List[str] , __a : List[str] ): '''simple docstring''' UpperCamelCase__ = str(tmp_path / """dataset-train.arrow""" ) try: with ArrowWriter(path=__a ) as writer: if raise_exception: raise pa.lib.ArrowInvalid() else: writer.stream.close() except pa.lib.ArrowInvalid: pass finally: assert writer.stream.closed def __magic_name__ ( __a : Tuple ): '''simple docstring''' UpperCamelCase__ = """mock://dataset-train.arrow""" with ArrowWriter(path=__a , storage_options=mockfs.storage_options ) as writer: assert isinstance(writer._fs , type(__a ) ) assert writer._fs.storage_options == mockfs.storage_options writer.write({"""col_1""": """foo""", """col_2""": 1} ) writer.write({"""col_1""": """bar""", """col_2""": 2} ) UpperCamelCase__ , UpperCamelCase__ = writer.finalize() assert num_examples == 2 assert num_bytes > 0 assert mockfs.exists(__a ) def __magic_name__ ( ): '''simple docstring''' UpperCamelCase__ = pa.BufferOutputStream() with ParquetWriter(stream=__a ) as writer: writer.write({"""col_1""": """foo""", """col_2""": 1} ) writer.write({"""col_1""": """bar""", """col_2""": 2} ) UpperCamelCase__ , UpperCamelCase__ = writer.finalize() assert num_examples == 2 assert num_bytes > 0 UpperCamelCase__ = pa.BufferReader(output.getvalue() ) UpperCamelCase__ = pq.read_table(__a ) assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]} @require_pil @pytest.mark.parametrize("""embed_local_files""" , [False, True] ) def __magic_name__ ( __a : str , __a : Any ): '''simple docstring''' import PIL.Image UpperCamelCase__ = str(tmp_path / """test_image_rgb.jpg""" ) PIL.Image.fromarray(np.zeros((5, 5) , dtype=np.uinta ) ).save(__a , format="""png""" ) UpperCamelCase__ = pa.BufferOutputStream() with ParquetWriter( stream=__a , features=Features({"""image""": Image()} ) , embed_local_files=__a ) as writer: writer.write({"""image""": image_path} ) writer.finalize() UpperCamelCase__ = pa.BufferReader(output.getvalue() ) UpperCamelCase__ = pq.read_table(__a ) 
UpperCamelCase__ = pa_table.to_pydict() if embed_local_files: assert isinstance(out["""image"""][0]["""path"""] , __a ) with open(__a , """rb""" ) as f: assert out["image"][0]["bytes"] == f.read() else: assert out["image"][0]["path"] == image_path assert out["image"][0]["bytes"] is None def __magic_name__ ( ): '''simple docstring''' UpperCamelCase__ = pa.schema([pa.field("""col_1""" , pa.string() , nullable=__a )] ) UpperCamelCase__ = pa.BufferOutputStream() with ArrowWriter(stream=__a ) as writer: writer._build_writer(inferred_schema=__a ) assert writer._schema == pa.schema([pa.field("""col_1""" , pa.string() )] )
86
def __magic_name__ ( __a : int = 50 ): '''simple docstring''' UpperCamelCase__ = [1] * (length + 1) for row_length in range(length + 1 ): for tile_length in range(2 , 5 ): for tile_start in range(row_length - tile_length + 1 ): ways_number[row_length] += ways_number[ row_length - tile_start - tile_length ] return ways_number[length] if __name__ == "__main__": print(f'{solution() = }')
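The triple loop above decomposes each row by the position and length of its first tile; the same counts satisfy the linear recurrence ways(n) = ways(n-1) + ways(n-2) + ways(n-3) + ways(n-4), since a row ends either in a unit square or in a tile of length 2, 3, or 4. A sketch cross-checking small cases (function name is illustrative):

def tiling_ways(length: int) -> int:
    # counts for lengths 0..3, enumerated by hand: 1, 1, 2, 4
    ways = [1, 1, 2, 4]
    for _ in range(4, length + 1):
        ways.append(ways[-1] + ways[-2] + ways[-3] + ways[-4])
    return ways[length]

assert [tiling_ways(n) for n in range(6)] == [1, 1, 2, 4, 8, 15]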
86
1
import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel from transformers.utils import logging logging.set_verbosity_info() lowerCamelCase_ = logging.get_logger(__name__) def __magic_name__ ( __a : Union[str, Any] , __a : int=False ): '''simple docstring''' UpperCamelCase__ = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight") ) rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias") ) rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight") ) rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias") ) rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight") ) rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias") ) rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight") ) rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias") ) rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight") ) rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias") ) # projection layer + position embeddings rename_keys.extend( [ ("""cls_token""", """vit.embeddings.cls_token"""), ("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""), ("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""), ("""pos_embed""", """vit.embeddings.position_embeddings"""), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ("""norm.weight""", """layernorm.weight"""), ("""norm.bias""", """layernorm.bias"""), ("""pre_logits.fc.weight""", """pooler.dense.weight"""), ("""pre_logits.fc.bias""", """pooler.dense.bias"""), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" UpperCamelCase__ = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ("""norm.weight""", """vit.layernorm.weight"""), ("""norm.bias""", """vit.layernorm.bias"""), ("""head.weight""", """classifier.weight"""), ("""head.bias""", """classifier.bias"""), ] ) return rename_keys def __magic_name__ ( __a : Dict , __a : Tuple , __a : List[str]=False ): '''simple docstring''' for i in range(config.num_hidden_layers ): if base_model: UpperCamelCase__ = """""" else: UpperCamelCase__ = """vit.""" # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) UpperCamelCase__ = state_dict.pop(f"blocks.{i}.attn.qkv.weight" ) UpperCamelCase__ = state_dict.pop(f"blocks.{i}.attn.qkv.bias" ) # next, add query, keys and values (in that order) to the state dict UpperCamelCase__ = in_proj_weight[ : config.hidden_size, : ] UpperCamelCase__ = in_proj_bias[: config.hidden_size] UpperCamelCase__ = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] UpperCamelCase__ = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] UpperCamelCase__ = in_proj_weight[ -config.hidden_size :, : ] 
UpperCamelCase__ = in_proj_bias[-config.hidden_size :] def __magic_name__ ( __a : Optional[Any] ): '''simple docstring''' UpperCamelCase__ = ["""head.weight""", """head.bias"""] for k in ignore_keys: state_dict.pop(__a , __a ) def __magic_name__ ( __a : Any , __a : str , __a : str ): '''simple docstring''' UpperCamelCase__ = dct.pop(__a ) UpperCamelCase__ = val def __magic_name__ ( ): '''simple docstring''' UpperCamelCase__ = """http://images.cocodataset.org/val2017/000000039769.jpg""" UpperCamelCase__ = Image.open(requests.get(__a , stream=__a ).raw ) return im @torch.no_grad() def __magic_name__ ( __a : List[str] , __a : List[Any] ): '''simple docstring''' UpperCamelCase__ = ViTConfig() UpperCamelCase__ = False # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size if vit_name[-5:] == "in21k": UpperCamelCase__ = True UpperCamelCase__ = int(vit_name[-12:-10] ) UpperCamelCase__ = int(vit_name[-9:-6] ) else: UpperCamelCase__ = 1_000 UpperCamelCase__ = """huggingface/label-files""" UpperCamelCase__ = """imagenet-1k-id2label.json""" UpperCamelCase__ = json.load(open(hf_hub_download(__a , __a , repo_type="""dataset""" ) , """r""" ) ) UpperCamelCase__ = {int(__a ): v for k, v in idalabel.items()} UpperCamelCase__ = idalabel UpperCamelCase__ = {v: k for k, v in idalabel.items()} UpperCamelCase__ = int(vit_name[-6:-4] ) UpperCamelCase__ = int(vit_name[-3:] ) # size of the architecture if "deit" in vit_name: if vit_name[9:].startswith("""tiny""" ): UpperCamelCase__ = 192 UpperCamelCase__ = 768 UpperCamelCase__ = 12 UpperCamelCase__ = 3 elif vit_name[9:].startswith("""small""" ): UpperCamelCase__ = 384 UpperCamelCase__ = 1_536 UpperCamelCase__ = 12 UpperCamelCase__ = 6 else: pass else: if vit_name[4:].startswith("""small""" ): UpperCamelCase__ = 768 UpperCamelCase__ = 2_304 UpperCamelCase__ = 8 UpperCamelCase__ = 8 elif vit_name[4:].startswith("""base""" ): pass elif vit_name[4:].startswith("""large""" ): UpperCamelCase__ = 1_024 UpperCamelCase__ = 4_096 UpperCamelCase__ = 24 UpperCamelCase__ = 16 elif vit_name[4:].startswith("""huge""" ): UpperCamelCase__ = 1_280 UpperCamelCase__ = 5_120 UpperCamelCase__ = 32 UpperCamelCase__ = 16 # load original model from timm UpperCamelCase__ = timm.create_model(__a , pretrained=__a ) timm_model.eval() # load state_dict of original model, remove and rename some keys UpperCamelCase__ = timm_model.state_dict() if base_model: remove_classification_head_(__a ) UpperCamelCase__ = create_rename_keys(__a , __a ) for src, dest in rename_keys: rename_key(__a , __a , __a ) read_in_q_k_v(__a , __a , __a ) # load HuggingFace model if vit_name[-5:] == "in21k": UpperCamelCase__ = ViTModel(__a ).eval() else: UpperCamelCase__ = ViTForImageClassification(__a ).eval() model.load_state_dict(__a ) # Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor if "deit" in vit_name: UpperCamelCase__ = DeiTImageProcessor(size=config.image_size ) else: UpperCamelCase__ = ViTImageProcessor(size=config.image_size ) UpperCamelCase__ = image_processor(images=prepare_img() , return_tensors="""pt""" ) UpperCamelCase__ = encoding["""pixel_values"""] UpperCamelCase__ = model(__a ) if base_model: UpperCamelCase__ = timm_model.forward_features(__a ) assert timm_pooled_output.shape == outputs.pooler_output.shape assert torch.allclose(__a , outputs.pooler_output , atol=1E-3 ) else: UpperCamelCase__ = timm_model(__a ) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(__a , outputs.logits , atol=1E-3 ) Path(__a 
).mkdir(exist_ok=__a ) print(f"Saving model {vit_name} to {pytorch_dump_folder_path}" ) model.save_pretrained(__a ) print(f"Saving image processor to {pytorch_dump_folder_path}" ) image_processor.save_pretrained(__a ) if __name__ == "__main__": lowerCamelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--vit_name''', default='''vit_base_patch16_224''', type=str, help='''Name of the ViT timm model you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) lowerCamelCase_ = parser.parse_args() convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
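timm stores each attention block's input projection as one fused qkv matrix, which the script above slices into separate query/key/value tensors. A small sketch of that slicing, showing the three slices reproduce the fused output (hidden size 4 is illustrative):

import torch

hidden = 4
fused_w = torch.randn(3 * hidden, hidden)  # rows stacked as [q; k; v]
fused_b = torch.randn(3 * hidden)

q_w, k_w, v_w = fused_w[:hidden], fused_w[hidden : 2 * hidden], fused_w[-hidden:]
q_b, k_b, v_b = fused_b[:hidden], fused_b[hidden : 2 * hidden], fused_b[-hidden:]

x = torch.randn(2, hidden)
fused_out = x @ fused_w.T + fused_b  # what a fused nn.Linear would compute
assert torch.allclose(fused_out[:, :hidden], x @ q_w.T + q_b)
assert torch.allclose(fused_out[:, hidden : 2 * hidden], x @ k_w.T + k_b)
assert torch.allclose(fused_out[:, -hidden:], x @ v_w.T + v_b)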
86
import itertools import json import os import unittest from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class __A( __lowerCamelCase , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = RobertaTokenizer SCREAMING_SNAKE_CASE__ = RobertaTokenizerFast SCREAMING_SNAKE_CASE__ = True SCREAMING_SNAKE_CASE__ = {"""cls_token""": """<s>"""} def UpperCAmelCase_ (self ): super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt UpperCamelCase__ = [ """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """\u0120""", """\u0120l""", """\u0120n""", """\u0120lo""", """\u0120low""", """er""", """\u0120lowest""", """\u0120newer""", """\u0120wider""", """<unk>""", ] UpperCamelCase__ = dict(zip(SCREAMING_SNAKE_CASE_ , range(len(SCREAMING_SNAKE_CASE_ ) ) ) ) UpperCamelCase__ = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""] UpperCamelCase__ = {"""unk_token""": """<unk>"""} UpperCamelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) UpperCamelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp: fp.write(json.dumps(SCREAMING_SNAKE_CASE_ ) + """\n""" ) with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp: fp.write("""\n""".join(SCREAMING_SNAKE_CASE_ ) ) def UpperCAmelCase_ (self , **SCREAMING_SNAKE_CASE_ ): kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self , **SCREAMING_SNAKE_CASE_ ): kwargs.update(self.special_tokens_map ) return RobertaTokenizerFast.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = """lower newer""" UpperCamelCase__ = """lower newer""" return input_text, output_text def UpperCAmelCase_ (self ): UpperCamelCase__ = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map ) UpperCamelCase__ = """lower newer""" UpperCamelCase__ = ["""l""", """o""", """w""", """er""", """\u0120""", """n""", """e""", """w""", """er"""] UpperCamelCase__ = tokenizer.tokenize(SCREAMING_SNAKE_CASE_ ) # , add_prefix_space=True) self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = tokens + [tokenizer.unk_token] UpperCamelCase__ = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self ): UpperCamelCase__ = self.get_tokenizer() self.assertListEqual(tokenizer.encode("""Hello world!""" , add_special_tokens=SCREAMING_SNAKE_CASE_ ) , [0, 3_14_14, 2_32, 3_28, 2] ) self.assertListEqual( tokenizer.encode("""Hello world! 
cécé herlolip 418""" , add_special_tokens=SCREAMING_SNAKE_CASE_ ) , [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2] , ) @slow def UpperCAmelCase_ (self ): UpperCamelCase__ = self.tokenizer_class.from_pretrained("""roberta-base""" ) UpperCamelCase__ = tokenizer.encode("""sequence builders""" , add_special_tokens=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = tokenizer.encode("""multi-sequence build""" , add_special_tokens=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = tokenizer.encode( """sequence builders""" , add_special_tokens=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = tokenizer.encode( """sequence builders""" , """multi-sequence build""" , add_special_tokens=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) assert encoded_sentence == encoded_text_from_decode assert encoded_pair == encoded_pair_from_decode def UpperCAmelCase_ (self ): UpperCamelCase__ = self.get_tokenizer() UpperCamelCase__ = """Encode this sequence.""" UpperCamelCase__ = tokenizer.byte_encoder[""" """.encode("""utf-8""" )[0]] # Testing encoder arguments UpperCamelCase__ = tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertNotEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) tokenizer.add_special_tokens({"""bos_token""": """<s>"""} ) UpperCamelCase__ = tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = tokenizer.convert_ids_to_tokens(encoded[1] )[0] self.assertNotEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # Testing spaces after special tokens UpperCamelCase__ = """<mask>""" tokenizer.add_special_tokens( {"""mask_token""": AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ )} ) # mask token has a left space UpperCamelCase__ = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = """Encode <mask> sequence""" UpperCamelCase__ = """Encode <mask>sequence""" UpperCamelCase__ = tokenizer.encode(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = encoded.index(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = tokenizer.encode(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = encoded.index(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertNotEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self ): pass def UpperCAmelCase_ (self ): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ): UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = self.tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = """A, <mask> 
AllenNLP sentence.""" UpperCamelCase__ = tokenizer_r.encode_plus(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ , return_token_type_ids=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = tokenizer_p.encode_plus(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ , return_token_type_ids=SCREAMING_SNAKE_CASE_ ) # token_type_ids should put 0 everywhere self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , sum(tokens_p["""token_type_ids"""] ) ) # attention_mask should put 1 everywhere, so sum over length should be 1 self.assertEqual( sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , ) UpperCamelCase__ = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] ) UpperCamelCase__ = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] ) # Rust correctly handles the space before the mask while python doesnt self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] ) self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] ) self.assertSequenceEqual( SCREAMING_SNAKE_CASE_ , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] ) self.assertSequenceEqual( SCREAMING_SNAKE_CASE_ , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] ) def UpperCAmelCase_ (self ): for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ): UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained( self.tmpdirname , use_fast=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ , trim_offsets=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() ) UpperCamelCase__ = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() ) self.assertEqual(pre_tokenizer_state["""add_prefix_space"""] , SCREAMING_SNAKE_CASE_ ) self.assertEqual(post_processor_state["""add_prefix_space"""] , SCREAMING_SNAKE_CASE_ ) self.assertEqual(post_processor_state["""trim_offsets"""] , SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self ): # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and # `trim_offsets` for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ): UpperCamelCase__ = """hello""" # `hello` is a token in the vocabulary of `pretrained_name` UpperCamelCase__ = F"{text_of_1_token} {text_of_1_token}" UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained( SCREAMING_SNAKE_CASE_ , use_fast=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ , trim_offsets=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = tokenizer_r(SCREAMING_SNAKE_CASE_ , return_offsets_mapping=SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ ) self.assertEqual(encoding.offset_mapping[0] , (0, len(SCREAMING_SNAKE_CASE_ )) ) self.assertEqual( encoding.offset_mapping[1] , (len(SCREAMING_SNAKE_CASE_ ) + 1, len(SCREAMING_SNAKE_CASE_ ) + 1 + len(SCREAMING_SNAKE_CASE_ )) , ) UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained( SCREAMING_SNAKE_CASE_ , use_fast=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ , trim_offsets=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = tokenizer_r(SCREAMING_SNAKE_CASE_ , 
return_offsets_mapping=SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ ) self.assertEqual(encoding.offset_mapping[0] , (0, len(SCREAMING_SNAKE_CASE_ )) ) self.assertEqual( encoding.offset_mapping[1] , (len(SCREAMING_SNAKE_CASE_ ) + 1, len(SCREAMING_SNAKE_CASE_ ) + 1 + len(SCREAMING_SNAKE_CASE_ )) , ) UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained( SCREAMING_SNAKE_CASE_ , use_fast=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ , trim_offsets=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = tokenizer_r(SCREAMING_SNAKE_CASE_ , return_offsets_mapping=SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ ) self.assertEqual(encoding.offset_mapping[0] , (0, len(SCREAMING_SNAKE_CASE_ )) ) self.assertEqual( encoding.offset_mapping[1] , (len(SCREAMING_SNAKE_CASE_ ), len(SCREAMING_SNAKE_CASE_ ) + 1 + len(SCREAMING_SNAKE_CASE_ )) , ) UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained( SCREAMING_SNAKE_CASE_ , use_fast=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ , trim_offsets=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = tokenizer_r(SCREAMING_SNAKE_CASE_ , return_offsets_mapping=SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ ) self.assertEqual(encoding.offset_mapping[0] , (0, len(SCREAMING_SNAKE_CASE_ )) ) self.assertEqual( encoding.offset_mapping[1] , (len(SCREAMING_SNAKE_CASE_ ), len(SCREAMING_SNAKE_CASE_ ) + 1 + len(SCREAMING_SNAKE_CASE_ )) , ) UpperCamelCase__ = F" {text}" # tokenizer_r = self.rust_tokenizer_class.from_pretrained( # pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True # ) # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False) # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token))) # self.assertEqual( # encoding.offset_mapping[1], # (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), # ) UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained( SCREAMING_SNAKE_CASE_ , use_fast=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ , trim_offsets=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = tokenizer_r(SCREAMING_SNAKE_CASE_ , return_offsets_mapping=SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ ) self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(SCREAMING_SNAKE_CASE_ )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(SCREAMING_SNAKE_CASE_ ) + 1, 1 + len(SCREAMING_SNAKE_CASE_ ) + 1 + len(SCREAMING_SNAKE_CASE_ )) , ) UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained( SCREAMING_SNAKE_CASE_ , use_fast=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ , trim_offsets=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = tokenizer_r(SCREAMING_SNAKE_CASE_ , return_offsets_mapping=SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(SCREAMING_SNAKE_CASE_ )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(SCREAMING_SNAKE_CASE_ ), 1 + len(SCREAMING_SNAKE_CASE_ ) + 1 + len(SCREAMING_SNAKE_CASE_ )) , ) UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained( SCREAMING_SNAKE_CASE_ , use_fast=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ , trim_offsets=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = tokenizer_r(SCREAMING_SNAKE_CASE_ , return_offsets_mapping=SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(SCREAMING_SNAKE_CASE_ )) ) self.assertEqual( 
encoding.offset_mapping[1] , (1 + len(SCREAMING_SNAKE_CASE_ ), 1 + len(SCREAMING_SNAKE_CASE_ ) + 1 + len(SCREAMING_SNAKE_CASE_ )) , )
86
1
import os # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_doctest_list.py lowerCamelCase_ = '''.''' if __name__ == "__main__": lowerCamelCase_ = os.path.join(REPO_PATH, '''utils/documentation_tests.txt''') lowerCamelCase_ = [] lowerCamelCase_ = [] with open(doctest_file_path) as fp: for line in fp: lowerCamelCase_ = line.strip() lowerCamelCase_ = os.path.join(REPO_PATH, line) if not (os.path.isfile(path) or os.path.isdir(path)): non_existent_paths.append(line) all_paths.append(path) if len(non_existent_paths) > 0: lowerCamelCase_ = '''\n'''.join(non_existent_paths) raise ValueError(f'`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}') if all_paths != sorted(all_paths): raise ValueError('''Files in `utils/documentation_tests.txt` are not in alphabetical order.''')
86
import argparse import json import os import pickle import shutil import numpy as np import torch from distiller import Distiller from lm_seqs_dataset import LmSeqsDataset from transformers import ( BertConfig, BertForMaskedLM, BertTokenizer, DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer, GPTaConfig, GPTaLMHeadModel, GPTaTokenizer, RobertaConfig, RobertaForMaskedLM, RobertaTokenizer, ) from utils import git_log, init_gpu_params, logger, set_seed lowerCamelCase_ = { '''distilbert''': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer), '''roberta''': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer), '''bert''': (BertConfig, BertForMaskedLM, BertTokenizer), '''gpt2''': (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer), } def __magic_name__ ( __a : Any ): '''simple docstring''' assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0) assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0) if args.mlm: assert os.path.isfile(args.token_counts ) assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"]) else: assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"]) assert args.teacher_type == args.student_type or ( args.student_type == "distilbert" and args.teacher_type == "bert" ) assert os.path.isfile(args.student_config ) if args.student_pretrained_weights is not None: assert os.path.isfile(args.student_pretrained_weights ) if args.freeze_token_type_embds: assert args.student_type in ["roberta"] assert args.alpha_ce >= 0.0 assert args.alpha_mlm >= 0.0 assert args.alpha_clm >= 0.0 assert args.alpha_mse >= 0.0 assert args.alpha_cos >= 0.0 assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0 def __magic_name__ ( __a : List[Any] , __a : Any ): '''simple docstring''' if args.student_type == "roberta": UpperCamelCase__ = False elif args.student_type == "gpt2": UpperCamelCase__ = False def __magic_name__ ( __a : int , __a : Dict ): '''simple docstring''' if args.student_type == "roberta": UpperCamelCase__ = False def __magic_name__ ( ): '''simple docstring''' UpperCamelCase__ = argparse.ArgumentParser(description="""Training""" ) parser.add_argument("""--force""" , action="""store_true""" , help="""Overwrite dump_path if it already exists.""" ) parser.add_argument( """--dump_path""" , type=__a , required=__a , help="""The output directory (log, checkpoints, parameters, etc.)""" ) parser.add_argument( """--data_file""" , type=__a , required=__a , help="""The binarized file (tokenized + tokens_to_ids) and grouped by sequence.""" , ) parser.add_argument( """--student_type""" , type=__a , choices=["""distilbert""", """roberta""", """gpt2"""] , required=__a , help="""The student type (DistilBERT, RoBERTa).""" , ) parser.add_argument("""--student_config""" , type=__a , required=__a , help="""Path to the student configuration.""" ) parser.add_argument( """--student_pretrained_weights""" , default=__a , type=__a , help="""Load student initialization checkpoint.""" ) parser.add_argument( """--teacher_type""" , choices=["""bert""", """roberta""", """gpt2"""] , required=__a , help="""Teacher type (BERT, RoBERTa).""" ) parser.add_argument("""--teacher_name""" , type=__a , required=__a , help="""The teacher model.""" ) parser.add_argument("""--temperature""" , default=2.0 , type=__a , help="""Temperature for the softmax temperature.""" ) parser.add_argument( """--alpha_ce""" , default=0.5 , type=__a , 
help="""Linear weight for the distillation loss. Must be >=0.""" ) parser.add_argument( """--alpha_mlm""" , default=0.0 , type=__a , help="""Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.""" , ) parser.add_argument("""--alpha_clm""" , default=0.5 , type=__a , help="""Linear weight for the CLM loss. Must be >=0.""" ) parser.add_argument("""--alpha_mse""" , default=0.0 , type=__a , help="""Linear weight of the MSE loss. Must be >=0.""" ) parser.add_argument( """--alpha_cos""" , default=0.0 , type=__a , help="""Linear weight of the cosine embedding loss. Must be >=0.""" ) parser.add_argument( """--mlm""" , action="""store_true""" , help="""The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.""" ) parser.add_argument( """--mlm_mask_prop""" , default=0.15 , type=__a , help="""Proportion of tokens for which we need to make a prediction.""" , ) parser.add_argument("""--word_mask""" , default=0.8 , type=__a , help="""Proportion of tokens to mask out.""" ) parser.add_argument("""--word_keep""" , default=0.1 , type=__a , help="""Proportion of tokens to keep.""" ) parser.add_argument("""--word_rand""" , default=0.1 , type=__a , help="""Proportion of tokens to randomly replace.""" ) parser.add_argument( """--mlm_smoothing""" , default=0.7 , type=__a , help="""Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).""" , ) parser.add_argument("""--token_counts""" , type=__a , help="""The token counts in the data_file for MLM.""" ) parser.add_argument( """--restrict_ce_to_mask""" , action="""store_true""" , help="""If true, compute the distillation loss only the [MLM] prediction distribution.""" , ) parser.add_argument( """--freeze_pos_embs""" , action="""store_true""" , help="""Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only.""" , ) parser.add_argument( """--freeze_token_type_embds""" , action="""store_true""" , help="""Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only.""" , ) parser.add_argument("""--n_epoch""" , type=__a , default=3 , help="""Number of pass on the whole dataset.""" ) parser.add_argument("""--batch_size""" , type=__a , default=5 , help="""Batch size (for each process).""" ) parser.add_argument( """--group_by_size""" , action="""store_false""" , help="""If true, group sequences that have similar length into the same batch. 
Default is true.""" , ) parser.add_argument( """--gradient_accumulation_steps""" , type=__a , default=50 , help="""Gradient accumulation for larger training batches.""" , ) parser.add_argument("""--warmup_prop""" , default=0.05 , type=__a , help="""Linear warmup proportion.""" ) parser.add_argument("""--weight_decay""" , default=0.0 , type=__a , help="""Weight decay if we apply some.""" ) parser.add_argument("""--learning_rate""" , default=5E-4 , type=__a , help="""The initial learning rate for Adam.""" ) parser.add_argument("""--adam_epsilon""" , default=1E-6 , type=__a , help="""Epsilon for Adam optimizer.""" ) parser.add_argument("""--max_grad_norm""" , default=5.0 , type=__a , help="""Max gradient norm.""" ) parser.add_argument("""--initializer_range""" , default=0.02 , type=__a , help="""Random initialization range.""" ) parser.add_argument( """--fp16""" , action="""store_true""" , help="""Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit""" , ) parser.add_argument( """--fp16_opt_level""" , type=__a , default="""O1""" , help=( """For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3'].""" """See details at https://nvidia.github.io/apex/amp.html""" ) , ) parser.add_argument("""--n_gpu""" , type=__a , default=1 , help="""Number of GPUs in the node.""" ) parser.add_argument("""--local_rank""" , type=__a , default=-1 , help="""Distributed training - Local rank""" ) parser.add_argument("""--seed""" , type=__a , default=56 , help="""Random seed""" ) parser.add_argument("""--log_interval""" , type=__a , default=500 , help="""Tensorboard logging interval.""" ) parser.add_argument("""--checkpoint_interval""" , type=__a , default=4_000 , help="""Checkpoint interval.""" ) UpperCamelCase__ = parser.parse_args() sanity_checks(__a ) # ARGS # init_gpu_params(__a ) set_seed(__a ) if args.is_master: if os.path.exists(args.dump_path ): if not args.force: raise ValueError( f"Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite" """ itUse `--force` if you want to overwrite it""" ) else: shutil.rmtree(args.dump_path ) if not os.path.exists(args.dump_path ): os.makedirs(args.dump_path ) logger.info(f"Experiment will be dumped and logged in {args.dump_path}" ) # SAVE PARAMS # logger.info(f"Param: {args}" ) with open(os.path.join(args.dump_path , """parameters.json""" ) , """w""" ) as f: json.dump(vars(__a ) , __a , indent=4 ) git_log(args.dump_path ) UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = MODEL_CLASSES[args.student_type] UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = MODEL_CLASSES[args.teacher_type] # TOKENIZER # UpperCamelCase__ = teacher_tokenizer_class.from_pretrained(args.teacher_name ) UpperCamelCase__ = {} for tok_name, tok_symbol in tokenizer.special_tokens_map.items(): UpperCamelCase__ = tokenizer.all_special_tokens.index(__a ) UpperCamelCase__ = tokenizer.all_special_ids[idx] logger.info(f"Special tokens {special_tok_ids}" ) UpperCamelCase__ = special_tok_ids UpperCamelCase__ = tokenizer.max_model_input_sizes[args.teacher_name] # DATA LOADER # logger.info(f"Loading data from {args.data_file}" ) with open(args.data_file , """rb""" ) as fp: UpperCamelCase__ = pickle.load(__a ) if args.mlm: logger.info(f"Loading token counts from {args.token_counts} (already pre-computed)" ) with open(args.token_counts , """rb""" ) as fp: UpperCamelCase__ = pickle.load(__a ) UpperCamelCase__ = np.maximum(__a , 1 ) ** -args.mlm_smoothing for idx in special_tok_ids.values(): UpperCamelCase__ = 0.0 
# do not predict special tokens UpperCamelCase__ = torch.from_numpy(__a ) else: UpperCamelCase__ = None UpperCamelCase__ = LmSeqsDataset(params=__a , data=__a ) logger.info("""Data loader created.""" ) # STUDENT # logger.info(f"Loading student config from {args.student_config}" ) UpperCamelCase__ = student_config_class.from_pretrained(args.student_config ) UpperCamelCase__ = True if args.student_pretrained_weights is not None: logger.info(f"Loading pretrained weights from {args.student_pretrained_weights}" ) UpperCamelCase__ = student_model_class.from_pretrained(args.student_pretrained_weights , config=__a ) else: UpperCamelCase__ = student_model_class(__a ) if args.n_gpu > 0: student.to(f"cuda:{args.local_rank}" ) logger.info("""Student loaded.""" ) # TEACHER # UpperCamelCase__ = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=__a ) if args.n_gpu > 0: teacher.to(f"cuda:{args.local_rank}" ) logger.info(f"Teacher loaded from {args.teacher_name}." ) # FREEZING # if args.freeze_pos_embs: freeze_pos_embeddings(__a , __a ) if args.freeze_token_type_embds: freeze_token_type_embeddings(__a , __a ) # SANITY CHECKS # assert student.config.vocab_size == teacher.config.vocab_size assert student.config.hidden_size == teacher.config.hidden_size assert student.config.max_position_embeddings == teacher.config.max_position_embeddings if args.mlm: assert token_probs.size(0 ) == stu_architecture_config.vocab_size # DISTILLER # torch.cuda.empty_cache() UpperCamelCase__ = Distiller( params=__a , dataset=__a , token_probs=__a , student=__a , teacher=__a ) distiller.train() logger.info("""Let's go get some drinks.""" ) if __name__ == "__main__": main()
86
1
import logging import os from typing import List, Tuple import numpy as np import psutil import torch import torch.distributed as dist from transformers import RagRetriever lowerCamelCase_ = logging.getLogger(__name__) class __A( __lowerCamelCase ): """simple docstring""" def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None ): super().__init__( SCREAMING_SNAKE_CASE_ , question_encoder_tokenizer=SCREAMING_SNAKE_CASE_ , generator_tokenizer=SCREAMING_SNAKE_CASE_ , index=SCREAMING_SNAKE_CASE_ , init_retrieval=SCREAMING_SNAKE_CASE_ , ) UpperCamelCase__ = None def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ ): logger.info("""initializing retrieval""" ) # initializing a separate process group for retrieval as the default # nccl backend doesn't support gather/scatter operations while gloo # is too slow to replace nccl for the core gpu communication if dist.is_initialized(): logger.info("""dist initialized""" ) # needs to be set manually UpperCamelCase__ = self._infer_socket_ifname() # avoid clash with the NCCL port UpperCamelCase__ = str(distributed_port + 1 ) UpperCamelCase__ = dist.new_group(ranks=SCREAMING_SNAKE_CASE_ , backend="""gloo""" ) # initialize retriever only on the main worker if not dist.is_initialized() or self._is_main(): logger.info("""dist not initialized / main""" ) self.index.init_index() # all processes wait untill the retriever is initialized by the main process if dist.is_initialized(): torch.distributed.barrier(group=self.process_group ) def UpperCAmelCase_ (self ): return dist.get_rank(group=self.process_group ) == 0 def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=torch.floataa ): UpperCamelCase__ = torch.empty(SCREAMING_SNAKE_CASE_ , dtype=SCREAMING_SNAKE_CASE_ ) dist.scatter(SCREAMING_SNAKE_CASE_ , src=0 , scatter_list=SCREAMING_SNAKE_CASE_ , group=self.process_group ) return target_tensor def UpperCAmelCase_ (self ): UpperCamelCase__ = psutil.net_if_addrs() # a hacky way to deal with varying network interface names UpperCamelCase__ = next((addr for addr in addrs if addr.startswith("""e""" )) , SCREAMING_SNAKE_CASE_ ) return ifname def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): # single GPU training if not dist.is_initialized(): UpperCamelCase__ , UpperCamelCase__ = self._main_retrieve(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(SCREAMING_SNAKE_CASE_ ) # distributed training UpperCamelCase__ = dist.get_world_size(group=self.process_group ) # gather logic UpperCamelCase__ = None if self._is_main(): UpperCamelCase__ = [torch.empty(question_hidden_states.shape , dtype=torch.floataa ) for _ in range(SCREAMING_SNAKE_CASE_ )] dist.gather(torch.tensor(SCREAMING_SNAKE_CASE_ ) , dst=0 , gather_list=SCREAMING_SNAKE_CASE_ , group=self.process_group ) # scatter logic UpperCamelCase__ = question_hidden_states.shape[0] UpperCamelCase__ = [] UpperCamelCase__ = [] if self._is_main(): assert len(SCREAMING_SNAKE_CASE_ ) == world_size UpperCamelCase__ , UpperCamelCase__ = self._main_retrieve(torch.cat(SCREAMING_SNAKE_CASE_ ).numpy() , SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ , UpperCamelCase__ = torch.tensor(SCREAMING_SNAKE_CASE_ ), torch.tensor(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = self._chunk_tensor(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = self._chunk_tensor(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = 
self._scattered(SCREAMING_SNAKE_CASE_ , [n_queries, n_docs] , target_type=torch.intaa ) UpperCamelCase__ = self._scattered(SCREAMING_SNAKE_CASE_ , [n_queries, n_docs, question_hidden_states.shape[1]] ) return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(SCREAMING_SNAKE_CASE_ )
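# A minimal single-process sketch of the gather -> retrieve -> scatter
# round-trip implemented above (illustrative stand-ins only, not the class's
# actual API): rank 0 concatenates every rank's query embeddings, runs one
# batched retrieval, then chunks the results so each rank can be scattered
# its own rows.
import torch

world_size, n_queries, dim, n_docs = 2, 3, 4, 5
gathered = [torch.randn(n_queries, dim) for _ in range(world_size)]  # dist.gather output on rank 0
pooled = torch.cat(gathered)                                         # one batched retrieval input
doc_ids = torch.randint(0, 100, (pooled.shape[0], n_docs))           # stand-in for _main_retrieve
chunks = doc_ids.chunk(world_size)                                   # what _chunk_tensor produces
assert chunks[0].shape == (n_queries, n_docs)                        # each rank gets its own slice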
86
from .glue import GlueDataset, GlueDataTrainingArguments from .language_modeling import ( LineByLineTextDataset, LineByLineWithRefDataset, LineByLineWithSOPTextDataset, TextDataset, TextDatasetForNextSentencePrediction, ) from .squad import SquadDataset, SquadDataTrainingArguments
86
1
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCamelCase_ = logging.get_logger(__name__) lowerCamelCase_ = { '''google/vit-base-patch16-224''': '''https://huggingface.co/vit-base-patch16-224/resolve/main/config.json''', # See all ViT models at https://huggingface.co/models?filter=vit } class __A( __lowerCamelCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = """vit""" def __init__(self , SCREAMING_SNAKE_CASE_=7_68 , SCREAMING_SNAKE_CASE_=12 , SCREAMING_SNAKE_CASE_=12 , SCREAMING_SNAKE_CASE_=30_72 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.02 , SCREAMING_SNAKE_CASE_=1E-12 , SCREAMING_SNAKE_CASE_=2_24 , SCREAMING_SNAKE_CASE_=16 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=16 , **SCREAMING_SNAKE_CASE_ , ): super().__init__(**SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = hidden_size UpperCamelCase__ = num_hidden_layers UpperCamelCase__ = num_attention_heads UpperCamelCase__ = intermediate_size UpperCamelCase__ = hidden_act UpperCamelCase__ = hidden_dropout_prob UpperCamelCase__ = attention_probs_dropout_prob UpperCamelCase__ = initializer_range UpperCamelCase__ = layer_norm_eps UpperCamelCase__ = image_size UpperCamelCase__ = patch_size UpperCamelCase__ = num_channels UpperCamelCase__ = qkv_bias UpperCamelCase__ = encoder_stride class __A( __lowerCamelCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = version.parse("""1.11""" ) @property def UpperCAmelCase_ (self ): return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) @property def UpperCAmelCase_ (self ): return 1E-4
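# Minimal usage sketch, assuming the class above mirrors transformers'
# ViTConfig (which defines the same defaults): with image_size=224 and
# patch_size=16 the encoder operates on (224 // 16) ** 2 = 196 patches.
from transformers import ViTConfig

config = ViTConfig()
num_patches = (config.image_size // config.patch_size) ** 2
assert num_patches == 196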
86
import math from typing import Callable, List, Optional, Union import numpy as np import PIL import torch from PIL import Image from transformers import CLIPTextModel, CLIPTokenizer from diffusers.models import AutoencoderKL, UNetaDConditionModel from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler def __magic_name__ ( __a : int , __a : List[str] , __a : str=[] ): '''simple docstring''' UpperCamelCase__ = size[0] - overlap_pixels * 2 UpperCamelCase__ = size[1] - overlap_pixels * 2 for letter in ["l", "r"]: if letter in remove_borders: size_x += overlap_pixels for letter in ["t", "b"]: if letter in remove_borders: size_y += overlap_pixels UpperCamelCase__ = np.ones((size_y, size_x) , dtype=np.uinta ) * 255 UpperCamelCase__ = np.pad(__a , mode="""linear_ramp""" , pad_width=__a , end_values=0 ) if "l" in remove_borders: UpperCamelCase__ = mask[:, overlap_pixels : mask.shape[1]] if "r" in remove_borders: UpperCamelCase__ = mask[:, 0 : mask.shape[1] - overlap_pixels] if "t" in remove_borders: UpperCamelCase__ = mask[overlap_pixels : mask.shape[0], :] if "b" in remove_borders: UpperCamelCase__ = mask[0 : mask.shape[0] - overlap_pixels, :] return mask def __magic_name__ ( __a : int , __a : Dict , __a : Optional[int] ): '''simple docstring''' return max(__a , min(__a , __a ) ) def __magic_name__ ( __a : [int] , __a : [int] , __a : [int] ): '''simple docstring''' return ( clamp(rect[0] , min[0] , max[0] ), clamp(rect[1] , min[1] , max[1] ), clamp(rect[2] , min[0] , max[0] ), clamp(rect[3] , min[1] , max[1] ), ) def __magic_name__ ( __a : [int] , __a : int , __a : [int] ): '''simple docstring''' UpperCamelCase__ = list(__a ) rect[0] -= overlap rect[1] -= overlap rect[2] += overlap rect[3] += overlap UpperCamelCase__ = clamp_rect(__a , [0, 0] , [image_size[0], image_size[1]] ) return rect def __magic_name__ ( __a : Optional[int] , __a : Tuple , __a : str , __a : List[Any] ): '''simple docstring''' UpperCamelCase__ = Image.new("""RGB""" , (tile.size[0] + original_slice, tile.size[1]) ) result.paste( original_image.resize((tile.size[0], tile.size[1]) , Image.BICUBIC ).crop( (slice_x, 0, slice_x + original_slice, tile.size[1]) ) , (0, 0) , ) result.paste(__a , (original_slice, 0) ) return result def __magic_name__ ( __a : int , __a : int ): '''simple docstring''' UpperCamelCase__ = (original_image_slice * 4, 0, tile.size[0], tile.size[1]) UpperCamelCase__ = tile.crop(__a ) return tile def __magic_name__ ( __a : List[str] , __a : Any ): '''simple docstring''' UpperCamelCase__ = n % d return n - divisor class __A( __lowerCamelCase ): """simple docstring""" def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 3_50 , ): super().__init__( vae=SCREAMING_SNAKE_CASE_ , text_encoder=SCREAMING_SNAKE_CASE_ , tokenizer=SCREAMING_SNAKE_CASE_ , unet=SCREAMING_SNAKE_CASE_ , low_res_scheduler=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ , max_noise_level=SCREAMING_SNAKE_CASE_ , ) def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ): torch.manual_seed(0 ) UpperCamelCase__ = ( min(image.size[0] - (tile_size + original_image_slice) , x * tile_size ), 
min(image.size[1] - (tile_size + original_image_slice) , y * tile_size ), min(image.size[0] , (x + 1) * tile_size ), min(image.size[1] , (y + 1) * tile_size ), ) UpperCamelCase__ = add_overlap_rect(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , image.size ) UpperCamelCase__ = image.crop(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0] UpperCamelCase__ = translated_slice_x - (original_image_slice / 2) UpperCamelCase__ = max(0 , SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = squeeze_tile(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = to_input.size UpperCamelCase__ = to_input.resize((tile_size, tile_size) , Image.BICUBIC ) UpperCamelCase__ = super(SCREAMING_SNAKE_CASE_ , self ).__call__(image=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ).images[0] UpperCamelCase__ = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4) , Image.BICUBIC ) UpperCamelCase__ = unsqueeze_tile(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4) , Image.BICUBIC ) UpperCamelCase__ = [] if x == 0: remove_borders.append("""l""" ) elif crop_rect[2] == image.size[0]: remove_borders.append("""r""" ) if y == 0: remove_borders.append("""t""" ) elif crop_rect[3] == image.size[1]: remove_borders.append("""b""" ) UpperCamelCase__ = Image.fromarray( make_transparency_mask( (upscaled_tile.size[0], upscaled_tile.size[1]) , tile_border * 4 , remove_borders=SCREAMING_SNAKE_CASE_ ) , mode="""L""" , ) final_image.paste( SCREAMING_SNAKE_CASE_ , (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4) , SCREAMING_SNAKE_CASE_ ) @torch.no_grad() def __call__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 75 , SCREAMING_SNAKE_CASE_ = 9.0 , SCREAMING_SNAKE_CASE_ = 50 , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 1 , SCREAMING_SNAKE_CASE_ = 0.0 , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 1 , SCREAMING_SNAKE_CASE_ = 1_28 , SCREAMING_SNAKE_CASE_ = 32 , SCREAMING_SNAKE_CASE_ = 32 , ): UpperCamelCase__ = Image.new("""RGB""" , (image.size[0] * 4, image.size[1] * 4) ) UpperCamelCase__ = math.ceil(image.size[0] / tile_size ) UpperCamelCase__ = math.ceil(image.size[1] / tile_size ) UpperCamelCase__ = tcx * tcy UpperCamelCase__ = 0 for y in range(SCREAMING_SNAKE_CASE_ ): for x in range(SCREAMING_SNAKE_CASE_ ): self._process_tile( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , prompt=SCREAMING_SNAKE_CASE_ , num_inference_steps=SCREAMING_SNAKE_CASE_ , guidance_scale=SCREAMING_SNAKE_CASE_ , noise_level=SCREAMING_SNAKE_CASE_ , negative_prompt=SCREAMING_SNAKE_CASE_ , num_images_per_prompt=SCREAMING_SNAKE_CASE_ , eta=SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , latents=SCREAMING_SNAKE_CASE_ , ) current_count += 1 if callback is not None: callback({"""progress""": current_count / total_tile_count, """image""": final_image} ) return final_image def __magic_name__ ( ): '''simple docstring''' UpperCamelCase__ = """stabilityai/stable-diffusion-x4-upscaler""" UpperCamelCase__ = StableDiffusionTiledUpscalePipeline.from_pretrained(__a , revision="""fp16""" , torch_dtype=torch.floataa ) UpperCamelCase__ = pipe.to("""cuda""" ) UpperCamelCase__ = 
Image.open("""../../docs/source/imgs/diffusers_library.jpg""" ) def callback(__a : Optional[int] ): print(f"progress: {obj['progress']:.4f}" ) obj["image"].save("""diffusers_library_progress.jpg""" ) UpperCamelCase__ = pipe(image=__a , prompt="""Black font, white background, vector""" , noise_level=40 , callback=__a ) final_image.save("""diffusers_library.jpg""" ) if __name__ == "__main__": main()
86
1
def __magic_name__ ( __a : int | float | str ): '''simple docstring''' try: UpperCamelCase__ = float(__a ) except ValueError: raise ValueError("""Please enter a valid number""" ) UpperCamelCase__ = decimal - int(__a ) if fractional_part == 0: return int(__a ), 1 else: UpperCamelCase__ = len(str(__a ).split(""".""" )[1] ) UpperCamelCase__ = int(decimal * (10**number_of_frac_digits) ) UpperCamelCase__ = 10**number_of_frac_digits UpperCamelCase__ , UpperCamelCase__ = denominator, numerator while True: UpperCamelCase__ = dividend % divisor if remainder == 0: break UpperCamelCase__ , UpperCamelCase__ = divisor, remainder UpperCamelCase__ , UpperCamelCase__ = numerator / divisor, denominator / divisor return int(__a ), int(__a ) if __name__ == "__main__": print(f'{decimal_to_fraction(2) = }') print(f'{decimal_to_fraction(89.0) = }') print(f'{decimal_to_fraction("67") = }') print(f'{decimal_to_fraction("45.0") = }') print(f'{decimal_to_fraction(1.5) = }') print(f'{decimal_to_fraction("6.25") = }') print(f'{decimal_to_fraction("78td") = }')
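# Worked example of the Euclidean reduction above, using the
# decimal_to_fraction name from the __main__ block: for 1.5 the fractional
# part is 0.5 with one digit, so numerator=15 and denominator=10; the gcd loop
# computes 10 % 15 = 10, then 15 % 10 = 5, then 10 % 5 = 0, leaving a divisor
# of 5 and the reduced pair (3, 2).
# decimal_to_fraction(1.5)    == (3, 2)
# decimal_to_fraction("6.25") == (25, 4)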
86
import inspect from typing import Callable, List, Optional, Union import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from diffusers import DiffusionPipeline from diffusers.models import AutoencoderKL, UNetaDConditionModel from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler from diffusers.utils import logging lowerCamelCase_ = logging.get_logger(__name__) # pylint: disable=invalid-name class __A( __lowerCamelCase ): """simple docstring""" def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ): super().__init__() self.register_modules( vae=SCREAMING_SNAKE_CASE_ , text_encoder=SCREAMING_SNAKE_CASE_ , tokenizer=SCREAMING_SNAKE_CASE_ , unet=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ , safety_checker=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , ) def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ = "auto" ): if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory UpperCamelCase__ = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self ): self.enable_attention_slicing(SCREAMING_SNAKE_CASE_ ) @torch.no_grad() def __call__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 5_12 , SCREAMING_SNAKE_CASE_ = 5_12 , SCREAMING_SNAKE_CASE_ = 50 , SCREAMING_SNAKE_CASE_ = 7.5 , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 1 , SCREAMING_SNAKE_CASE_ = 0.0 , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = "pil" , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 1 , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , ): if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = 1 elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = len(SCREAMING_SNAKE_CASE_ ) else: raise ValueError(F"`prompt` has to be of type `str` or `list` but is {type(SCREAMING_SNAKE_CASE_ )}" ) if height % 8 != 0 or width % 8 != 0: raise ValueError(F"`height` and `width` have to be divisible by 8 but are {height} and {width}." ) if (callback_steps is None) or ( callback_steps is not None and (not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) or callback_steps <= 0) ): raise ValueError( F"`callback_steps` has to be a positive integer but is {callback_steps} of type" F" {type(SCREAMING_SNAKE_CASE_ )}." 
) # get prompt text embeddings UpperCamelCase__ = self.tokenizer( SCREAMING_SNAKE_CASE_ , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , ) UpperCamelCase__ = text_inputs.input_ids if text_input_ids.shape[-1] > self.tokenizer.model_max_length: UpperCamelCase__ = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] ) logger.warning( """The following part of your input was truncated because CLIP can only handle sequences up to""" F" {self.tokenizer.model_max_length} tokens: {removed_text}" ) UpperCamelCase__ = text_input_ids[:, : self.tokenizer.model_max_length] if text_embeddings is None: UpperCamelCase__ = self.text_encoder(text_input_ids.to(self.device ) )[0] # duplicate text embeddings for each generation per prompt, using mps friendly method UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = text_embeddings.shape UpperCamelCase__ = text_embeddings.repeat(1 , SCREAMING_SNAKE_CASE_ , 1 ) UpperCamelCase__ = text_embeddings.view(bs_embed * num_images_per_prompt , SCREAMING_SNAKE_CASE_ , -1 ) # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. UpperCamelCase__ = guidance_scale > 1.0 # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: UpperCamelCase__ = 42 if negative_prompt is None: UpperCamelCase__ = [""""""] elif type(SCREAMING_SNAKE_CASE_ ) is not type(SCREAMING_SNAKE_CASE_ ): raise TypeError( F"`negative_prompt` should be the same type to `prompt`, but got {type(SCREAMING_SNAKE_CASE_ )} !=" F" {type(SCREAMING_SNAKE_CASE_ )}." ) elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = [negative_prompt] elif batch_size != len(SCREAMING_SNAKE_CASE_ ): raise ValueError( F"`negative_prompt`: {negative_prompt} has batch size {len(SCREAMING_SNAKE_CASE_ )}, but `prompt`:" F" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" """ the batch size of `prompt`.""" ) else: UpperCamelCase__ = negative_prompt UpperCamelCase__ = text_input_ids.shape[-1] UpperCamelCase__ = self.tokenizer( SCREAMING_SNAKE_CASE_ , padding="""max_length""" , max_length=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , return_tensors="""pt""" , ) UpperCamelCase__ = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0] # duplicate unconditional embeddings for each generation per prompt, using mps friendly method UpperCamelCase__ = uncond_embeddings.shape[1] UpperCamelCase__ = uncond_embeddings.repeat(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , 1 ) UpperCamelCase__ = uncond_embeddings.view(batch_size * num_images_per_prompt , SCREAMING_SNAKE_CASE_ , -1 ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes UpperCamelCase__ = torch.cat([uncond_embeddings, text_embeddings] ) # get the initial random noise unless the user supplied it # Unlike in other pipelines, latents need to be generated in the target device # for 1-to-1 results reproducibility with the CompVis implementation. # However this currently doesn't work in `mps`. 
UpperCamelCase__ = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8) UpperCamelCase__ = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64) UpperCamelCase__ = text_embeddings.dtype if latents is None: if self.device.type == "mps": # randn does not exist on mps UpperCamelCase__ = torch.randn( SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , device="""cpu""" , dtype=SCREAMING_SNAKE_CASE_ ).to(self.device ) UpperCamelCase__ = torch.randn(SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , device="""cpu""" , dtype=SCREAMING_SNAKE_CASE_ ).to( self.device ) else: UpperCamelCase__ = torch.randn( SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , device=self.device , dtype=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = torch.randn(SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , device=self.device , dtype=SCREAMING_SNAKE_CASE_ ) else: if latents_reference.shape != latents_shape: raise ValueError(F"Unexpected latents shape, got {latents.shape}, expected {latents_shape}" ) UpperCamelCase__ = latents_reference.to(self.device ) UpperCamelCase__ = latents.to(self.device ) # This is the key part of the pipeline where we # try to ensure that the generated images w/ the same seed # but different sizes actually result in similar images UpperCamelCase__ = (latents_shape[3] - latents_shape_reference[3]) // 2 UpperCamelCase__ = (latents_shape[2] - latents_shape_reference[2]) // 2 UpperCamelCase__ = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx UpperCamelCase__ = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy UpperCamelCase__ = 0 if dx < 0 else dx UpperCamelCase__ = 0 if dy < 0 else dy UpperCamelCase__ = max(-dx , 0 ) UpperCamelCase__ = max(-dy , 0 ) # import pdb # pdb.set_trace() UpperCamelCase__ = latents_reference[:, :, dy : dy + h, dx : dx + w] # set timesteps self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ ) # Some schedulers like PNDM have timesteps as arrays # It's more optimized to move all timesteps to correct device beforehand UpperCamelCase__ = self.scheduler.timesteps.to(self.device ) # scale the initial noise by the standard deviation required by the scheduler UpperCamelCase__ = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] UpperCamelCase__ = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() ) UpperCamelCase__ = {} if accepts_eta: UpperCamelCase__ = eta for i, t in enumerate(self.progress_bar(SCREAMING_SNAKE_CASE_ ) ): # expand the latents if we are doing classifier free guidance UpperCamelCase__ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents UpperCamelCase__ = self.scheduler.scale_model_input(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # predict the noise residual UpperCamelCase__ = self.unet(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , encoder_hidden_states=SCREAMING_SNAKE_CASE_ ).sample # perform guidance if do_classifier_free_guidance: UpperCamelCase__ , UpperCamelCase__ = noise_pred.chunk(2 ) UpperCamelCase__ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 UpperCamelCase__ = self.scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ).prev_sample # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = 1 / 0.1_8215 * latents UpperCamelCase__ = self.vae.decode(SCREAMING_SNAKE_CASE_ ).sample UpperCamelCase__ = (image / 2 + 0.5).clamp(0 , 1 ) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 UpperCamelCase__ = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if self.safety_checker is not None: UpperCamelCase__ = self.feature_extractor(self.numpy_to_pil(SCREAMING_SNAKE_CASE_ ) , return_tensors="""pt""" ).to( self.device ) UpperCamelCase__ , UpperCamelCase__ = self.safety_checker( images=SCREAMING_SNAKE_CASE_ , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) ) else: UpperCamelCase__ = None if output_type == "pil": UpperCamelCase__ = self.numpy_to_pil(SCREAMING_SNAKE_CASE_ ) if not return_dict: return (image, has_nsfw_concept) return StableDiffusionPipelineOutput(images=SCREAMING_SNAKE_CASE_ , nsfw_content_detected=SCREAMING_SNAKE_CASE_ )
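# Minimal sketch of the classifier-free-guidance step from the denoising loop
# above, with toy tensor shapes: the batched prediction is split into its
# unconditional and text-conditioned halves, then extrapolated by the
# guidance scale.
import torch

guidance_scale = 7.5
noise_pred = torch.randn(2, 4, 64, 64)  # [uncond, text] stacked along dim 0
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
guided = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
assert guided.shape == (1, 4, 64, 64)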
86
1
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import warnings from typing import List from unittest.mock import Mock import torch from torch.utils.data import DataLoader, IterableDataset, TensorDataset from accelerate.accelerator import Accelerator from accelerate.utils.dataclasses import DistributedType class __A( __lowerCamelCase ): """simple docstring""" def __init__(self , SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = data def __iter__(self ): for element in self.data: yield element def __magic_name__ ( __a : List[str]=True ): '''simple docstring''' UpperCamelCase__ = Accelerator(even_batches=__a ) assert accelerator.num_processes == 2, "this script expects that two GPUs are available" return accelerator def __magic_name__ ( __a : Accelerator , __a : int , __a : int , __a : bool = False ): '''simple docstring''' if iterable: UpperCamelCase__ = DummyIterableDataset(torch.as_tensor(range(__a ) ) ) else: UpperCamelCase__ = TensorDataset(torch.as_tensor(range(__a ) ) ) UpperCamelCase__ = DataLoader(__a , batch_size=__a ) UpperCamelCase__ = accelerator.prepare(__a ) return dl def __magic_name__ ( __a : Accelerator , __a : int , __a : int , __a : List[int] , __a : List[int] , ): '''simple docstring''' UpperCamelCase__ = create_dataloader(accelerator=__a , dataset_size=__a , batch_size=__a ) UpperCamelCase__ = [len(batch[0] ) for batch in dl] if accelerator.process_index == 0: assert batch_sizes == process_0_expected_batch_sizes elif accelerator.process_index == 1: assert batch_sizes == process_1_expected_batch_sizes def __magic_name__ ( ): '''simple docstring''' UpperCamelCase__ = create_accelerator() # without padding, we would expect a different number of batches verify_dataloader_batch_sizes( __a , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1, 1] , ) # without padding, we would expect the same number of batches, but different sizes verify_dataloader_batch_sizes( __a , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 2] , ) def __magic_name__ ( ): '''simple docstring''' UpperCamelCase__ = create_accelerator(even_batches=__a ) verify_dataloader_batch_sizes( __a , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1] , ) verify_dataloader_batch_sizes( __a , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 1] , ) def __magic_name__ ( ): '''simple docstring''' UpperCamelCase__ = create_accelerator(even_batches=__a ) UpperCamelCase__ = torch.nn.Linear(1 , 1 ) UpperCamelCase__ = accelerator.prepare(__a ) UpperCamelCase__ = create_dataloader(__a , dataset_size=3 , batch_size=1 ) UpperCamelCase__ = [] with accelerator.join_uneven_inputs([ddp_model] ): for batch_idx, batch in enumerate(__a ): UpperCamelCase__ = ddp_model(batch[0].float() ) UpperCamelCase__ = output.sum() loss.backward() batch_idxs.append(__a ) 
accelerator.wait_for_everyone() if accelerator.process_index == 0: assert batch_idxs == [0, 1] elif accelerator.process_index == 1: assert batch_idxs == [0] def __magic_name__ ( __a : List[Any] ): '''simple docstring''' with warnings.catch_warnings(record=__a ) as w: with accelerator.join_uneven_inputs([Mock()] ): pass assert issubclass(w[-1].category , __a ) assert "only supported for multi-GPU" in str(w[-1].message ) def __magic_name__ ( ): '''simple docstring''' UpperCamelCase__ = True UpperCamelCase__ = False UpperCamelCase__ = create_accelerator(even_batches=__a ) UpperCamelCase__ = torch.nn.Linear(1 , 1 ) UpperCamelCase__ = accelerator.prepare(__a ) UpperCamelCase__ = create_dataloader(__a , dataset_size=3 , batch_size=1 ) UpperCamelCase__ = create_dataloader(__a , dataset_size=3 , batch_size=1 ) with accelerator.join_uneven_inputs([ddp_model] , even_batches=__a ): UpperCamelCase__ = train_dl.batch_sampler.even_batches UpperCamelCase__ = valid_dl.batch_sampler.even_batches assert train_dl_overridden_value == overridden_even_batches assert valid_dl_overridden_value == overridden_even_batches assert train_dl.batch_sampler.even_batches == default_even_batches assert valid_dl.batch_sampler.even_batches == default_even_batches def __magic_name__ ( ): '''simple docstring''' UpperCamelCase__ = True UpperCamelCase__ = False UpperCamelCase__ = create_accelerator(even_batches=__a ) UpperCamelCase__ = torch.nn.Linear(1 , 1 ) UpperCamelCase__ = accelerator.prepare(__a ) create_dataloader(__a , dataset_size=3 , batch_size=1 , iterable=__a ) UpperCamelCase__ = create_dataloader(__a , dataset_size=3 , batch_size=1 ) with warnings.catch_warnings(): warnings.filterwarnings("""ignore""" ) try: with accelerator.join_uneven_inputs([ddp_model] , even_batches=__a ): UpperCamelCase__ = batch_dl.batch_sampler.even_batches except AttributeError: # ensure attribute error is not raised when processing iterable dl raise AssertionError assert batch_dl_overridden_value == overridden_even_batches assert batch_dl.batch_sampler.even_batches == default_even_batches def __magic_name__ ( ): '''simple docstring''' UpperCamelCase__ = create_accelerator() UpperCamelCase__ = torch.nn.Linear(1 , 1 ) UpperCamelCase__ = accelerator.prepare(__a ) create_dataloader(__a , dataset_size=3 , batch_size=1 , iterable=__a ) with warnings.catch_warnings(record=__a ) as w: with accelerator.join_uneven_inputs([ddp_model] , even_batches=__a ): pass assert issubclass(w[-1].category , __a ) assert "only supported for map-style datasets" in str(w[-1].message ) def __magic_name__ ( ): '''simple docstring''' UpperCamelCase__ = create_accelerator() accelerator.print("""Test that even_batches variable ensures uniform batches across processes""" ) test_default_ensures_even_batch_sizes() accelerator.print("""Run tests with even_batches disabled""" ) test_can_disable_even_batches() accelerator.print("""Test joining uneven inputs""" ) test_can_join_uneven_inputs() accelerator.print("""Test overriding even_batches when joining uneven inputs""" ) test_join_can_override_even_batches() accelerator.print("""Test overriding even_batches for mixed dataloader types""" ) test_join_can_override_for_mixed_type_dataloaders() accelerator.print("""Test overriding even_batches raises a warning for iterable dataloaders""" ) test_join_raises_warning_for_iterable_when_overriding_even_batches() accelerator.print("""Test join with non DDP distributed raises warning""" ) UpperCamelCase__ = accelerator.state.distributed_type UpperCamelCase__ = DistributedType.FSDP 
test_join_raises_warning_for_non_ddp_distributed(__a ) UpperCamelCase__ = original_state if __name__ == "__main__": main()
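# The batch-size expectations asserted above follow from simple padding math
# (plain Python, no GPUs needed): with even_batches=True a dataset of length 3
# sharded over 2 processes is padded up to ceil(3 / 2) * 2 = 4 samples, so both
# ranks see [1, 1]; with even_batches=False rank 1 is simply one batch short.
import math

dataset_size, num_processes = 3, 2
padded_size = math.ceil(dataset_size / num_processes) * num_processes
assert padded_size == 4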
86
from ..utils import DummyObject, requires_backends class __A( metaclass=__lowerCamelCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = ["""torch""", """torchsde"""] def __init__(self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ): requires_backends(self , ["""torch""", """torchsde"""] ) @classmethod def UpperCAmelCase_ (cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ): requires_backends(cls , ["""torch""", """torchsde"""] ) @classmethod def UpperCAmelCase_ (cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ): requires_backends(cls , ["""torch""", """torchsde"""] )
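# A simplified sketch of the dummy-object pattern above (stand-in code, not
# the diffusers implementation): the placeholder class lets imports succeed
# when optional backends are missing, while any actual use fails loudly with
# an installation hint.
class _DummyScheduler:
    def __init__(self, *args, **kwargs):
        raise ImportError("This object requires `torch` and `torchsde` to be installed.")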
86
1
import os import shutil import sys import tempfile import unittest from pathlib import Path import pytest import transformers from transformers import ( BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, AutoTokenizer, BertConfig, BertTokenizer, BertTokenizerFast, CTRLTokenizer, GPTaTokenizer, GPTaTokenizerFast, PreTrainedTokenizerFast, RobertaTokenizer, RobertaTokenizerFast, is_tokenizers_available, ) from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig from transformers.models.auto.tokenization_auto import ( TOKENIZER_MAPPING, get_tokenizer_config, tokenizer_class_from_name, ) from transformers.models.roberta.configuration_roberta import RobertaConfig from transformers.testing_utils import ( DUMMY_DIFF_TOKENIZER_IDENTIFIER, DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, RequestCounter, require_tokenizers, slow, ) sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils''')) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_tokenization import CustomTokenizer # noqa E402 if is_tokenizers_available(): from test_module.custom_tokenization_fast import CustomTokenizerFast class __A( unittest.TestCase ): """simple docstring""" def UpperCAmelCase_ (self ): UpperCamelCase__ = 0 @slow def UpperCAmelCase_ (self ): for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x): UpperCamelCase__ = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE_ ) self.assertIsNotNone(SCREAMING_SNAKE_CASE_ ) self.assertIsInstance(SCREAMING_SNAKE_CASE_ , (BertTokenizer, BertTokenizerFast) ) self.assertGreater(len(SCREAMING_SNAKE_CASE_ ) , 0 ) for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys(): UpperCamelCase__ = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE_ ) self.assertIsNotNone(SCREAMING_SNAKE_CASE_ ) self.assertIsInstance(SCREAMING_SNAKE_CASE_ , (GPTaTokenizer, GPTaTokenizerFast) ) self.assertGreater(len(SCREAMING_SNAKE_CASE_ ) , 0 ) def UpperCAmelCase_ (self ): UpperCamelCase__ = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE_ ) self.assertIsInstance(SCREAMING_SNAKE_CASE_ , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(tokenizer.vocab_size , 12 ) def UpperCAmelCase_ (self ): UpperCamelCase__ = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE_ ) self.assertIsInstance(SCREAMING_SNAKE_CASE_ , (RobertaTokenizer, RobertaTokenizerFast) ) self.assertEqual(tokenizer.vocab_size , 20 ) def UpperCAmelCase_ (self ): UpperCamelCase__ = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE_ ) self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # Check that tokenizer_type ≠ model_type UpperCamelCase__ = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE_ , config=SCREAMING_SNAKE_CASE_ ) self.assertIsInstance(SCREAMING_SNAKE_CASE_ , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(tokenizer.vocab_size , 12 ) def UpperCAmelCase_ (self ): with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy("""./tests/fixtures/vocab.txt""" , os.path.join(SCREAMING_SNAKE_CASE_ , """vocab.txt""" ) ) UpperCamelCase__ = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE_ , tokenizer_type="""bert""" , use_fast=SCREAMING_SNAKE_CASE_ ) self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy("""./tests/fixtures/vocab.json""" , os.path.join(SCREAMING_SNAKE_CASE_ , """vocab.json""" ) ) shutil.copy("""./tests/fixtures/merges.txt""" , os.path.join(SCREAMING_SNAKE_CASE_ , """merges.txt""" 
) ) UpperCamelCase__ = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE_ , tokenizer_type="""gpt2""" , use_fast=SCREAMING_SNAKE_CASE_ ) self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) @require_tokenizers def UpperCAmelCase_ (self ): with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy("""./tests/fixtures/vocab.txt""" , os.path.join(SCREAMING_SNAKE_CASE_ , """vocab.txt""" ) ) UpperCamelCase__ = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE_ , tokenizer_type="""bert""" ) self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy("""./tests/fixtures/vocab.json""" , os.path.join(SCREAMING_SNAKE_CASE_ , """vocab.json""" ) ) shutil.copy("""./tests/fixtures/merges.txt""" , os.path.join(SCREAMING_SNAKE_CASE_ , """merges.txt""" ) ) UpperCamelCase__ = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE_ , tokenizer_type="""gpt2""" ) self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self ): with pytest.raises(SCREAMING_SNAKE_CASE_ ): AutoTokenizer.from_pretrained("""./""" , tokenizer_type="""xxx""" ) @require_tokenizers def UpperCAmelCase_ (self ): for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]: UpperCamelCase__ = tokenizer_class.from_pretrained("""wietsedv/bert-base-dutch-cased""" ) self.assertIsInstance(SCREAMING_SNAKE_CASE_ , (BertTokenizer, BertTokenizerFast) ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , SCREAMING_SNAKE_CASE_ ) else: self.assertEqual(tokenizer.do_lower_case , SCREAMING_SNAKE_CASE_ ) self.assertEqual(tokenizer.model_max_length , 5_12 ) @require_tokenizers def UpperCAmelCase_ (self ): for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]: with self.assertRaisesRegex( SCREAMING_SNAKE_CASE_ , """julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier""" , ): UpperCamelCase__ = tokenizer_class.from_pretrained("""julien-c/herlolip-not-exists""" ) def UpperCAmelCase_ (self ): # tests: https://github.com/huggingface/transformers/pull/13251 # 1. models with `-`, e.g. xlm-roberta -> xlm_roberta # 2. models that don't remap 1-1 from model-name to model file, e.g., openai-gpt -> openai UpperCamelCase__ = TOKENIZER_MAPPING.values() UpperCamelCase__ = [] for slow_tok, fast_tok in tokenizers: if slow_tok is not None: tokenizer_names.append(slow_tok.__name__ ) if fast_tok is not None: tokenizer_names.append(fast_tok.__name__ ) for tokenizer_name in tokenizer_names: # must find the right class tokenizer_class_from_name(SCREAMING_SNAKE_CASE_ ) @require_tokenizers def UpperCAmelCase_ (self ): self.assertIsInstance(AutoTokenizer.from_pretrained("""bert-base-cased""" , use_fast=SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) self.assertIsInstance(AutoTokenizer.from_pretrained("""bert-base-cased""" ) , SCREAMING_SNAKE_CASE_ ) @require_tokenizers def UpperCAmelCase_ (self ): UpperCamelCase__ = AutoTokenizer.from_pretrained("""distilbert-base-uncased""" , do_lower_case=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = """Hello, world. 
How are you?""" UpperCamelCase__ = tokenizer.tokenize(SCREAMING_SNAKE_CASE_ ) self.assertEqual("""[UNK]""" , tokens[0] ) UpperCamelCase__ = AutoTokenizer.from_pretrained("""microsoft/mpnet-base""" , do_lower_case=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = tokenizer.tokenize(SCREAMING_SNAKE_CASE_ ) self.assertEqual("""[UNK]""" , tokens[0] ) @require_tokenizers def UpperCAmelCase_ (self ): UpperCamelCase__ = AutoTokenizer.from_pretrained("""robot-test/dummy-tokenizer-fast-with-model-config""" ) self.assertEqual(type(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) self.assertEqual(tokenizer.model_max_length , 5_12 ) self.assertEqual(tokenizer.vocab_size , 3_00_00 ) self.assertEqual(tokenizer.unk_token , """[UNK]""" ) self.assertEqual(tokenizer.padding_side , """right""" ) self.assertEqual(tokenizer.truncation_side , """right""" ) def UpperCAmelCase_ (self ): UpperCamelCase__ = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE_ ) self.assertIsInstance(SCREAMING_SNAKE_CASE_ , (BertTokenizer, BertTokenizerFast) ) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE_ ) self.assertIsInstance(SCREAMING_SNAKE_CASE_ , tokenizer.__class__ ) self.assertEqual(tokenizera.vocab_size , 12 ) def UpperCAmelCase_ (self ): UpperCamelCase__ = AutoTokenizer.from_pretrained("""ctrl""" ) # There is no fast CTRL so this always gives us a slow tokenizer. self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self ): # Check we can load the tokenizer config of an online model. UpperCamelCase__ = get_tokenizer_config("""bert-base-cased""" ) UpperCamelCase__ = config.pop("""_commit_hash""" , SCREAMING_SNAKE_CASE_ ) # If we ever update bert-base-cased tokenizer config, this dict here will need to be updated. self.assertEqual(SCREAMING_SNAKE_CASE_ , {"""do_lower_case""": False} ) # This model does not have a tokenizer_config so we get back an empty dict. UpperCamelCase__ = get_tokenizer_config(SCREAMING_SNAKE_CASE_ ) self.assertDictEqual(SCREAMING_SNAKE_CASE_ , {} ) # A tokenizer saved with `save_pretrained` always creates a tokenizer config. UpperCamelCase__ = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE_ ) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = get_tokenizer_config(SCREAMING_SNAKE_CASE_ ) # Check the class of the tokenizer was properly saved (note that it always saves the slow class). 
self.assertEqual(config["""tokenizer_class"""] , """BertTokenizer""" ) def UpperCAmelCase_ (self ): try: AutoConfig.register("""custom""" , SCREAMING_SNAKE_CASE_ ) AutoTokenizer.register(SCREAMING_SNAKE_CASE_ , slow_tokenizer_class=SCREAMING_SNAKE_CASE_ ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(SCREAMING_SNAKE_CASE_ ): AutoTokenizer.register(SCREAMING_SNAKE_CASE_ , slow_tokenizer_class=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = CustomTokenizer.from_pretrained(SCREAMING_SNAKE_CASE_ ) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE_ ) self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] @require_tokenizers def UpperCAmelCase_ (self ): try: AutoConfig.register("""custom""" , SCREAMING_SNAKE_CASE_ ) # Can register in two steps AutoTokenizer.register(SCREAMING_SNAKE_CASE_ , slow_tokenizer_class=SCREAMING_SNAKE_CASE_ ) self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) ) AutoTokenizer.register(SCREAMING_SNAKE_CASE_ , fast_tokenizer_class=SCREAMING_SNAKE_CASE_ ) self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) ) del TOKENIZER_MAPPING._extra_content[CustomConfig] # Can register in one step AutoTokenizer.register( SCREAMING_SNAKE_CASE_ , slow_tokenizer_class=SCREAMING_SNAKE_CASE_ , fast_tokenizer_class=SCREAMING_SNAKE_CASE_ ) self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(SCREAMING_SNAKE_CASE_ ): AutoTokenizer.register(SCREAMING_SNAKE_CASE_ , fast_tokenizer_class=SCREAMING_SNAKE_CASE_ ) # We pass through a bert tokenizer fast cause there is no converter slow to fast for our new toknizer # and that model does not have a tokenizer.json with tempfile.TemporaryDirectory() as tmp_dir: UpperCamelCase__ = BertTokenizerFast.from_pretrained(SCREAMING_SNAKE_CASE_ ) bert_tokenizer.save_pretrained(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = CustomTokenizerFast.from_pretrained(SCREAMING_SNAKE_CASE_ ) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE_ ) self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE_ , use_fast=SCREAMING_SNAKE_CASE_ ) self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] def UpperCAmelCase_ (self ): # If remote code is not set, we will time out when asking whether to load the model. with self.assertRaises(SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" ) # If remote code is disabled, we can't load this config. 
with self.assertRaises(SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = AutoTokenizer.from_pretrained( """hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=SCREAMING_SNAKE_CASE_ ) self.assertTrue(tokenizer.special_attribute_present ) # Test tokenizer can be reloaded. with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE_ , trust_remote_code=SCREAMING_SNAKE_CASE_ ) self.assertTrue(reloaded_tokenizer.special_attribute_present ) if is_tokenizers_available(): self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" ) self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizerFast""" ) # Test we can also load the slow version UpperCamelCase__ = AutoTokenizer.from_pretrained( """hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=SCREAMING_SNAKE_CASE_ , use_fast=SCREAMING_SNAKE_CASE_ ) self.assertTrue(tokenizer.special_attribute_present ) self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" ) # Test tokenizer can be reloaded. with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE_ , trust_remote_code=SCREAMING_SNAKE_CASE_ , use_fast=SCREAMING_SNAKE_CASE_ ) self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizer""" ) self.assertTrue(reloaded_tokenizer.special_attribute_present ) else: self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" ) self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizer""" ) @require_tokenizers def UpperCAmelCase_ (self ): class __A( __lowerCamelCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = False class __A( __lowerCamelCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = NewTokenizer SCREAMING_SNAKE_CASE__ = False try: AutoConfig.register("""custom""" , SCREAMING_SNAKE_CASE_ ) AutoTokenizer.register(SCREAMING_SNAKE_CASE_ , slow_tokenizer_class=SCREAMING_SNAKE_CASE_ ) AutoTokenizer.register(SCREAMING_SNAKE_CASE_ , fast_tokenizer_class=SCREAMING_SNAKE_CASE_ ) # If remote code is not set, the default is to use local UpperCamelCase__ = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" ) self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" ) self.assertFalse(tokenizer.special_attribute_present ) UpperCamelCase__ = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" , use_fast=SCREAMING_SNAKE_CASE_ ) self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" ) self.assertFalse(tokenizer.special_attribute_present ) # If remote code is disabled, we load the local one. 
UpperCamelCase__ = AutoTokenizer.from_pretrained( """hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=SCREAMING_SNAKE_CASE_ ) self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" ) self.assertFalse(tokenizer.special_attribute_present ) UpperCamelCase__ = AutoTokenizer.from_pretrained( """hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=SCREAMING_SNAKE_CASE_ , use_fast=SCREAMING_SNAKE_CASE_ ) self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" ) self.assertFalse(tokenizer.special_attribute_present ) # If remote is enabled, we load from the Hub UpperCamelCase__ = AutoTokenizer.from_pretrained( """hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=SCREAMING_SNAKE_CASE_ ) self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" ) self.assertTrue(tokenizer.special_attribute_present ) UpperCamelCase__ = AutoTokenizer.from_pretrained( """hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=SCREAMING_SNAKE_CASE_ , use_fast=SCREAMING_SNAKE_CASE_ ) self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" ) self.assertTrue(tokenizer.special_attribute_present ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] def UpperCAmelCase_ (self ): UpperCamelCase__ = AutoTokenizer.from_pretrained( """hf-internal-testing/test_dynamic_tokenizer_legacy""" , trust_remote_code=SCREAMING_SNAKE_CASE_ ) self.assertTrue(tokenizer.special_attribute_present ) if is_tokenizers_available(): self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" ) # Test we can also load the slow version UpperCamelCase__ = AutoTokenizer.from_pretrained( """hf-internal-testing/test_dynamic_tokenizer_legacy""" , trust_remote_code=SCREAMING_SNAKE_CASE_ , use_fast=SCREAMING_SNAKE_CASE_ ) self.assertTrue(tokenizer.special_attribute_present ) self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" ) else: self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" ) def UpperCAmelCase_ (self ): with self.assertRaisesRegex( SCREAMING_SNAKE_CASE_ , """bert-base is not a local folder and is not a valid model identifier""" ): UpperCamelCase__ = AutoTokenizer.from_pretrained("""bert-base""" ) def UpperCAmelCase_ (self ): with self.assertRaisesRegex( SCREAMING_SNAKE_CASE_ , r"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ): UpperCamelCase__ = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE_ , revision="""aaaaaa""" ) def UpperCAmelCase_ (self ): # Make sure we have cached the tokenizer. UpperCamelCase__ = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" ) with RequestCounter() as counter: UpperCamelCase__ = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" ) self.assertEqual(counter.get_request_count , 0 ) self.assertEqual(counter.head_request_count , 1 ) self.assertEqual(counter.other_request_count , 0 )
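# Sketch of the register() round-trip exercised by the tests above, using the
# names from the test fixtures: a custom config class is mapped to custom
# tokenizer classes so AutoTokenizer can resolve them without remote code.
# AutoConfig.register("custom", CustomConfig)
# AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer,
#                        fast_tokenizer_class=CustomTokenizerFast)
# tokenizer = AutoTokenizer.from_pretrained(checkpoint_dir)  # -> CustomTokenizerFast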
86
from __future__ import annotations from typing import TypedDict class __A( __lowerCamelCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = 42 SCREAMING_SNAKE_CASE__ = 42 def __magic_name__ ( __a : str ): '''simple docstring''' if not isinstance(__a , __a ): raise TypeError("""The parameter s type must be str.""" ) return [s[i:] + s[:i] for i in range(len(__a ) )] def __magic_name__ ( __a : str ): '''simple docstring''' if not isinstance(__a , __a ): raise TypeError("""The parameter s type must be str.""" ) if not s: raise ValueError("""The parameter s must not be empty.""" ) UpperCamelCase__ = all_rotations(__a ) rotations.sort() # sort the list of rotations in alphabetically order # make a string composed of the last char of each rotation UpperCamelCase__ = { "bwt_string": "".join([word[-1] for word in rotations] ), "idx_original_string": rotations.index(__a ), } return response def __magic_name__ ( __a : str , __a : int ): '''simple docstring''' if not isinstance(__a , __a ): raise TypeError("""The parameter bwt_string type must be str.""" ) if not bwt_string: raise ValueError("""The parameter bwt_string must not be empty.""" ) try: UpperCamelCase__ = int(__a ) except ValueError: raise TypeError( """The parameter idx_original_string type must be int or passive""" """ of cast to int.""" ) if idx_original_string < 0: raise ValueError("""The parameter idx_original_string must not be lower than 0.""" ) if idx_original_string >= len(__a ): raise ValueError( """The parameter idx_original_string must be lower than""" """ len(bwt_string).""" ) UpperCamelCase__ = [""""""] * len(__a ) for _ in range(len(__a ) ): for i in range(len(__a ) ): UpperCamelCase__ = bwt_string[i] + ordered_rotations[i] ordered_rotations.sort() return ordered_rotations[idx_original_string] if __name__ == "__main__": lowerCamelCase_ = '''Provide a string that I will generate its BWT transform: ''' lowerCamelCase_ = input(entry_msg).strip() lowerCamelCase_ = bwt_transform(s) print( f'Burrows Wheeler transform for string \'{s}\' results ' f'in \'{result["bwt_string"]}\'' ) lowerCamelCase_ = reverse_bwt(result['''bwt_string'''], result['''idx_original_string''']) print( f'Reversing Burrows Wheeler transform for entry \'{result["bwt_string"]}\' ' f'we get original string \'{original_string}\'' )
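# Worked example for the functions above, using the bwt_transform/reverse_bwt
# names from the __main__ block: the rotations of "banana" sort to
# abanan, anaban, ananab, banana, nabana, nanaba, so the last column reads
# "nnbaaa" and the original string sits at index 3.
# bwt_transform("banana") == {"bwt_string": "nnbaaa", "idx_original_string": 3}
# reverse_bwt("nnbaaa", 3) == "banana"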
86
1
# XXX: we want transformers master here - in the absense of conftest manipulating sys.path: # hack it in for now: import sys from pathlib import Path lowerCamelCase_ = Path(__file__).resolve().parents[3] / '''src''' sys.path.insert(1, str(git_repo_path)) import dataclasses # noqa import io # noqa import itertools # noqa import json # noqa import os # noqa import unittest # noqa from copy import deepcopy # noqa from parameterized import parameterized # noqa from transformers import TrainingArguments, is_torch_available # noqa from transformers.deepspeed import is_deepspeed_available # noqa from transformers.file_utils import WEIGHTS_NAME # noqa from transformers.testing_utils import ( # noqa CaptureLogger, ExtendSysPath, TestCasePlus, execute_subprocess_async, get_gpu_count, mockenv_context, require_deepspeed, require_torch_gpu, require_torch_multi_gpu, slow, ) from transformers.trainer_utils import set_seed # noqa set_seed(42) lowerCamelCase_ = {'''base''': '''patrickvonplaten/wav2vec2_tiny_random''', '''robust''': '''patrickvonplaten/wav2vec2_tiny_random_robust'''} lowerCamelCase_ = '''zero2''' lowerCamelCase_ = '''zero3''' lowerCamelCase_ = [ZEROa, ZEROa] def __magic_name__ ( __a : List[str] , __a : Tuple , __a : List[Any] ): '''simple docstring''' UpperCamelCase__ = parameterized.to_safe_name("""_""".join(str(__a ) for x in param.args ) ) return f"{func.__name__}_{param_based_name}" # Cartesian-product of zero stages with models to test lowerCamelCase_ = list(itertools.product(stages, models.keys())) @slow @require_deepspeed @require_torch_gpu class __A( __lowerCamelCase ): """simple docstring""" @parameterized.expand(SCREAMING_SNAKE_CASE_ , name_func=SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): self.run_and_check( stage=SCREAMING_SNAKE_CASE_ , model=SCREAMING_SNAKE_CASE_ , distributed=SCREAMING_SNAKE_CASE_ , fpaa=SCREAMING_SNAKE_CASE_ , ) @require_torch_multi_gpu @parameterized.expand(SCREAMING_SNAKE_CASE_ , name_func=SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): self.run_and_check( stage=SCREAMING_SNAKE_CASE_ , model=SCREAMING_SNAKE_CASE_ , distributed=SCREAMING_SNAKE_CASE_ , fpaa=SCREAMING_SNAKE_CASE_ , ) @parameterized.expand(SCREAMING_SNAKE_CASE_ , name_func=SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): self.run_and_check( stage=SCREAMING_SNAKE_CASE_ , model=SCREAMING_SNAKE_CASE_ , distributed=SCREAMING_SNAKE_CASE_ , fpaa=SCREAMING_SNAKE_CASE_ , ) @require_torch_multi_gpu @parameterized.expand(SCREAMING_SNAKE_CASE_ , name_func=SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): self.run_and_check( stage=SCREAMING_SNAKE_CASE_ , model=SCREAMING_SNAKE_CASE_ , distributed=SCREAMING_SNAKE_CASE_ , fpaa=SCREAMING_SNAKE_CASE_ , ) def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ ): # XXX: run_asr is premature and doesn't save any results # so all we check for now is that the process didn't fail pass def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 10 , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = True , ): UpperCamelCase__ = models[model] UpperCamelCase__ = self.run_trainer( stage=SCREAMING_SNAKE_CASE_ , model_name=SCREAMING_SNAKE_CASE_ , eval_steps=SCREAMING_SNAKE_CASE_ , num_train_epochs=1 , distributed=SCREAMING_SNAKE_CASE_ , fpaa=SCREAMING_SNAKE_CASE_ , ) 
self.do_checks(SCREAMING_SNAKE_CASE_ ) return output_dir def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 10 , SCREAMING_SNAKE_CASE_ = 1 , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = True , ): UpperCamelCase__ = self.get_auto_remove_tmp_dir("""./xxx""" , after=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = F"\n --model_name_or_path {model_name}\n --dataset_name hf-internal-testing/librispeech_asr_dummy\n --dataset_config_name clean\n --train_split_name validation\n --validation_split_name validation\n --output_dir {output_dir}\n --num_train_epochs {str(SCREAMING_SNAKE_CASE_ )}\n --per_device_train_batch_size 2\n --per_device_eval_batch_size 2\n --evaluation_strategy steps\n --learning_rate 5e-4\n --warmup_steps 8\n --orthography timit\n --preprocessing_num_workers 1\n --group_by_length\n --freeze_feature_extractor\n --report_to none\n --save_steps 0\n --eval_steps {eval_steps}\n --report_to none\n ".split() if fpaa: args.extend(["""--fp16"""] ) # currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true, # hence the separate config files UpperCamelCase__ = F"--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json".split() UpperCamelCase__ = [F"{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py"] UpperCamelCase__ = self.get_launcher(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = launcher + script + args + ds_args # keep for quick debug # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die execute_subprocess_async(SCREAMING_SNAKE_CASE_ , env=self.get_env() ) return output_dir def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_=False ): # 1. explicitly set --num_nodes=1 just in case these tests end up run on a multi-node setup # - it won't be able to handle that # 2. for now testing with just 2 gpus max (since some quality tests may give different # results with mode gpus because we use very little data) UpperCamelCase__ = min(2 , get_gpu_count() ) if distributed else 1 return F"deepspeed --num_nodes 1 --num_gpus {num_gpus}".split()
86
import os from datetime import datetime as dt from github import Github lowerCamelCase_ = [ '''good first issue''', '''good second issue''', '''good difficult issue''', '''enhancement''', '''new pipeline/model''', '''new scheduler''', '''wip''', ] def __magic_name__ ( ): '''simple docstring''' UpperCamelCase__ = Github(os.environ["""GITHUB_TOKEN"""] ) UpperCamelCase__ = g.get_repo("""huggingface/diffusers""" ) UpperCamelCase__ = repo.get_issues(state="""open""" ) for issue in open_issues: UpperCamelCase__ = sorted(issue.get_comments() , key=lambda __a : i.created_at , reverse=__a ) UpperCamelCase__ = comments[0] if len(__a ) > 0 else None if ( last_comment is not None and last_comment.user.login == "github-actions[bot]" and (dt.utcnow() - issue.updated_at).days > 7 and (dt.utcnow() - issue.created_at).days >= 30 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() ) ): # Closes the issue after 7 days of inactivity since the Stalebot notification. issue.edit(state="""closed""" ) elif ( "stale" in issue.get_labels() and last_comment is not None and last_comment.user.login != "github-actions[bot]" ): # Opens the issue if someone other than Stalebot commented. issue.edit(state="""open""" ) issue.remove_from_labels("""stale""" ) elif ( (dt.utcnow() - issue.updated_at).days > 23 and (dt.utcnow() - issue.created_at).days >= 30 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() ) ): # Post a Stalebot notification after 23 days of inactivity. issue.create_comment( """This issue has been automatically marked as stale because it has not had """ """recent activity. If you think this still needs to be addressed """ """please comment on this thread.\n\nPlease note that issues that do not follow the """ """[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) """ """are likely to be ignored.""" ) issue.add_to_labels("""stale""" ) if __name__ == "__main__": main()
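# Timeline implied by the branches above: an issue with no activity for more
# than 23 days (and at least 30 days old) receives the "stale" label and a
# notification comment; if nobody but the bot comments and it stays quiet for
# more than 7 further days, it is closed; a human reply reopens it and removes
# the label.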
86
1
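# A minimal, self-contained sketch of the staleness thresholds the script above
# applies (23 days of inactivity to mark stale, issues at least 30 days old;
# closing happens 7 days after the bot's notification). The function name and
# signature here are illustrative, not part of the script.
from datetime import datetime, timedelta

STALE_AFTER_DAYS = 23  # inactivity before the notification is posted
MIN_AGE_DAYS = 30      # minimum issue age for either action


def should_mark_stale(updated_at: datetime, created_at: datetime, exempt: bool, now: datetime) -> bool:
    # Mirrors the third branch above: old enough, quiet long enough, not exempt.
    return (
        not exempt
        and (now - updated_at).days > STALE_AFTER_DAYS
        and (now - created_at).days >= MIN_AGE_DAYS
    )


if __name__ == "__main__":
    now = datetime.utcnow()
    born = now - timedelta(days=40)
    assert should_mark_stale(now - timedelta(days=24), born, exempt=False, now=now)
    assert not should_mark_stale(now - timedelta(days=5), born, exempt=False, now=now)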
import os import unittest from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer from transformers.testing_utils import require_jieba, tooslow from ...test_tokenization_common import TokenizerTesterMixin @require_jieba class __A( __lowerCamelCase , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = CpmAntTokenizer SCREAMING_SNAKE_CASE__ = False def UpperCAmelCase_ (self ): super().setUp() UpperCamelCase__ = [ """<d>""", """</d>""", """<s>""", """</s>""", """</_>""", """<unk>""", """<pad>""", """</n>""", """我""", """是""", """C""", """P""", """M""", """A""", """n""", """t""", ] UpperCamelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) ) @tooslow def UpperCAmelCase_ (self ): UpperCamelCase__ = CpmAntTokenizer.from_pretrained("""openbmb/cpm-ant-10b""" ) UpperCamelCase__ = """今天天气真好!""" UpperCamelCase__ = ["""今天""", """天气""", """真""", """好""", """!"""] UpperCamelCase__ = tokenizer.tokenize(SCREAMING_SNAKE_CASE_ ) self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = """今天天气真好!""" UpperCamelCase__ = [tokenizer.bos_token] + tokens UpperCamelCase__ = [6, 98_02, 1_49_62, 20_82, 8_31, 2_44] self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = tokenizer.decode(SCREAMING_SNAKE_CASE_ ) self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
86
import inspect from typing import List, Optional, Tuple, Union import numpy as np import PIL import torch import torch.utils.checkpoint from ...models import UNetaDModel, VQModel from ...schedulers import ( DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, ) from ...utils import PIL_INTERPOLATION, randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput def __magic_name__ ( __a : Optional[Any] ): '''simple docstring''' UpperCamelCase__ , UpperCamelCase__ = image.size UpperCamelCase__ , UpperCamelCase__ = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32 UpperCamelCase__ = image.resize((w, h) , resample=PIL_INTERPOLATION["""lanczos"""] ) UpperCamelCase__ = np.array(__a ).astype(np.floataa ) / 255.0 UpperCamelCase__ = image[None].transpose(0 , 3 , 1 , 2 ) UpperCamelCase__ = torch.from_numpy(__a ) return 2.0 * image - 1.0 class __A( __lowerCamelCase ): """simple docstring""" def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ): super().__init__() self.register_modules(vqvae=SCREAMING_SNAKE_CASE_ , unet=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ ) @torch.no_grad() def __call__(self , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 1 , SCREAMING_SNAKE_CASE_ = 1_00 , SCREAMING_SNAKE_CASE_ = 0.0 , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = "pil" , SCREAMING_SNAKE_CASE_ = True , ): if isinstance(SCREAMING_SNAKE_CASE_ , PIL.Image.Image ): UpperCamelCase__ = 1 elif isinstance(SCREAMING_SNAKE_CASE_ , torch.Tensor ): UpperCamelCase__ = image.shape[0] else: raise ValueError(F"`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(SCREAMING_SNAKE_CASE_ )}" ) if isinstance(SCREAMING_SNAKE_CASE_ , PIL.Image.Image ): UpperCamelCase__ = preprocess(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ , UpperCamelCase__ = image.shape[-2:] # in_channels should be 6: 3 for latents, 3 for low resolution image UpperCamelCase__ = (batch_size, self.unet.config.in_channels // 2, height, width) UpperCamelCase__ = next(self.unet.parameters() ).dtype UpperCamelCase__ = randn_tensor(SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , device=self.device , dtype=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = image.to(device=self.device , dtype=SCREAMING_SNAKE_CASE_ ) # set timesteps and move to the correct device self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ , device=self.device ) UpperCamelCase__ = self.scheduler.timesteps # scale the initial noise by the standard deviation required by the scheduler UpperCamelCase__ = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature. # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] UpperCamelCase__ = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() ) UpperCamelCase__ = {} if accepts_eta: UpperCamelCase__ = eta for t in self.progress_bar(SCREAMING_SNAKE_CASE_ ): # concat latents and low resolution image in the channel dimension. 
UpperCamelCase__ = torch.cat([latents, image] , dim=1 ) UpperCamelCase__ = self.scheduler.scale_model_input(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # predict the noise residual UpperCamelCase__ = self.unet(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).sample # compute the previous noisy sample x_t -> x_t-1 UpperCamelCase__ = self.scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ).prev_sample # decode the image latents with the VQVAE UpperCamelCase__ = self.vqvae.decode(SCREAMING_SNAKE_CASE_ ).sample UpperCamelCase__ = torch.clamp(SCREAMING_SNAKE_CASE_ , -1.0 , 1.0 ) UpperCamelCase__ = image / 2 + 0.5 UpperCamelCase__ = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": UpperCamelCase__ = self.numpy_to_pil(SCREAMING_SNAKE_CASE_ ) if not return_dict: return (image,) return ImagePipelineOutput(images=SCREAMING_SNAKE_CASE_ )
86
1
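# A runnable sketch of the image preprocessing helper above: round each side down
# to a multiple of 32, then rescale pixel values from [0, 255] to [-1, 1] in NCHW
# layout. The name preprocess_like_above is illustrative, and plain NumPy stands in
# for the final torch tensor conversion.
import numpy as np
import PIL.Image


def preprocess_like_above(image: PIL.Image.Image) -> np.ndarray:
    w, h = image.size
    w, h = (x - x % 32 for x in (w, h))                 # round down to a multiple of 32
    image = image.resize((w, h), resample=PIL.Image.LANCZOS)
    arr = np.asarray(image, dtype=np.float32) / 255.0   # [0, 1]
    arr = arr[None].transpose(0, 3, 1, 2)               # HWC -> NCHW with batch dim
    return 2.0 * arr - 1.0                              # [-1, 1], as the VAE expects


if __name__ == "__main__":
    img = PIL.Image.new("RGB", (70, 45))
    out = preprocess_like_above(img)
    assert out.shape == (1, 3, 32, 64)  # 45 -> 32, 70 -> 64
    assert out.min() >= -1.0 and out.max() <= 1.0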
from dataclasses import dataclass from typing import Optional import numpy as np import torch import torch.nn as nn from ..utils import BaseOutput, is_torch_version, randn_tensor from .attention_processor import SpatialNorm from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block @dataclass class __A( __lowerCamelCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = 42 class __A( nn.Module ): """simple docstring""" def __init__(self , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=("DownEncoderBlock2D",) , SCREAMING_SNAKE_CASE_=(64,) , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_="silu" , SCREAMING_SNAKE_CASE_=True , ): super().__init__() UpperCamelCase__ = layers_per_block UpperCamelCase__ = torch.nn.Convad( SCREAMING_SNAKE_CASE_ , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , ) UpperCamelCase__ = None UpperCamelCase__ = nn.ModuleList([] ) # down UpperCamelCase__ = block_out_channels[0] for i, down_block_type in enumerate(SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = output_channel UpperCamelCase__ = block_out_channels[i] UpperCamelCase__ = i == len(SCREAMING_SNAKE_CASE_ ) - 1 UpperCamelCase__ = get_down_block( SCREAMING_SNAKE_CASE_ , num_layers=self.layers_per_block , in_channels=SCREAMING_SNAKE_CASE_ , out_channels=SCREAMING_SNAKE_CASE_ , add_downsample=not is_final_block , resnet_eps=1E-6 , downsample_padding=0 , resnet_act_fn=SCREAMING_SNAKE_CASE_ , resnet_groups=SCREAMING_SNAKE_CASE_ , attention_head_dim=SCREAMING_SNAKE_CASE_ , temb_channels=SCREAMING_SNAKE_CASE_ , ) self.down_blocks.append(SCREAMING_SNAKE_CASE_ ) # mid UpperCamelCase__ = UNetMidBlockaD( in_channels=block_out_channels[-1] , resnet_eps=1E-6 , resnet_act_fn=SCREAMING_SNAKE_CASE_ , output_scale_factor=1 , resnet_time_scale_shift="""default""" , attention_head_dim=block_out_channels[-1] , resnet_groups=SCREAMING_SNAKE_CASE_ , temb_channels=SCREAMING_SNAKE_CASE_ , ) # out UpperCamelCase__ = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=SCREAMING_SNAKE_CASE_ , eps=1E-6 ) UpperCamelCase__ = nn.SiLU() UpperCamelCase__ = 2 * out_channels if double_z else out_channels UpperCamelCase__ = nn.Convad(block_out_channels[-1] , SCREAMING_SNAKE_CASE_ , 3 , padding=1 ) UpperCamelCase__ = False def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = x UpperCamelCase__ = self.conv_in(SCREAMING_SNAKE_CASE_ ) if self.training and self.gradient_checkpointing: def create_custom_forward(SCREAMING_SNAKE_CASE_ ): def custom_forward(*SCREAMING_SNAKE_CASE_ ): return module(*SCREAMING_SNAKE_CASE_ ) return custom_forward # down if is_torch_version(""">=""" , """1.11.0""" ): for down_block in self.down_blocks: UpperCamelCase__ = torch.utils.checkpoint.checkpoint( create_custom_forward(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ , use_reentrant=SCREAMING_SNAKE_CASE_ ) # middle UpperCamelCase__ = torch.utils.checkpoint.checkpoint( create_custom_forward(self.mid_block ) , SCREAMING_SNAKE_CASE_ , use_reentrant=SCREAMING_SNAKE_CASE_ ) else: for down_block in self.down_blocks: UpperCamelCase__ = torch.utils.checkpoint.checkpoint(create_custom_forward(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) # middle UpperCamelCase__ = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , SCREAMING_SNAKE_CASE_ ) else: # down for down_block in self.down_blocks: UpperCamelCase__ = down_block(SCREAMING_SNAKE_CASE_ ) # middle UpperCamelCase__ = self.mid_block(SCREAMING_SNAKE_CASE_ ) # post-process UpperCamelCase__ = 
self.conv_norm_out(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = self.conv_act(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = self.conv_out(SCREAMING_SNAKE_CASE_ ) return sample class __A( nn.Module ): """simple docstring""" def __init__(self , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=("UpDecoderBlock2D",) , SCREAMING_SNAKE_CASE_=(64,) , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_="silu" , SCREAMING_SNAKE_CASE_="group" , ): super().__init__() UpperCamelCase__ = layers_per_block UpperCamelCase__ = nn.Convad( SCREAMING_SNAKE_CASE_ , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , ) UpperCamelCase__ = None UpperCamelCase__ = nn.ModuleList([] ) UpperCamelCase__ = in_channels if norm_type == """spatial""" else None # mid UpperCamelCase__ = UNetMidBlockaD( in_channels=block_out_channels[-1] , resnet_eps=1E-6 , resnet_act_fn=SCREAMING_SNAKE_CASE_ , output_scale_factor=1 , resnet_time_scale_shift="""default""" if norm_type == """group""" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=SCREAMING_SNAKE_CASE_ , temb_channels=SCREAMING_SNAKE_CASE_ , ) # up UpperCamelCase__ = list(reversed(SCREAMING_SNAKE_CASE_ ) ) UpperCamelCase__ = reversed_block_out_channels[0] for i, up_block_type in enumerate(SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = output_channel UpperCamelCase__ = reversed_block_out_channels[i] UpperCamelCase__ = i == len(SCREAMING_SNAKE_CASE_ ) - 1 UpperCamelCase__ = get_up_block( SCREAMING_SNAKE_CASE_ , num_layers=self.layers_per_block + 1 , in_channels=SCREAMING_SNAKE_CASE_ , out_channels=SCREAMING_SNAKE_CASE_ , prev_output_channel=SCREAMING_SNAKE_CASE_ , add_upsample=not is_final_block , resnet_eps=1E-6 , resnet_act_fn=SCREAMING_SNAKE_CASE_ , resnet_groups=SCREAMING_SNAKE_CASE_ , attention_head_dim=SCREAMING_SNAKE_CASE_ , temb_channels=SCREAMING_SNAKE_CASE_ , resnet_time_scale_shift=SCREAMING_SNAKE_CASE_ , ) self.up_blocks.append(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = output_channel # out if norm_type == "spatial": UpperCamelCase__ = SpatialNorm(block_out_channels[0] , SCREAMING_SNAKE_CASE_ ) else: UpperCamelCase__ = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=SCREAMING_SNAKE_CASE_ , eps=1E-6 ) UpperCamelCase__ = nn.SiLU() UpperCamelCase__ = nn.Convad(block_out_channels[0] , SCREAMING_SNAKE_CASE_ , 3 , padding=1 ) UpperCamelCase__ = False def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None ): UpperCamelCase__ = z UpperCamelCase__ = self.conv_in(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = next(iter(self.up_blocks.parameters() ) ).dtype if self.training and self.gradient_checkpointing: def create_custom_forward(SCREAMING_SNAKE_CASE_ ): def custom_forward(*SCREAMING_SNAKE_CASE_ ): return module(*SCREAMING_SNAKE_CASE_ ) return custom_forward if is_torch_version(""">=""" , """1.11.0""" ): # middle UpperCamelCase__ = torch.utils.checkpoint.checkpoint( create_custom_forward(self.mid_block ) , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , use_reentrant=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = sample.to(SCREAMING_SNAKE_CASE_ ) # up for up_block in self.up_blocks: UpperCamelCase__ = torch.utils.checkpoint.checkpoint( create_custom_forward(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , use_reentrant=SCREAMING_SNAKE_CASE_ ) else: # middle UpperCamelCase__ = torch.utils.checkpoint.checkpoint( create_custom_forward(self.mid_block ) , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = 
sample.to(SCREAMING_SNAKE_CASE_ ) # up for up_block in self.up_blocks: UpperCamelCase__ = torch.utils.checkpoint.checkpoint(create_custom_forward(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else: # middle UpperCamelCase__ = self.mid_block(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = sample.to(SCREAMING_SNAKE_CASE_ ) # up for up_block in self.up_blocks: UpperCamelCase__ = up_block(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # post-process if latent_embeds is None: UpperCamelCase__ = self.conv_norm_out(SCREAMING_SNAKE_CASE_ ) else: UpperCamelCase__ = self.conv_norm_out(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = self.conv_act(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = self.conv_out(SCREAMING_SNAKE_CASE_ ) return sample class __A( nn.Module ): """simple docstring""" def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_="random" , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=True ): super().__init__() UpperCamelCase__ = n_e UpperCamelCase__ = vq_embed_dim UpperCamelCase__ = beta UpperCamelCase__ = legacy UpperCamelCase__ = nn.Embedding(self.n_e , self.vq_embed_dim ) self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e ) UpperCamelCase__ = remap if self.remap is not None: self.register_buffer("""used""" , torch.tensor(np.load(self.remap ) ) ) UpperCamelCase__ = self.used.shape[0] UpperCamelCase__ = unknown_index # "random" or "extra" or integer if self.unknown_index == "extra": UpperCamelCase__ = self.re_embed UpperCamelCase__ = self.re_embed + 1 print( F"Remapping {self.n_e} indices to {self.re_embed} indices. " F"Using {self.unknown_index} for unknown indices." ) else: UpperCamelCase__ = n_e UpperCamelCase__ = sane_index_shape def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = inds.shape assert len(SCREAMING_SNAKE_CASE_ ) > 1 UpperCamelCase__ = inds.reshape(ishape[0] , -1 ) UpperCamelCase__ = self.used.to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = (inds[:, :, None] == used[None, None, ...]).long() UpperCamelCase__ = match.argmax(-1 ) UpperCamelCase__ = match.sum(2 ) < 1 if self.unknown_index == "random": UpperCamelCase__ = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device ) else: UpperCamelCase__ = self.unknown_index return new.reshape(SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = inds.shape assert len(SCREAMING_SNAKE_CASE_ ) > 1 UpperCamelCase__ = inds.reshape(ishape[0] , -1 ) UpperCamelCase__ = self.used.to(SCREAMING_SNAKE_CASE_ ) if self.re_embed > self.used.shape[0]: # extra token UpperCamelCase__ = 0 # simply set to zero UpperCamelCase__ = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , SCREAMING_SNAKE_CASE_ ) return back.reshape(SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ ): # reshape z -> (batch, height, width, channel) and flatten UpperCamelCase__ = z.permute(0 , 2 , 3 , 1 ).contiguous() UpperCamelCase__ = z.view(-1 , self.vq_embed_dim ) # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z UpperCamelCase__ = torch.argmin(torch.cdist(SCREAMING_SNAKE_CASE_ , self.embedding.weight ) , dim=1 ) UpperCamelCase__ = self.embedding(SCREAMING_SNAKE_CASE_ ).view(z.shape ) UpperCamelCase__ = None UpperCamelCase__ = None # compute loss for embedding if not self.legacy: UpperCamelCase__ = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - 
z.detach()) ** 2 ) else: UpperCamelCase__ = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 ) # preserve gradients UpperCamelCase__ = z + (z_q - z).detach() # reshape back to match original input shape UpperCamelCase__ = z_q.permute(0 , 3 , 1 , 2 ).contiguous() if self.remap is not None: UpperCamelCase__ = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis UpperCamelCase__ = self.remap_to_used(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = min_encoding_indices.reshape(-1 , 1 ) # flatten if self.sane_index_shape: UpperCamelCase__ = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] ) return z_q, loss, (perplexity, min_encodings, min_encoding_indices) def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): # shape specifying (batch, height, width, channel) if self.remap is not None: UpperCamelCase__ = indices.reshape(shape[0] , -1 ) # add batch axis UpperCamelCase__ = self.unmap_to_all(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = indices.reshape(-1 ) # flatten again # get quantized latent vectors UpperCamelCase__ = self.embedding(SCREAMING_SNAKE_CASE_ ) if shape is not None: UpperCamelCase__ = z_q.view(SCREAMING_SNAKE_CASE_ ) # reshape back to match original input shape UpperCamelCase__ = z_q.permute(0 , 3 , 1 , 2 ).contiguous() return z_q class __A( __lowerCamelCase ): """simple docstring""" def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=False ): UpperCamelCase__ = parameters UpperCamelCase__ , UpperCamelCase__ = torch.chunk(SCREAMING_SNAKE_CASE_ , 2 , dim=1 ) UpperCamelCase__ = torch.clamp(self.logvar , -30.0 , 20.0 ) UpperCamelCase__ = deterministic UpperCamelCase__ = torch.exp(0.5 * self.logvar ) UpperCamelCase__ = torch.exp(self.logvar ) if self.deterministic: UpperCamelCase__ = UpperCamelCase__ = torch.zeros_like( self.mean , device=self.parameters.device , dtype=self.parameters.dtype ) def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ = None ): # make sure sample is on the same device as the parameters and has same dtype UpperCamelCase__ = randn_tensor( self.mean.shape , generator=SCREAMING_SNAKE_CASE_ , device=self.parameters.device , dtype=self.parameters.dtype ) UpperCamelCase__ = self.mean + self.std * sample return x def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_=None ): if self.deterministic: return torch.Tensor([0.0] ) else: if other is None: return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] ) else: return 0.5 * torch.sum( torch.pow(self.mean - other.mean , 2 ) / other.var + self.var / other.var - 1.0 - self.logvar + other.logvar , dim=[1, 2, 3] , ) def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=[1, 2, 3] ): if self.deterministic: return torch.Tensor([0.0] ) UpperCamelCase__ = np.log(2.0 * np.pi ) return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self ): return self.mean
86
def __magic_name__ ( a : str , b : str ):
    '''simple docstring'''
    n = len(a )
    m = len(b )
    # dp[i][j]: can the first i chars of a be abbreviated to the first j chars of b?
    dp = [[False for _ in range(m + 1 )] for _ in range(n + 1 )]
    dp[0][0] = True
    for i in range(n ):
        for j in range(m + 1 ):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    # consume a[i] (upper-cased if lowercase) as b[j]
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    # a lowercase character may also be dropped
                    dp[i + 1][j] = True
    return dp[n][m]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
86
1
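# A couple of worked cases for the abbreviation DP above, assuming the function
# is imported under the illustrative alias `matches`.
assert matches("daBcd", "ABC")    # upper-case 'a' and 'c', drop both 'd's
assert not matches("abc", "ABD")  # no 'd' can be produced from 'abc'
assert matches("", "")            # empty matches empty: dp[0][0]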
# Usage:
# ./gen-card-facebook-wmt19.py

import os
from pathlib import Path


def write_model_card(model_card_dir , src_lang , tgt_lang ):
    texts = {
        """en""": """Machine learning is great, isn't it?""",
        """ru""": """Машинное обучение - это здорово, не так ли?""",
        """de""": """Maschinelles Lernen ist großartig, oder?""",
    }

    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        """ru-en""": ["""[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)""", """39.20"""],
        """en-ru""": ["""[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)""", """33.47"""],
        """en-de""": ["""[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)""", """42.83"""],
        """de-en""": ["""[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)""", """41.35"""],
    }
    pair = F"{src_lang}-{tgt_lang}"
    readme = F"\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation.\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. For more details, please see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers` currently doesn't support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR's WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n"
    os.makedirs(model_card_dir , exist_ok=True )
    path = os.path.join(model_card_dir , """README.md""" )
    print(F"Generating {path}" )
    with open(path , """w""" , encoding="""utf-8""" ) as f:
        f.write(readme )


# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / '''model_cards'''

for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    _, src_lang, tgt_lang = model_name.split('''-''')
    model_card_dir = model_cards_dir / '''facebook''' / model_name
    write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
86
from __future__ import annotations

END = '''#'''


class Trie:
    """simple docstring"""

    def __init__(self ):
        self._trie = {}

    def insert_word(self , text ):
        trie = self._trie
        for char in text:
            if char not in trie:
                trie[char] = {}
            trie = trie[char]
        trie[END] = True

    def find_word(self , prefix ):
        trie = self._trie
        for char in prefix:
            if char in trie:
                trie = trie[char]
            else:
                return []
        return self._elements(trie )

    def _elements(self , d ):
        result = []
        for c, v in d.items():
            sub_result = [""" """] if c == END else [(c + s) for s in self._elements(v )]
            result.extend(sub_result )
        return tuple(result )


trie = Trie()
words = ('''depart''', '''detergent''', '''daring''', '''dog''', '''deer''', '''deal''')
for word in words:
    trie.insert_word(word )


def autocomplete_using_trie(string : str ):
    '''simple docstring'''
    suffixes = trie.find_word(string )
    return tuple(string + word for word in suffixes )


def main():
    print(autocomplete_using_trie("""de""" ) )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
86
1
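# Expected behavior of the autocomplete above, assuming insertion-ordered dicts
# (guaranteed since Python 3.7). Note that every completion carries a trailing
# space contributed by the END sentinel in _elements().
print(autocomplete_using_trie("de"))
# ('depart ', 'detergent ', 'deer ', 'deal ')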
import math import unittest from transformers import BioGptConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification, BioGptModel, BioGptTokenizer, ) from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST class __A: """simple docstring""" def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=13 , SCREAMING_SNAKE_CASE_=7 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=99 , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=5 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=37 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=5_12 , SCREAMING_SNAKE_CASE_=16 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=0.02 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=None , ): UpperCamelCase__ = parent UpperCamelCase__ = batch_size UpperCamelCase__ = seq_length UpperCamelCase__ = is_training UpperCamelCase__ = use_input_mask UpperCamelCase__ = use_token_type_ids UpperCamelCase__ = use_labels UpperCamelCase__ = vocab_size UpperCamelCase__ = hidden_size UpperCamelCase__ = num_hidden_layers UpperCamelCase__ = num_attention_heads UpperCamelCase__ = intermediate_size UpperCamelCase__ = hidden_act UpperCamelCase__ = hidden_dropout_prob UpperCamelCase__ = attention_probs_dropout_prob UpperCamelCase__ = max_position_embeddings UpperCamelCase__ = type_vocab_size UpperCamelCase__ = type_sequence_label_size UpperCamelCase__ = initializer_range UpperCamelCase__ = num_labels UpperCamelCase__ = num_choices UpperCamelCase__ = scope def UpperCAmelCase_ (self ): UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCamelCase__ = None if self.use_input_mask: UpperCamelCase__ = random_attention_mask([self.batch_size, self.seq_length] ) UpperCamelCase__ = None if self.use_token_type_ids: UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) UpperCamelCase__ = None UpperCamelCase__ = None UpperCamelCase__ = None if self.use_labels: UpperCamelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCamelCase__ = ids_tensor([self.batch_size] , self.num_choices ) UpperCamelCase__ = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCAmelCase_ (self ): return BioGptConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=SCREAMING_SNAKE_CASE_ , initializer_range=self.initializer_range , ) def UpperCAmelCase_ (self , 
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = BioGptModel(config=SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ): UpperCamelCase__ = BioGptForCausalLM(config=SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = BioGptModel(config=SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() # create attention mask UpperCamelCase__ = torch.ones(input_ids.shape , dtype=torch.long , device=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = self.seq_length // 2 UpperCamelCase__ = 0 # first forward pass UpperCamelCase__ , UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ ).to_tuple() # create hypothetical next token and extent to next_input_ids UpperCamelCase__ = ids_tensor((self.batch_size, 1) , config.vocab_size ) # change a random masked slice from input_ids UpperCamelCase__ = ids_tensor((1,) , SCREAMING_SNAKE_CASE_ ).item() + 1 UpperCamelCase__ = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 ) UpperCamelCase__ = random_other_next_tokens # append to next input_ids and attn_mask UpperCamelCase__ = torch.cat([input_ids, next_tokens] , dim=-1 ) UpperCamelCase__ = torch.cat( [attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=SCREAMING_SNAKE_CASE_ )] , dim=1 , ) # get two different outputs UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ )["""last_hidden_state"""] UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , past_key_values=SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ )["""last_hidden_state"""] # select random slice UpperCamelCase__ = ids_tensor((1,) , output_from_past.shape[-1] ).item() UpperCamelCase__ = output_from_no_past[:, -1, random_slice_idx].detach() UpperCamelCase__ = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1E-3 ) ) def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = BioGptModel(config=SCREAMING_SNAKE_CASE_ ).to(SCREAMING_SNAKE_CASE_ ).eval() UpperCamelCase__ = torch.ones(input_ids.shape , dtype=torch.long , device=SCREAMING_SNAKE_CASE_ ) # first forward pass UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , 
use_cache=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ , UpperCamelCase__ = outputs.to_tuple() # create hypothetical multiple next token and extent to next_input_ids UpperCamelCase__ = ids_tensor((self.batch_size, 3) , config.vocab_size ) UpperCamelCase__ = ids_tensor((self.batch_size, 3) , 2 ) # append to next input_ids and UpperCamelCase__ = torch.cat([input_ids, next_tokens] , dim=-1 ) UpperCamelCase__ = torch.cat([attention_mask, next_attn_mask] , dim=-1 ) UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ )["""last_hidden_state"""] UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , past_key_values=SCREAMING_SNAKE_CASE_ )[ """last_hidden_state""" ] # select random slice UpperCamelCase__ = ids_tensor((1,) , output_from_past.shape[-1] ).item() UpperCamelCase__ = output_from_no_past[:, -3:, random_slice_idx].detach() UpperCamelCase__ = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1E-3 ) ) def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=False ): UpperCamelCase__ = BioGptForCausalLM(SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) if gradient_checkpointing: model.gradient_checkpointing_enable() UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) result.loss.backward() def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = BioGptModel(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers ) for key in model.state_dict().keys(): if "c_proj" in key and "weight" in key: self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.001 ) self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.01 ) def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = self.num_labels UpperCamelCase__ = BioGptForTokenClassification(SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCAmelCase_ (self ): UpperCamelCase__ = self.prepare_config_and_inputs() ( ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ) = config_and_inputs UpperCamelCase__ = {"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class __A( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = ( (BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification) if is_torch_available() else () ) SCREAMING_SNAKE_CASE__ = (BioGptForCausalLM,) if 
is_torch_available() else () SCREAMING_SNAKE_CASE__ = ( { """feature-extraction""": BioGptModel, """text-classification""": BioGptForSequenceClassification, """text-generation""": BioGptForCausalLM, """token-classification""": BioGptForTokenClassification, """zero-shot""": BioGptForSequenceClassification, } if is_torch_available() else {} ) SCREAMING_SNAKE_CASE__ = False def UpperCAmelCase_ (self ): UpperCamelCase__ = BioGptModelTester(self ) UpperCamelCase__ = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , hidden_size=37 ) def UpperCAmelCase_ (self ): self.config_tester.run_common_tests() def UpperCAmelCase_ (self ): UpperCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self ): UpperCamelCase__ = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: UpperCamelCase__ = type self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self ): UpperCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_model_attention_mask_past(*SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self ): UpperCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_forward_and_backwards(*SCREAMING_SNAKE_CASE_ , gradient_checkpointing=SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self ): UpperCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_model_past_large_inputs(*SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self ): UpperCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_weight_initialization(*SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self ): UpperCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_for_token_classification(*SCREAMING_SNAKE_CASE_ ) @slow def UpperCAmelCase_ (self ): UpperCamelCase__ = BioGptForCausalLM.from_pretrained("""microsoft/biogpt""" ) model.to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = BioGptTokenizer.from_pretrained("""microsoft/biogpt""" ) UpperCamelCase__ = """left""" # Define PAD Token = EOS Token = 50256 UpperCamelCase__ = tokenizer.eos_token UpperCamelCase__ = model.config.eos_token_id # use different length sentences to test batching UpperCamelCase__ = [ """Hello, my dog is a little""", """Today, I""", ] UpperCamelCase__ = tokenizer(SCREAMING_SNAKE_CASE_ , return_tensors="""pt""" , padding=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = inputs["""input_ids"""].to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = model.generate( input_ids=SCREAMING_SNAKE_CASE_ , attention_mask=inputs["""attention_mask"""].to(SCREAMING_SNAKE_CASE_ ) , ) UpperCamelCase__ = tokenizer(sentences[0] , return_tensors="""pt""" ).input_ids.to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = model.generate(input_ids=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = inputs_non_padded.shape[-1] - inputs["""attention_mask"""][-1].long().sum().cpu().item() UpperCamelCase__ = tokenizer(sentences[1] , return_tensors="""pt""" ).input_ids.to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = model.generate(input_ids=SCREAMING_SNAKE_CASE_ , max_length=model.config.max_length - num_paddings ) UpperCamelCase__ = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ , skip_special_tokens=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = tokenizer.decode(output_non_padded[0] , skip_special_tokens=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = 
tokenizer.decode(output_padded[0] , skip_special_tokens=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = [ """Hello, my dog is a little bit bigger than a little bit.""", """Today, I have a good idea of how to use the information""", ] self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) self.assertListEqual(SCREAMING_SNAKE_CASE_ , [non_padded_sentence, padded_sentence] ) @slow def UpperCAmelCase_ (self ): for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCamelCase__ = BioGptModel.from_pretrained(SCREAMING_SNAKE_CASE_ ) self.assertIsNotNone(SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self ): UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() UpperCamelCase__ = 3 UpperCamelCase__ = input_dict["""input_ids"""] UpperCamelCase__ = input_ids.ne(1 ).to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) UpperCamelCase__ = BioGptForSequenceClassification(SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def UpperCAmelCase_ (self ): UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() UpperCamelCase__ = 3 UpperCamelCase__ = """multi_label_classification""" UpperCamelCase__ = input_dict["""input_ids"""] UpperCamelCase__ = input_ids.ne(1 ).to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = ids_tensor( [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float ) UpperCamelCase__ = BioGptForSequenceClassification(SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) @require_torch class __A( unittest.TestCase ): """simple docstring""" @slow def UpperCAmelCase_ (self ): UpperCamelCase__ = BioGptForCausalLM.from_pretrained("""microsoft/biogpt""" ) UpperCamelCase__ = torch.tensor([[2, 48_05, 9, 6_56, 21]] ) UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ )[0] UpperCamelCase__ = 4_23_84 UpperCamelCase__ = torch.Size((1, 5, vocab_size) ) self.assertEqual(output.shape , SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = torch.tensor( [[[-9.5236, -9.8918, 10.4557], [-11.0469, -9.6423, 8.1022], [-8.8664, -7.8826, 5.5325]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=1E-4 ) ) @slow def UpperCAmelCase_ (self ): UpperCamelCase__ = BioGptTokenizer.from_pretrained("""microsoft/biogpt""" ) UpperCamelCase__ = BioGptForCausalLM.from_pretrained("""microsoft/biogpt""" ) model.to(SCREAMING_SNAKE_CASE_ ) torch.manual_seed(0 ) UpperCamelCase__ = tokenizer("""COVID-19 is""" , return_tensors="""pt""" ).to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = model.generate( **SCREAMING_SNAKE_CASE_ , min_length=1_00 , max_length=10_24 , num_beams=5 , early_stopping=SCREAMING_SNAKE_CASE_ , ) UpperCamelCase__ = tokenizer.decode(output_ids[0] , skip_special_tokens=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = ( """COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the""" """ causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 
200 countries and""" """ territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),""" """ and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and""" """ more than 800,000 deaths.""" ) self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
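# The batched-generation test above depends on left padding. A short hedged sketch
# of why, using gpt2 as a stand-in tokenizer (any decoder-only checkpoint behaves
# the same way): generation continues from the final position of each row, so the
# pad tokens must sit on the left and real tokens at the end.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("gpt2")  # stand-in for the BioGpt tokenizer
tok.padding_side = "left"
tok.pad_token = tok.eos_token
batch = tok(["Hello, my dog is a little", "Today, I"], return_tensors="pt", padding=True)
print(batch["input_ids"].shape)  # both rows padded to the longer prompt
print(batch["attention_mask"])   # left-padded rows end in 1s (real tokens)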
86
import math import unittest from transformers import BioGptConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification, BioGptModel, BioGptTokenizer, ) from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST class __A: """simple docstring""" def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=13 , SCREAMING_SNAKE_CASE_=7 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=99 , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=5 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=37 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=5_12 , SCREAMING_SNAKE_CASE_=16 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=0.02 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=None , ): UpperCamelCase__ = parent UpperCamelCase__ = batch_size UpperCamelCase__ = seq_length UpperCamelCase__ = is_training UpperCamelCase__ = use_input_mask UpperCamelCase__ = use_token_type_ids UpperCamelCase__ = use_labels UpperCamelCase__ = vocab_size UpperCamelCase__ = hidden_size UpperCamelCase__ = num_hidden_layers UpperCamelCase__ = num_attention_heads UpperCamelCase__ = intermediate_size UpperCamelCase__ = hidden_act UpperCamelCase__ = hidden_dropout_prob UpperCamelCase__ = attention_probs_dropout_prob UpperCamelCase__ = max_position_embeddings UpperCamelCase__ = type_vocab_size UpperCamelCase__ = type_sequence_label_size UpperCamelCase__ = initializer_range UpperCamelCase__ = num_labels UpperCamelCase__ = num_choices UpperCamelCase__ = scope def UpperCAmelCase_ (self ): UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCamelCase__ = None if self.use_input_mask: UpperCamelCase__ = random_attention_mask([self.batch_size, self.seq_length] ) UpperCamelCase__ = None if self.use_token_type_ids: UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) UpperCamelCase__ = None UpperCamelCase__ = None UpperCamelCase__ = None if self.use_labels: UpperCamelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCamelCase__ = ids_tensor([self.batch_size] , self.num_choices ) UpperCamelCase__ = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCAmelCase_ (self ): return BioGptConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=SCREAMING_SNAKE_CASE_ , initializer_range=self.initializer_range , ) def UpperCAmelCase_ (self , 
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = BioGptModel(config=SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ): UpperCamelCase__ = BioGptForCausalLM(config=SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = BioGptModel(config=SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() # create attention mask UpperCamelCase__ = torch.ones(input_ids.shape , dtype=torch.long , device=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = self.seq_length // 2 UpperCamelCase__ = 0 # first forward pass UpperCamelCase__ , UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ ).to_tuple() # create hypothetical next token and extent to next_input_ids UpperCamelCase__ = ids_tensor((self.batch_size, 1) , config.vocab_size ) # change a random masked slice from input_ids UpperCamelCase__ = ids_tensor((1,) , SCREAMING_SNAKE_CASE_ ).item() + 1 UpperCamelCase__ = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 ) UpperCamelCase__ = random_other_next_tokens # append to next input_ids and attn_mask UpperCamelCase__ = torch.cat([input_ids, next_tokens] , dim=-1 ) UpperCamelCase__ = torch.cat( [attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=SCREAMING_SNAKE_CASE_ )] , dim=1 , ) # get two different outputs UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ )["""last_hidden_state"""] UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , past_key_values=SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ )["""last_hidden_state"""] # select random slice UpperCamelCase__ = ids_tensor((1,) , output_from_past.shape[-1] ).item() UpperCamelCase__ = output_from_no_past[:, -1, random_slice_idx].detach() UpperCamelCase__ = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1E-3 ) ) def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = BioGptModel(config=SCREAMING_SNAKE_CASE_ ).to(SCREAMING_SNAKE_CASE_ ).eval() UpperCamelCase__ = torch.ones(input_ids.shape , dtype=torch.long , device=SCREAMING_SNAKE_CASE_ ) # first forward pass UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , 
use_cache=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ , UpperCamelCase__ = outputs.to_tuple() # create hypothetical multiple next token and extent to next_input_ids UpperCamelCase__ = ids_tensor((self.batch_size, 3) , config.vocab_size ) UpperCamelCase__ = ids_tensor((self.batch_size, 3) , 2 ) # append to next input_ids and UpperCamelCase__ = torch.cat([input_ids, next_tokens] , dim=-1 ) UpperCamelCase__ = torch.cat([attention_mask, next_attn_mask] , dim=-1 ) UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ )["""last_hidden_state"""] UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , past_key_values=SCREAMING_SNAKE_CASE_ )[ """last_hidden_state""" ] # select random slice UpperCamelCase__ = ids_tensor((1,) , output_from_past.shape[-1] ).item() UpperCamelCase__ = output_from_no_past[:, -3:, random_slice_idx].detach() UpperCamelCase__ = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1E-3 ) ) def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=False ): UpperCamelCase__ = BioGptForCausalLM(SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) if gradient_checkpointing: model.gradient_checkpointing_enable() UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) result.loss.backward() def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = BioGptModel(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers ) for key in model.state_dict().keys(): if "c_proj" in key and "weight" in key: self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.001 ) self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.01 ) def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = self.num_labels UpperCamelCase__ = BioGptForTokenClassification(SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCAmelCase_ (self ): UpperCamelCase__ = self.prepare_config_and_inputs() ( ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ) = config_and_inputs UpperCamelCase__ = {"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class __A( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = ( (BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification) if is_torch_available() else () ) SCREAMING_SNAKE_CASE__ = (BioGptForCausalLM,) if 
is_torch_available() else () SCREAMING_SNAKE_CASE__ = ( { """feature-extraction""": BioGptModel, """text-classification""": BioGptForSequenceClassification, """text-generation""": BioGptForCausalLM, """token-classification""": BioGptForTokenClassification, """zero-shot""": BioGptForSequenceClassification, } if is_torch_available() else {} ) SCREAMING_SNAKE_CASE__ = False def UpperCAmelCase_ (self ): UpperCamelCase__ = BioGptModelTester(self ) UpperCamelCase__ = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , hidden_size=37 ) def UpperCAmelCase_ (self ): self.config_tester.run_common_tests() def UpperCAmelCase_ (self ): UpperCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self ): UpperCamelCase__ = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: UpperCamelCase__ = type self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self ): UpperCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_model_attention_mask_past(*SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self ): UpperCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_forward_and_backwards(*SCREAMING_SNAKE_CASE_ , gradient_checkpointing=SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self ): UpperCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_model_past_large_inputs(*SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self ): UpperCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_weight_initialization(*SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self ): UpperCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_for_token_classification(*SCREAMING_SNAKE_CASE_ ) @slow def UpperCAmelCase_ (self ): UpperCamelCase__ = BioGptForCausalLM.from_pretrained("""microsoft/biogpt""" ) model.to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = BioGptTokenizer.from_pretrained("""microsoft/biogpt""" ) UpperCamelCase__ = """left""" # Define PAD Token = EOS Token = 50256 UpperCamelCase__ = tokenizer.eos_token UpperCamelCase__ = model.config.eos_token_id # use different length sentences to test batching UpperCamelCase__ = [ """Hello, my dog is a little""", """Today, I""", ] UpperCamelCase__ = tokenizer(SCREAMING_SNAKE_CASE_ , return_tensors="""pt""" , padding=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = inputs["""input_ids"""].to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = model.generate( input_ids=SCREAMING_SNAKE_CASE_ , attention_mask=inputs["""attention_mask"""].to(SCREAMING_SNAKE_CASE_ ) , ) UpperCamelCase__ = tokenizer(sentences[0] , return_tensors="""pt""" ).input_ids.to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = model.generate(input_ids=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = inputs_non_padded.shape[-1] - inputs["""attention_mask"""][-1].long().sum().cpu().item() UpperCamelCase__ = tokenizer(sentences[1] , return_tensors="""pt""" ).input_ids.to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = model.generate(input_ids=SCREAMING_SNAKE_CASE_ , max_length=model.config.max_length - num_paddings ) UpperCamelCase__ = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ , skip_special_tokens=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = tokenizer.decode(output_non_padded[0] , skip_special_tokens=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = 
tokenizer.decode(output_padded[0] , skip_special_tokens=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = [ """Hello, my dog is a little bit bigger than a little bit.""", """Today, I have a good idea of how to use the information""", ] self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) self.assertListEqual(SCREAMING_SNAKE_CASE_ , [non_padded_sentence, padded_sentence] ) @slow def UpperCAmelCase_ (self ): for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCamelCase__ = BioGptModel.from_pretrained(SCREAMING_SNAKE_CASE_ ) self.assertIsNotNone(SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self ): UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() UpperCamelCase__ = 3 UpperCamelCase__ = input_dict["""input_ids"""] UpperCamelCase__ = input_ids.ne(1 ).to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) UpperCamelCase__ = BioGptForSequenceClassification(SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def UpperCAmelCase_ (self ): UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() UpperCamelCase__ = 3 UpperCamelCase__ = """multi_label_classification""" UpperCamelCase__ = input_dict["""input_ids"""] UpperCamelCase__ = input_ids.ne(1 ).to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = ids_tensor( [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float ) UpperCamelCase__ = BioGptForSequenceClassification(SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) @require_torch class __A( unittest.TestCase ): """simple docstring""" @slow def UpperCAmelCase_ (self ): UpperCamelCase__ = BioGptForCausalLM.from_pretrained("""microsoft/biogpt""" ) UpperCamelCase__ = torch.tensor([[2, 48_05, 9, 6_56, 21]] ) UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ )[0] UpperCamelCase__ = 4_23_84 UpperCamelCase__ = torch.Size((1, 5, vocab_size) ) self.assertEqual(output.shape , SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = torch.tensor( [[[-9.5236, -9.8918, 10.4557], [-11.0469, -9.6423, 8.1022], [-8.8664, -7.8826, 5.5325]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=1E-4 ) ) @slow def UpperCAmelCase_ (self ): UpperCamelCase__ = BioGptTokenizer.from_pretrained("""microsoft/biogpt""" ) UpperCamelCase__ = BioGptForCausalLM.from_pretrained("""microsoft/biogpt""" ) model.to(SCREAMING_SNAKE_CASE_ ) torch.manual_seed(0 ) UpperCamelCase__ = tokenizer("""COVID-19 is""" , return_tensors="""pt""" ).to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = model.generate( **SCREAMING_SNAKE_CASE_ , min_length=1_00 , max_length=10_24 , num_beams=5 , early_stopping=SCREAMING_SNAKE_CASE_ , ) UpperCamelCase__ = tokenizer.decode(output_ids[0] , skip_special_tokens=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = ( """COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the""" """ causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 
200 countries and""" """ territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),""" """ and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and""" """ more than 800,000 deaths.""" ) self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
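# --- Illustrative usage (a sketch, not part of the test suite above) ---
# A minimal greedy-generation example with the public `microsoft/biogpt`
# checkpoint, mirroring the slow integration tests; it assumes `torch`,
# `transformers`, and network access to download the weights.
from transformers import BioGptForCausalLM, BioGptTokenizer

_tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")
_model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
_inputs = _tokenizer("COVID-19 is", return_tensors="pt")
_output_ids = _model.generate(**_inputs, max_length=20)
print(_tokenizer.decode(_output_ids[0], skip_special_tokens=True))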
import functools
import gc
import inspect

import torch

from .imports import is_npu_available, is_xpu_available


def release_memory(*objects):
    # Drop every reference we were handed, then flush the allocator cache.
    if not isinstance(objects, list):
        objects = list(objects)
    for i in range(len(objects)):
        objects[i] = None
    gc.collect()
    if is_xpu_available():
        torch.xpu.empty_cache()
    elif is_npu_available():
        torch.npu.empty_cache()
    else:
        torch.cuda.empty_cache()
    return objects


def should_reduce_batch_size(exception: Exception) -> bool:
    _statements = [
        """CUDA out of memory.""",  # CUDA OOM
        """cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.""",  # CUDNN SNAFU
        """DefaultCPUAllocator: can't allocate memory""",  # CPU OOM
    ]
    if isinstance(exception, RuntimeError) and len(exception.args) == 1:
        return any(err in exception.args[0] for err in _statements)
    return False


def find_executable_batch_size(function: callable = None, starting_batch_size: int = 128):
    if function is None:
        return functools.partial(find_executable_batch_size, starting_batch_size=starting_batch_size)

    batch_size = starting_batch_size

    def decorator(*args, **kwargs):
        nonlocal batch_size
        gc.collect()
        if is_xpu_available():
            torch.xpu.empty_cache()
        elif is_npu_available():
            torch.npu.empty_cache()
        else:
            torch.cuda.empty_cache()
        params = list(inspect.signature(function).parameters.keys())
        # Guard against user error
        if len(params) < (len(args) + 1):
            arg_str = """, """.join([f"{arg}={value}" for arg, value in zip(params[1:], args[1:])])
            raise TypeError(
                f"Batch size was passed into `{function.__name__}` as the first argument when called. "
                f"Remove this as the decorator already does so: `{function.__name__}({arg_str})`"
            )
        while True:
            if batch_size == 0:
                raise RuntimeError("""No executable batch size found, reached zero.""")
            try:
                return function(batch_size, *args, **kwargs)
            except Exception as e:
                if should_reduce_batch_size(e):
                    gc.collect()
                    if is_xpu_available():
                        torch.xpu.empty_cache()
                    elif is_npu_available():
                        torch.npu.empty_cache()
                    else:
                        torch.cuda.empty_cache()
                    batch_size //= 2
                else:
                    raise

    return decorator
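# --- Illustrative usage (a sketch, not part of the module above) ---
# `training_loop` and its body are hypothetical. The decorator injects
# `batch_size` as the first argument and halves it whenever the wrapped
# function raises an out-of-memory error recognized by
# `should_reduce_batch_size`, stopping with a RuntimeError at zero.
@find_executable_batch_size(starting_batch_size=128)
def training_loop(batch_size):
    print(f"trying batch size {batch_size}")  # hypothetical training step


training_loop()  # called without a batch size; the decorator supplies it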
from PIL import Image


def change_brightness(img: Image, level: float) -> Image:
    def brightness(c: int) -> float:
        # Equivalent to c + level; written around the 128 mid-point.
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError("""level must be between -255.0 (black) and 255.0 (white)""")

    return img.point(brightness)


if __name__ == "__main__":
    # Load image
    with Image.open('''image_data/lena.jpg''') as img:
        # Change brightness to 100
        brigt_img = change_brightness(img, 1_00)
        brigt_img.save('''image_data/lena_brightness.png''', format='''png''')
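# --- Worked example (a self-contained check of the transfer function) ---
# `brightness` reduces to `c + level`: with level=100, a pixel value of 50
# maps to 150. `Image.point` turns the function into a per-pixel lookup
# table; the tiny generated image below avoids relying on the
# `image_data/lena.jpg` asset assumed by the demo above.
if __name__ == "__main__":
    _demo = Image.new("L", (2, 2), color=50)
    _brighter = change_brightness(_demo, 1_00)
    assert list(_brighter.getdata()) == [150] * 4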
import unittest from transformers import BigBirdConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax from transformers.models.big_bird.modeling_flax_big_bird import ( FlaxBigBirdForCausalLM, FlaxBigBirdForMaskedLM, FlaxBigBirdForMultipleChoice, FlaxBigBirdForPreTraining, FlaxBigBirdForQuestionAnswering, FlaxBigBirdForSequenceClassification, FlaxBigBirdForTokenClassification, FlaxBigBirdModel, ) class __A( unittest.TestCase ): """simple docstring""" def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=56 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=99 , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=7 , SCREAMING_SNAKE_CASE_="gelu_new" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=5_12 , SCREAMING_SNAKE_CASE_=16 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=0.02 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_="block_sparse" , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=3 , ): UpperCamelCase__ = parent UpperCamelCase__ = batch_size UpperCamelCase__ = seq_length UpperCamelCase__ = is_training UpperCamelCase__ = use_attention_mask UpperCamelCase__ = use_token_type_ids UpperCamelCase__ = use_labels UpperCamelCase__ = vocab_size UpperCamelCase__ = hidden_size UpperCamelCase__ = num_hidden_layers UpperCamelCase__ = num_attention_heads UpperCamelCase__ = intermediate_size UpperCamelCase__ = hidden_act UpperCamelCase__ = hidden_dropout_prob UpperCamelCase__ = attention_probs_dropout_prob UpperCamelCase__ = max_position_embeddings UpperCamelCase__ = type_vocab_size UpperCamelCase__ = type_sequence_label_size UpperCamelCase__ = initializer_range UpperCamelCase__ = num_choices UpperCamelCase__ = rescale_embeddings UpperCamelCase__ = attention_type UpperCamelCase__ = use_bias UpperCamelCase__ = block_size UpperCamelCase__ = num_random_blocks def UpperCAmelCase_ (self ): UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCamelCase__ = None if self.use_attention_mask: UpperCamelCase__ = random_attention_mask([self.batch_size, self.seq_length] ) UpperCamelCase__ = None if self.use_token_type_ids: UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) UpperCamelCase__ = BigBirdConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=SCREAMING_SNAKE_CASE_ , initializer_range=self.initializer_range , attention_type=self.attention_type , block_size=self.block_size , num_random_blocks=self.num_random_blocks , use_bias=self.use_bias , rescale_embeddings=self.rescale_embeddings , ) return config, input_ids, token_type_ids, attention_mask def UpperCAmelCase_ (self ): UpperCamelCase__ = self.prepare_config_and_inputs() UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = config_and_inputs 
UpperCamelCase__ = { """input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask, } return config, inputs_dict @require_flax class __A( __lowerCamelCase , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = ( ( FlaxBigBirdForCausalLM, FlaxBigBirdModel, FlaxBigBirdForPreTraining, FlaxBigBirdForMaskedLM, FlaxBigBirdForMultipleChoice, FlaxBigBirdForQuestionAnswering, FlaxBigBirdForSequenceClassification, FlaxBigBirdForTokenClassification, ) if is_flax_available() else () ) SCREAMING_SNAKE_CASE__ = False SCREAMING_SNAKE_CASE__ = False def UpperCAmelCase_ (self ): UpperCamelCase__ = FlaxBigBirdModelTester(self ) @slow # copied from `test_modeling_flax_common` because it takes much longer than other models def UpperCAmelCase_ (self ): super().test_from_pretrained_save_pretrained() @slow # copied from `test_modeling_flax_common` because it takes much longer than other models def UpperCAmelCase_ (self ): super().test_from_pretrained_with_no_automatic_init() @slow # copied from `test_modeling_flax_common` because it takes much longer than other models def UpperCAmelCase_ (self ): super().test_no_automatic_init() @slow # copied from `test_modeling_flax_common` because it takes much longer than other models def UpperCAmelCase_ (self ): super().test_hidden_states_output() @slow def UpperCAmelCase_ (self ): for model_class_name in self.all_model_classes: UpperCamelCase__ = model_class_name.from_pretrained("""google/bigbird-roberta-base""" ) self.assertIsNotNone(SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self ): if self.test_attn_probs: super().test_attention_outputs() @slow # copied from `test_modeling_flax_common` because it takes much longer than other models def UpperCAmelCase_ (self ): UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): UpperCamelCase__ = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = model_class(SCREAMING_SNAKE_CASE_ ) @jax.jit def model_jitted(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None , **SCREAMING_SNAKE_CASE_ ): return model(input_ids=SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) with self.subTest("""JIT Enabled""" ): UpperCamelCase__ = model_jitted(**SCREAMING_SNAKE_CASE_ ).to_tuple() with self.subTest("""JIT Disabled""" ): with jax.disable_jit(): UpperCamelCase__ = model_jitted(**SCREAMING_SNAKE_CASE_ ).to_tuple() self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , len(SCREAMING_SNAKE_CASE_ ) ) for jitted_output, output in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): self.assertEqual(jitted_output.shape , output.shape ) def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=1E-5 , SCREAMING_SNAKE_CASE_="outputs" , SCREAMING_SNAKE_CASE_=None ): # `bigbird_block_sparse_attention` in `FlaxBigBird` returns `attention_probs = None`, while in PyTorch version, # an effort was done to return `attention_probs` (yet to be verified). if name.startswith("""outputs.attentions""" ): return else: super().check_pt_flax_outputs(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(10_00_00)]


def next_number(number: int) -> int:
    sum_of_digits_squared = 0
    while number:
        # Increased Speed Slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 100_000]
        number //= 100_000

    return sum_of_digits_squared


# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
CHAINS = [None] * 10_00_00_00
CHAINS[0] = True  # the chain starting at 1 ends with 1
CHAINS[57] = False  # the chain starting at 58 ends with 89


def chain(number: int) -> bool:
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore

    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain

    while number < 10_000_000:
        CHAINS[number - 1] = number_chain
        number *= 10

    return number_chain


def solution(number: int = 10_000_000) -> int:
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)

    return CHAINS[:number].count(False)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f'{solution() = }')
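# --- Worked example (chains quoted in the Project Euler 92 statement) ---
#   44 -> 32 -> 13 -> 10 -> 1 -> 1 ...
#   85 -> 89 -> 145 -> 42 -> 20 -> 4 -> 16 -> 37 -> 58 -> 89 (a loop)
# `next_number` reproduces the first step of each chain:
assert next_number(44) == 32  # 4**2 + 4**2
assert next_number(85) == 89  # 8**2 + 5**2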
class __A: """simple docstring""" def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = None UpperCamelCase__ = None UpperCamelCase__ = graph self._normalize_graph(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = len(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = None def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): if sources is int: UpperCamelCase__ = [sources] if sinks is int: UpperCamelCase__ = [sinks] if len(SCREAMING_SNAKE_CASE_ ) == 0 or len(SCREAMING_SNAKE_CASE_ ) == 0: return UpperCamelCase__ = sources[0] UpperCamelCase__ = sinks[0] # make fake vertex if there are more # than one source or sink if len(SCREAMING_SNAKE_CASE_ ) > 1 or len(SCREAMING_SNAKE_CASE_ ) > 1: UpperCamelCase__ = 0 for i in sources: max_input_flow += sum(self.graph[i] ) UpperCamelCase__ = len(self.graph ) + 1 for room in self.graph: room.insert(0 , 0 ) self.graph.insert(0 , [0] * size ) for i in sources: UpperCamelCase__ = max_input_flow UpperCamelCase__ = 0 UpperCamelCase__ = len(self.graph ) + 1 for room in self.graph: room.append(0 ) self.graph.append([0] * size ) for i in sinks: UpperCamelCase__ = max_input_flow UpperCamelCase__ = size - 1 def UpperCAmelCase_ (self ): if self.maximum_flow_algorithm is None: raise Exception("""You need to set maximum flow algorithm before.""" ) if self.source_index is None or self.sink_index is None: return 0 self.maximum_flow_algorithm.execute() return self.maximum_flow_algorithm.getMaximumFlow() def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = algorithm(self ) class __A: """simple docstring""" def __init__(self , SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = flow_network UpperCamelCase__ = flow_network.verticesCount UpperCamelCase__ = flow_network.sourceIndex UpperCamelCase__ = flow_network.sinkIndex # it's just a reference, so you shouldn't change # it in your algorithms, use deep copy before doing that UpperCamelCase__ = flow_network.graph UpperCamelCase__ = False def UpperCAmelCase_ (self ): if not self.executed: self._algorithm() UpperCamelCase__ = True def UpperCAmelCase_ (self ): pass class __A( __lowerCamelCase ): """simple docstring""" def __init__(self , SCREAMING_SNAKE_CASE_ ): super().__init__(SCREAMING_SNAKE_CASE_ ) # use this to save your result UpperCamelCase__ = -1 def UpperCAmelCase_ (self ): if not self.executed: raise Exception("""You should execute algorithm before using its result!""" ) return self.maximum_flow class __A( __lowerCamelCase ): """simple docstring""" def __init__(self , SCREAMING_SNAKE_CASE_ ): super().__init__(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = [[0] * self.verticies_count for i in range(self.verticies_count )] UpperCamelCase__ = [0] * self.verticies_count UpperCamelCase__ = [0] * self.verticies_count def UpperCAmelCase_ (self ): UpperCamelCase__ = self.verticies_count # push some substance to graph for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ): self.preflow[self.source_index][nextvertex_index] += bandwidth self.preflow[nextvertex_index][self.source_index] -= bandwidth self.excesses[nextvertex_index] += bandwidth # Relabel-to-front selection rule UpperCamelCase__ = [ i for i in range(self.verticies_count ) if i != self.source_index and i != self.sink_index ] # move through list UpperCamelCase__ = 0 while i < len(SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = vertices_list[i] UpperCamelCase__ = self.heights[vertex_index] self.process_vertex(SCREAMING_SNAKE_CASE_ ) if 
self.heights[vertex_index] > previous_height: # if it was relabeled, swap elements # and start from 0 index vertices_list.insert(0 , vertices_list.pop(SCREAMING_SNAKE_CASE_ ) ) UpperCamelCase__ = 0 else: i += 1 UpperCamelCase__ = sum(self.preflow[self.source_index] ) def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ ): while self.excesses[vertex_index] > 0: for neighbour_index in range(self.verticies_count ): # if it's neighbour and current vertex is higher if ( self.graph[vertex_index][neighbour_index] - self.preflow[vertex_index][neighbour_index] > 0 and self.heights[vertex_index] > self.heights[neighbour_index] ): self.push(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) self.relabel(SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = min( self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , ) self.preflow[from_index][to_index] += preflow_delta self.preflow[to_index][from_index] -= preflow_delta self.excesses[from_index] -= preflow_delta self.excesses[to_index] += preflow_delta def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = None for to_index in range(self.verticies_count ): if ( self.graph[vertex_index][to_index] - self.preflow[vertex_index][to_index] > 0 ) and (min_height is None or self.heights[to_index] < min_height): UpperCamelCase__ = self.heights[to_index] if min_height is not None: UpperCamelCase__ = min_height + 1 if __name__ == "__main__": lowerCamelCase_ = [0] lowerCamelCase_ = [3] # graph = [ # [0, 0, 4, 6, 0, 0], # [0, 0, 5, 2, 0, 0], # [0, 0, 0, 0, 4, 4], # [0, 0, 0, 0, 6, 6], # [0, 0, 0, 0, 0, 0], # [0, 0, 0, 0, 0, 0], # ] lowerCamelCase_ = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]] # prepare our network lowerCamelCase_ = FlowNetwork(graph, entrances, exits) # set algorithm flow_network.set_maximum_flow_algorithm(PushRelabelExecutor) # and calculate lowerCamelCase_ = flow_network.find_maximum_flow() print(f'maximum flow is {maximum_flow}')
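# --- Cross-check (an independent sketch, not part of the classes above) ---
# A minimal BFS-based Edmonds-Karp max-flow routine, used only to confirm by
# a second method that the 4-node sample graph above has maximum flow 6: the
# single source-to-sink path 0 -> 1 -> 2 -> 3 is limited by the capacity 6
# of edge 1 -> 2.
from collections import deque


def _edmonds_karp(capacity, source, sink):
    n = len(capacity)
    flow = [[0] * n for _ in range(n)]
    total = 0
    while True:
        # BFS for a shortest augmenting path in the residual graph
        parent = [-1] * n
        parent[source] = source
        queue = deque([source])
        while queue and parent[sink] == -1:
            u = queue.popleft()
            for v in range(n):
                if parent[v] == -1 and capacity[u][v] - flow[u][v] > 0:
                    parent[v] = u
                    queue.append(v)
        if parent[sink] == -1:  # no augmenting path left
            return total
        # find the bottleneck along the path, then push that much flow
        bottleneck, v = float("inf"), sink
        while v != source:
            u = parent[v]
            bottleneck = min(bottleneck, capacity[u][v] - flow[u][v])
            v = u
        v = sink
        while v != source:
            u = parent[v]
            flow[u][v] += bottleneck
            flow[v][u] -= bottleneck
            v = u
        total += bottleneck


assert _edmonds_karp([[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]], 0, 3) == 6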
import argparse import hashlib import os import urllib import warnings import torch from torch import nn from tqdm import tqdm from transformers import WhisperConfig, WhisperForConditionalGeneration lowerCamelCase_ = { '''tiny.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt''', '''tiny''': '''https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt''', '''base.en''': '''https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt''', '''base''': '''https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt''', '''small.en''': '''https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt''', '''small''': '''https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt''', '''medium.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt''', '''medium''': '''https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt''', '''large''': '''https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt''', '''large-v2''': '''https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt''', } def __magic_name__ ( __a : List[str] ): '''simple docstring''' UpperCamelCase__ = ["""layers""", """blocks"""] for k in ignore_keys: state_dict.pop(__a , __a ) lowerCamelCase_ = { '''blocks''': '''layers''', '''mlp.0''': '''fc1''', '''mlp.2''': '''fc2''', '''mlp_ln''': '''final_layer_norm''', '''.attn.query''': '''.self_attn.q_proj''', '''.attn.key''': '''.self_attn.k_proj''', '''.attn.value''': '''.self_attn.v_proj''', '''.attn_ln''': '''.self_attn_layer_norm''', '''.attn.out''': '''.self_attn.out_proj''', '''.cross_attn.query''': '''.encoder_attn.q_proj''', '''.cross_attn.key''': '''.encoder_attn.k_proj''', '''.cross_attn.value''': '''.encoder_attn.v_proj''', '''.cross_attn_ln''': '''.encoder_attn_layer_norm''', '''.cross_attn.out''': '''.encoder_attn.out_proj''', '''decoder.ln.''': '''decoder.layer_norm.''', '''encoder.ln.''': '''encoder.layer_norm.''', '''token_embedding''': '''embed_tokens''', '''encoder.positional_embedding''': '''encoder.embed_positions.weight''', '''decoder.positional_embedding''': '''decoder.embed_positions.weight''', '''ln_post''': '''layer_norm''', } def __magic_name__ ( __a : Dict ): '''simple docstring''' UpperCamelCase__ = list(s_dict.keys() ) for key in keys: UpperCamelCase__ = key for k, v in WHISPER_MAPPING.items(): if k in key: UpperCamelCase__ = new_key.replace(__a , __a ) print(f"{key} -> {new_key}" ) UpperCamelCase__ = s_dict.pop(__a ) return s_dict def __magic_name__ ( __a : Optional[Any] ): '''simple docstring''' UpperCamelCase__ , UpperCamelCase__ = emb.weight.shape UpperCamelCase__ = nn.Linear(__a , __a , bias=__a ) UpperCamelCase__ = emb.weight.data return lin_layer def __magic_name__ ( __a : str , __a : str ): '''simple docstring''' os.makedirs(__a , exist_ok=__a ) UpperCamelCase__ = os.path.basename(__a ) UpperCamelCase__ = url.split("""/""" 
)[-2] UpperCamelCase__ = os.path.join(__a , __a ) if os.path.exists(__a ) and not os.path.isfile(__a ): raise RuntimeError(f"{download_target} exists and is not a regular file" ) if os.path.isfile(__a ): UpperCamelCase__ = open(__a , """rb""" ).read() if hashlib.shaaaa(__a ).hexdigest() == expected_shaaaa: return model_bytes else: warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file" ) with urllib.request.urlopen(__a ) as source, open(__a , """wb""" ) as output: with tqdm( total=int(source.info().get("""Content-Length""" ) ) , ncols=80 , unit="""iB""" , unit_scale=__a , unit_divisor=1_024 ) as loop: while True: UpperCamelCase__ = source.read(8_192 ) if not buffer: break output.write(__a ) loop.update(len(__a ) ) UpperCamelCase__ = open(__a , """rb""" ).read() if hashlib.shaaaa(__a ).hexdigest() != expected_shaaaa: raise RuntimeError( """Model has been downloaded but the SHA256 checksum does not not match. Please retry loading the model.""" ) return model_bytes def __magic_name__ ( __a : Union[str, Any] , __a : Optional[int] ): '''simple docstring''' if ".pt" not in checkpoint_path: UpperCamelCase__ = _download(_MODELS[checkpoint_path] ) else: UpperCamelCase__ = torch.load(__a , map_location="""cpu""" ) UpperCamelCase__ = original_checkpoint["""dims"""] UpperCamelCase__ = original_checkpoint["""model_state_dict"""] UpperCamelCase__ = state_dict["""decoder.token_embedding.weight"""] remove_ignore_keys_(__a ) rename_keys(__a ) UpperCamelCase__ = True UpperCamelCase__ = state_dict["""decoder.layers.0.fc1.weight"""].shape[0] UpperCamelCase__ = WhisperConfig( vocab_size=dimensions["""n_vocab"""] , encoder_ffn_dim=__a , decoder_ffn_dim=__a , num_mel_bins=dimensions["""n_mels"""] , d_model=dimensions["""n_audio_state"""] , max_target_positions=dimensions["""n_text_ctx"""] , encoder_layers=dimensions["""n_audio_layer"""] , encoder_attention_heads=dimensions["""n_audio_head"""] , decoder_layers=dimensions["""n_text_layer"""] , decoder_attention_heads=dimensions["""n_text_state"""] , max_source_positions=dimensions["""n_audio_ctx"""] , ) UpperCamelCase__ = WhisperForConditionalGeneration(__a ) UpperCamelCase__ , UpperCamelCase__ = model.model.load_state_dict(__a , strict=__a ) if len(__a ) > 0 and not set(__a ) <= { "encoder.embed_positions.weights", "decoder.embed_positions.weights", }: raise ValueError( """Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,""" f" but all the following weights are missing {missing}" ) if tie_embeds: UpperCamelCase__ = make_linear_from_emb(model.model.decoder.embed_tokens ) else: UpperCamelCase__ = proj_out_weights model.save_pretrained(__a ) if __name__ == "__main__": lowerCamelCase_ = argparse.ArgumentParser() # # Required parameters parser.add_argument('''--checkpoint_path''', type=str, help='''Patht to the downloaded checkpoints''') parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') lowerCamelCase_ = parser.parse_args() convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
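# --- Illustrative invocation (a sketch; the file name and paths are placeholders) ---
# The script accepts either a key from `_MODELS` (e.g. "tiny.en", downloaded
# and SHA256-verified by `_download`) or a local `.pt` checkpoint path:
#
#   python convert_openai_whisper_to_hf.py \
#       --checkpoint_path tiny.en \
#       --pytorch_dump_folder_path ./whisper-tiny.en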
import torch from diffusers import DDPMParallelScheduler from .test_schedulers import SchedulerCommonTest class __A( __lowerCamelCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = (DDPMParallelScheduler,) def UpperCAmelCase_ (self , **SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = { """num_train_timesteps""": 10_00, """beta_start""": 0.0001, """beta_end""": 0.02, """beta_schedule""": """linear""", """variance_type""": """fixed_small""", """clip_sample""": True, } config.update(**SCREAMING_SNAKE_CASE_ ) return config def UpperCAmelCase_ (self ): for timesteps in [1, 5, 1_00, 10_00]: self.check_over_configs(num_train_timesteps=SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self ): for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ): self.check_over_configs(beta_start=SCREAMING_SNAKE_CASE_ , beta_end=SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self ): for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self ): for variance in ["fixed_small", "fixed_large", "other"]: self.check_over_configs(variance_type=SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self ): for clip_sample in [True, False]: self.check_over_configs(clip_sample=SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self ): self.check_over_configs(thresholding=SCREAMING_SNAKE_CASE_ ) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs( thresholding=SCREAMING_SNAKE_CASE_ , prediction_type=SCREAMING_SNAKE_CASE_ , sample_max_value=SCREAMING_SNAKE_CASE_ , ) def UpperCAmelCase_ (self ): for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs(prediction_type=SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self ): for t in [0, 5_00, 9_99]: self.check_over_forward(time_step=SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self ): UpperCamelCase__ = self.scheduler_classes[0] UpperCamelCase__ = self.get_scheduler_config() UpperCamelCase__ = scheduler_class(**SCREAMING_SNAKE_CASE_ ) assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(4_87 ) - 0.0_0979 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(9_99 ) - 0.02 ) ) < 1E-5 def UpperCAmelCase_ (self ): UpperCamelCase__ = self.scheduler_classes[0] UpperCamelCase__ = self.get_scheduler_config() UpperCamelCase__ = scheduler_class(**SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = len(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = self.dummy_model() UpperCamelCase__ = self.dummy_sample_deter UpperCamelCase__ = self.dummy_sample_deter + 0.1 UpperCamelCase__ = self.dummy_sample_deter - 0.1 UpperCamelCase__ = samplea.shape[0] UpperCamelCase__ = torch.stack([samplea, samplea, samplea] , dim=0 ) UpperCamelCase__ = torch.arange(SCREAMING_SNAKE_CASE_ )[0:3, None].repeat(1 , SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) ) UpperCamelCase__ = scheduler.batch_step_no_noise(SCREAMING_SNAKE_CASE_ , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) ) UpperCamelCase__ = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_ ) ) UpperCamelCase__ = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_ ) ) assert abs(result_sum.item() - 1153.1833 ) < 1E-2 assert abs(result_mean.item() - 0.5005 ) < 1E-3 def UpperCAmelCase_ (self ): UpperCamelCase__ = self.scheduler_classes[0] UpperCamelCase__ = self.get_scheduler_config() UpperCamelCase__ = scheduler_class(**SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = 
len(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = self.dummy_model() UpperCamelCase__ = self.dummy_sample_deter UpperCamelCase__ = torch.manual_seed(0 ) for t in reversed(range(SCREAMING_SNAKE_CASE_ ) ): # 1. predict noise residual UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # 2. predict previous mean of sample x_t-1 UpperCamelCase__ = scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ ).prev_sample UpperCamelCase__ = pred_prev_sample UpperCamelCase__ = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_ ) ) UpperCamelCase__ = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_ ) ) assert abs(result_sum.item() - 258.9606 ) < 1E-2 assert abs(result_mean.item() - 0.3372 ) < 1E-3 def UpperCAmelCase_ (self ): UpperCamelCase__ = self.scheduler_classes[0] UpperCamelCase__ = self.get_scheduler_config(prediction_type="""v_prediction""" ) UpperCamelCase__ = scheduler_class(**SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = len(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = self.dummy_model() UpperCamelCase__ = self.dummy_sample_deter UpperCamelCase__ = torch.manual_seed(0 ) for t in reversed(range(SCREAMING_SNAKE_CASE_ ) ): # 1. predict noise residual UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # 2. predict previous mean of sample x_t-1 UpperCamelCase__ = scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ ).prev_sample UpperCamelCase__ = pred_prev_sample UpperCamelCase__ = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_ ) ) UpperCamelCase__ = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_ ) ) assert abs(result_sum.item() - 202.0296 ) < 1E-2 assert abs(result_mean.item() - 0.2631 ) < 1E-3 def UpperCAmelCase_ (self ): UpperCamelCase__ = self.scheduler_classes[0] UpperCamelCase__ = self.get_scheduler_config() UpperCamelCase__ = scheduler_class(**SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = [1_00, 87, 50, 1, 0] scheduler.set_timesteps(timesteps=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = scheduler.timesteps for i, timestep in enumerate(SCREAMING_SNAKE_CASE_ ): if i == len(SCREAMING_SNAKE_CASE_ ) - 1: UpperCamelCase__ = -1 else: UpperCamelCase__ = timesteps[i + 1] UpperCamelCase__ = scheduler.previous_timestep(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = prev_t.item() self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self ): UpperCamelCase__ = self.scheduler_classes[0] UpperCamelCase__ = self.get_scheduler_config() UpperCamelCase__ = scheduler_class(**SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = [1_00, 87, 50, 51, 0] with self.assertRaises(SCREAMING_SNAKE_CASE_ , msg="""`custom_timesteps` must be in descending order.""" ): scheduler.set_timesteps(timesteps=SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self ): UpperCamelCase__ = self.scheduler_classes[0] UpperCamelCase__ = self.get_scheduler_config() UpperCamelCase__ = scheduler_class(**SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = [1_00, 87, 50, 1, 0] UpperCamelCase__ = len(SCREAMING_SNAKE_CASE_ ) with self.assertRaises(SCREAMING_SNAKE_CASE_ , msg="""Can only pass one of `num_inference_steps` or `custom_timesteps`.""" ): scheduler.set_timesteps(num_inference_steps=SCREAMING_SNAKE_CASE_ , timesteps=SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self ): UpperCamelCase__ = self.scheduler_classes[0] UpperCamelCase__ = self.get_scheduler_config() UpperCamelCase__ = scheduler_class(**SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = [scheduler.config.num_train_timesteps] with self.assertRaises( 
SCREAMING_SNAKE_CASE_ , msg=f"""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}""" , ): scheduler.set_timesteps(timesteps=SCREAMING_SNAKE_CASE_ )
def partition(m: int) -> int:
    memo = [[0 for _ in range(m)] for _ in range(m + 1)]
    for i in range(m + 1):
        memo[i][0] = 1

    for n in range(m + 1):
        for k in range(1, m):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]

    return memo[m][m - 1]


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        try:
            n = int(input('''Enter a number: ''').strip())
            print(partition(n))
        except ValueError:
            print('''Please enter a number.''')
    else:
        try:
            n = int(sys.argv[1])
            print(partition(n))
        except ValueError:
            print('''Please pass a number.''')
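# --- Worked example ---
# `partition(m)` counts the integer partitions of m; for m = 4 the five
# partitions are 4, 3+1, 2+2, 2+1+1 and 1+1+1+1.
assert partition(3) == 3
assert partition(4) == 5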
from math import factorial

DIGIT_FACTORIAL = {str(digit): factorial(digit) for digit in range(10)}


def digit_factorial_sum(number: int) -> int:
    if not isinstance(number, int):
        raise TypeError("""Parameter number must be int""")

    if number < 0:
        raise ValueError("""Parameter number must be greater than or equal to 0""")

    # Converts number in string to iterate on its digits and adds its factorial.
    return sum(DIGIT_FACTORIAL[digit] for digit in str(number))


def solution(chain_length: int = 60, number_limit: int = 1_000_000) -> int:
    if not isinstance(chain_length, int) or not isinstance(number_limit, int):
        raise TypeError("""Parameters chain_length and number_limit must be int""")

    if chain_length <= 0 or number_limit <= 0:
        raise ValueError(
            """Parameters chain_length and number_limit must be greater than 0"""
        )

    # the counter for the chains with the exact desired length
    chains_counter = 0
    # the cached sizes of the previous chains
    chain_sets_lengths = {}

    for start_chain_element in range(1, number_limit):
        # The temporary set will contain the elements of the chain
        chain_set = set()
        chain_set_length = 0

        # Stop computing the chain when you find a cached size, a repeating item or the
        # length is greater then the desired one.
        chain_element = start_chain_element
        while (
            chain_element not in chain_sets_lengths
            and chain_element not in chain_set
            and chain_set_length <= chain_length
        ):
            chain_set.add(chain_element)
            chain_set_length += 1
            chain_element = digit_factorial_sum(chain_element)

        if chain_element in chain_sets_lengths:
            chain_set_length += chain_sets_lengths[chain_element]

        chain_sets_lengths[start_chain_element] = chain_set_length

        # If chain contains the exact amount of elements increase the counter
        if chain_set_length == chain_length:
            chains_counter += 1

    return chains_counter


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f'{solution()}')
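# --- Worked example (facts quoted in the Project Euler 74 statement) ---
# 145 is a factorion (1! + 4! + 5! = 1 + 24 + 120 = 145), so its chain has
# length 1, while 169 starts the loop 169 -> 363601 -> 1454 -> 169.
assert digit_factorial_sum(145) == 145
assert digit_factorial_sum(169) == 363601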
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position

__version__ = '''2.13.1'''

import platform

import pyarrow
from packaging import version


if version.parse(platform.python_version()) < version.parse('''3.7'''):
    raise ImportWarning(
        '''To use `datasets`, Python>=3.7 is required, and the current version of Python doesn\'t match this condition.'''
    )

if version.parse(pyarrow.__version__).major < 8:
    raise ImportWarning(
        '''To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn\'t match this condition.\n'''
        '''If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`.'''
    )

del platform
del pyarrow
del version

from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
    get_dataset_config_info,
    get_dataset_config_names,
    get_dataset_infos,
    get_dataset_split_names,
    inspect_dataset,
    inspect_metric,
    list_datasets,
    list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
    NamedSplit,
    NamedSplitAll,
    Split,
    SplitBase,
    SplitDict,
    SplitGenerator,
    SplitInfo,
    SubSplitInfo,
    percent,
)
from .tasks import *
from .utils import *
from .utils import logging

# deprecated modules
from datasets import arrow_dataset as _arrow_dataset  # isort:skip
from datasets import utils as _utils  # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager  # isort:skip

_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager

del _arrow_dataset, _utils, _deprecated_download_manager
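# --- Illustrative usage (a sketch; the dataset name is an arbitrary example
# and loading it requires network access) ---
#
#   from datasets import load_dataset
#
#   ds = load_dataset("rotten_tomatoes", split="train")
#   print(ds[0])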
from timeit import timeit


def get_set_bits_count_using_brian_kernighans_algorithm(number: int) -> int:
    if number < 0:
        raise ValueError("""the value of input must not be negative""")
    result = 0
    while number:
        # Clearing the least significant set bit counts one bit per loop.
        number &= number - 1
        result += 1
    return result


def get_set_bits_count_using_modulo_operator(number: int) -> int:
    if number < 0:
        raise ValueError("""the value of input must not be negative""")
    result = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result


def benchmark() -> None:
    def do_benchmark(number: int) -> None:
        setup = """import __main__ as z"""
        print(f"Benchmark when {number = }:")
        print(f"{get_set_bits_count_using_modulo_operator(number) = }")
        timing = timeit("""z.get_set_bits_count_using_modulo_operator(25)""", setup=setup)
        print(f"timeit() runs in {timing} seconds")
        print(f"{get_set_bits_count_using_brian_kernighans_algorithm(number) = }")
        timing = timeit(
            """z.get_set_bits_count_using_brian_kernighans_algorithm(25)""",
            setup=setup,
        )
        print(f"timeit() runs in {timing} seconds")

    for number in (25, 37, 58, 0):
        do_benchmark(number)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
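# --- Worked example ---
# 25 is 0b11001, so both counters report three set bits:
assert get_set_bits_count_using_brian_kernighans_algorithm(25) == 3
assert get_set_bits_count_using_modulo_operator(25) == 3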
from .glue import GlueDataset, GlueDataTrainingArguments from .language_modeling import ( LineByLineTextDataset, LineByLineWithRefDataset, LineByLineWithSOPTextDataset, TextDataset, TextDatasetForNextSentencePrediction, ) from .squad import SquadDataset, SquadDataTrainingArguments
import copy import os import tempfile from unittest import TestCase from unittest.mock import patch import numpy as np import pyarrow as pa import pyarrow.parquet as pq import pytest from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence from datasets.features import ArrayaD, ClassLabel, Features, Image, Value from datasets.features.features import ArrayaDExtensionType, cast_to_python_objects from datasets.keyhash import DuplicatedKeysError, InvalidKeyError from .utils import require_pil class __A( __lowerCamelCase ): """simple docstring""" def UpperCAmelCase_ (self ): UpperCamelCase__ = pa.array(TypedSequence([1, 2, 3] ) ) self.assertEqual(arr.type , pa.intaa() ) def UpperCAmelCase_ (self ): with self.assertRaises(SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = pa.array(TypedSequence([1, 2, 3] ) , type=pa.intaa() ) def UpperCAmelCase_ (self ): with self.assertRaises(SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = pa.array(TypedSequence([1, 2, 3] , try_type=Value("""bool""" ) , type=Value("""int64""" ) ) ) def UpperCAmelCase_ (self ): UpperCamelCase__ = pa.array(TypedSequence([1, 2, 3] , type=Value("""int32""" ) ) ) self.assertEqual(arr.type , pa.intaa() ) def UpperCAmelCase_ (self ): with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ): UpperCamelCase__ = pa.array(TypedSequence(["""foo""", """bar"""] , type=Value("""int64""" ) ) ) def UpperCAmelCase_ (self ): UpperCamelCase__ = pa.array(TypedSequence([1, 2, 3] , try_type=Value("""int32""" ) ) ) self.assertEqual(arr.type , pa.intaa() ) def UpperCAmelCase_ (self ): UpperCamelCase__ = pa.array(TypedSequence(["""foo""", """bar"""] , try_type=Value("""int64""" ) ) ) self.assertEqual(arr.type , pa.string() ) def UpperCAmelCase_ (self ): UpperCamelCase__ = pa.array(TypedSequence([[[1, 2, 3]]] , type=ArrayaD((1, 3) , """int64""" ) ) ) self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , """int64""" ) ) def UpperCAmelCase_ (self ): with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ): UpperCamelCase__ = pa.array(TypedSequence(["""foo""", """bar"""] , type=ArrayaD((1, 3) , """int64""" ) ) ) def UpperCAmelCase_ (self ): UpperCamelCase__ = pa.array(TypedSequence([[[1, 2, 3]]] , try_type=ArrayaD((1, 3) , """int64""" ) ) ) self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , """int64""" ) ) def UpperCAmelCase_ (self ): UpperCamelCase__ = pa.array(TypedSequence(["""foo""", """bar"""] , try_type=ArrayaD((1, 3) , """int64""" ) ) ) self.assertEqual(arr.type , pa.string() ) @require_pil def UpperCAmelCase_ (self ): import PIL.Image UpperCamelCase__ = PIL.Image.fromarray(np.arange(10 , dtype=np.uinta ).reshape(2 , 5 ) ) with patch( """datasets.arrow_writer.cast_to_python_objects""" , side_effect=SCREAMING_SNAKE_CASE_ ) as mock_cast_to_python_objects: UpperCamelCase__ = pa.array(TypedSequence([{"""path""": None, """bytes""": b"""image_bytes"""}, pil_image] , type=Image() ) ) UpperCamelCase__ , UpperCamelCase__ = mock_cast_to_python_objects.call_args_list[-1] self.assertIn("""optimize_list_casting""" , SCREAMING_SNAKE_CASE_ ) self.assertFalse(kwargs["""optimize_list_casting"""] ) def __magic_name__ ( __a : List[Any] , __a : int ): '''simple docstring''' UpperCamelCase__ = pa.BufferReader(__a ) if isinstance(__a , pa.Buffer ) else pa.memory_map(__a ) UpperCamelCase__ = pa.ipc.open_stream(__a ) UpperCamelCase__ = f.read_all() assert len(pa_table.to_batches() ) == expected_num_chunks assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]} del pa_table 
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 10] ) @pytest.mark.parametrize( """fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] ) def __magic_name__ ( __a : Tuple , __a : int ): '''simple docstring''' UpperCamelCase__ = pa.BufferOutputStream() UpperCamelCase__ = pa.schema(__a ) if fields else None with ArrowWriter(stream=__a , schema=__a , writer_batch_size=__a ) as writer: writer.write({"""col_1""": """foo""", """col_2""": 1} ) writer.write({"""col_1""": """bar""", """col_2""": 2} ) UpperCamelCase__ , UpperCamelCase__ = writer.finalize() assert num_examples == 2 assert num_bytes > 0 if not fields: UpperCamelCase__ = {"""col_1""": pa.string(), """col_2""": pa.intaa()} assert writer._schema == pa.schema(__a , metadata=writer._schema.metadata ) _check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 ) def __magic_name__ ( ): '''simple docstring''' UpperCamelCase__ = pa.BufferOutputStream() UpperCamelCase__ = Features({"""labels""": ClassLabel(names=["""neg""", """pos"""] )} ) with ArrowWriter(stream=__a , features=__a ) as writer: writer.write({"""labels""": 0} ) writer.write({"""labels""": 1} ) UpperCamelCase__ , UpperCamelCase__ = writer.finalize() assert num_examples == 2 assert num_bytes > 0 assert writer._schema == features.arrow_schema assert writer._schema.metadata == features.arrow_schema.metadata UpperCamelCase__ = pa.BufferReader(output.getvalue() ) UpperCamelCase__ = pa.ipc.open_stream(__a ) UpperCamelCase__ = f.read_all() UpperCamelCase__ = pa_table.schema assert pa_table.num_rows == 2 assert schema == features.arrow_schema assert schema.metadata == features.arrow_schema.metadata assert features == Features.from_arrow_schema(__a ) @pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 10] ) def __magic_name__ ( __a : str ): '''simple docstring''' UpperCamelCase__ = pa.BufferOutputStream() with ArrowWriter( stream=__a , writer_batch_size=__a , hash_salt="""split_name""" , check_duplicates=__a , ) as writer: with pytest.raises(__a ): writer.write({"""col_1""": """foo""", """col_2""": 1} , key=[1, 2] ) UpperCamelCase__ , UpperCamelCase__ = writer.finalize() @pytest.mark.parametrize("""writer_batch_size""" , [None, 2, 10] ) def __magic_name__ ( __a : str ): '''simple docstring''' UpperCamelCase__ = pa.BufferOutputStream() with ArrowWriter( stream=__a , writer_batch_size=__a , hash_salt="""split_name""" , check_duplicates=__a , ) as writer: with pytest.raises(__a ): writer.write({"""col_1""": """foo""", """col_2""": 1} , key=10 ) writer.write({"""col_1""": """bar""", """col_2""": 2} , key=10 ) UpperCamelCase__ , UpperCamelCase__ = writer.finalize() @pytest.mark.parametrize("""writer_batch_size""" , [None, 2, 10] ) def __magic_name__ ( __a : Union[str, Any] ): '''simple docstring''' UpperCamelCase__ = pa.BufferOutputStream() with ArrowWriter( stream=__a , writer_batch_size=__a , hash_salt="""split_name""" , check_duplicates=__a , ) as writer: writer.write({"""col_1""": """foo""", """col_2""": 1} , key=1 ) writer.write({"""col_1""": """bar""", """col_2""": 2} , key=2 ) UpperCamelCase__ , UpperCamelCase__ = writer.finalize() assert num_examples == 2 assert num_bytes > 0 _check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 ) @pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 10] ) @pytest.mark.parametrize( """fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, 
{"""col_1""": pa.string(), """col_2""": pa.intaa()}] ) def __magic_name__ ( __a : List[Any] , __a : Optional[int] ): '''simple docstring''' UpperCamelCase__ = pa.BufferOutputStream() UpperCamelCase__ = pa.schema(__a ) if fields else None with ArrowWriter(stream=__a , schema=__a , writer_batch_size=__a ) as writer: writer.write_batch({"""col_1""": ["""foo""", """bar"""], """col_2""": [1, 2]} ) writer.write_batch({"""col_1""": [], """col_2""": []} ) UpperCamelCase__ , UpperCamelCase__ = writer.finalize() assert num_examples == 2 assert num_bytes > 0 if not fields: UpperCamelCase__ = {"""col_1""": pa.string(), """col_2""": pa.intaa()} assert writer._schema == pa.schema(__a , metadata=writer._schema.metadata ) _check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 ) @pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 10] ) @pytest.mark.parametrize( """fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] ) def __magic_name__ ( __a : Union[str, Any] , __a : Any ): '''simple docstring''' UpperCamelCase__ = pa.BufferOutputStream() UpperCamelCase__ = pa.schema(__a ) if fields else None with ArrowWriter(stream=__a , schema=__a , writer_batch_size=__a ) as writer: writer.write_table(pa.Table.from_pydict({"""col_1""": ["""foo""", """bar"""], """col_2""": [1, 2]} ) ) UpperCamelCase__ , UpperCamelCase__ = writer.finalize() assert num_examples == 2 assert num_bytes > 0 if not fields: UpperCamelCase__ = {"""col_1""": pa.string(), """col_2""": pa.intaa()} assert writer._schema == pa.schema(__a , metadata=writer._schema.metadata ) _check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 ) @pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 10] ) @pytest.mark.parametrize( """fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] ) def __magic_name__ ( __a : Optional[Any] , __a : int ): '''simple docstring''' UpperCamelCase__ = pa.BufferOutputStream() UpperCamelCase__ = pa.schema(__a ) if fields else None with ArrowWriter(stream=__a , schema=__a , writer_batch_size=__a ) as writer: writer.write_row(pa.Table.from_pydict({"""col_1""": ["""foo"""], """col_2""": [1]} ) ) writer.write_row(pa.Table.from_pydict({"""col_1""": ["""bar"""], """col_2""": [2]} ) ) UpperCamelCase__ , UpperCamelCase__ = writer.finalize() assert num_examples == 2 assert num_bytes > 0 if not fields: UpperCamelCase__ = {"""col_1""": pa.string(), """col_2""": pa.intaa()} assert writer._schema == pa.schema(__a , metadata=writer._schema.metadata ) _check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 ) def __magic_name__ ( ): '''simple docstring''' with tempfile.TemporaryDirectory() as tmp_dir: UpperCamelCase__ = {"""col_1""": pa.string(), """col_2""": pa.intaa()} UpperCamelCase__ = os.path.join(__a , """test.arrow""" ) with ArrowWriter(path=__a , schema=pa.schema(__a ) ) as writer: writer.write_batch({"""col_1""": ["""foo""", """bar"""], """col_2""": [1, 2]} ) UpperCamelCase__ , UpperCamelCase__ = writer.finalize() assert num_examples == 2 assert num_bytes > 0 assert writer._schema == pa.schema(__a , metadata=writer._schema.metadata ) _check_output(__a , 1 ) def __magic_name__ ( __a : Any ): '''simple docstring''' if pa.types.is_list(__a ): return get_base_dtype(arr_type.value_type ) else: return arr_type def __magic_name__ ( __a : Optional[int] , __a : 
Any ): '''simple docstring''' if isinstance(lst[0] , __a ): change_first_primitive_element_in_list(lst[0] , __a ) else: UpperCamelCase__ = value @pytest.mark.parametrize("""optimized_int_type, expected_dtype""" , [(None, pa.intaa()), (Value("""int32""" ), pa.intaa())] ) @pytest.mark.parametrize("""sequence""" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] ) def __magic_name__ ( __a : Union[str, Any] , __a : Optional[int] , __a : Tuple ): '''simple docstring''' UpperCamelCase__ = pa.array(TypedSequence(__a , optimized_int_type=__a ) ) assert get_base_dtype(arr.type ) == expected_dtype @pytest.mark.parametrize( """col, expected_dtype""" , [ ("""attention_mask""", pa.inta()), ("""special_tokens_mask""", pa.inta()), ("""token_type_ids""", pa.inta()), ("""input_ids""", pa.intaa()), ("""other""", pa.intaa()), ] , ) @pytest.mark.parametrize("""sequence""" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] ) def __magic_name__ ( __a : Optional[int] , __a : str , __a : Optional[Any] ): '''simple docstring''' UpperCamelCase__ = pa.array(OptimizedTypedSequence(__a , col=__a ) ) assert get_base_dtype(arr.type ) == expected_dtype # not in range if col != "other": # avoids errors due to in-place modifications UpperCamelCase__ = copy.deepcopy(__a ) UpperCamelCase__ = np.iinfo(expected_dtype.to_pandas_dtype() ).max + 1 change_first_primitive_element_in_list(__a , __a ) UpperCamelCase__ = pa.array(OptimizedTypedSequence(__a , col=__a ) ) assert get_base_dtype(arr.type ) == pa.intaa() @pytest.mark.parametrize("""raise_exception""" , [False, True] ) def __magic_name__ ( __a : List[str] , __a : List[str] ): '''simple docstring''' UpperCamelCase__ = str(tmp_path / """dataset-train.arrow""" ) try: with ArrowWriter(path=__a ) as writer: if raise_exception: raise pa.lib.ArrowInvalid() else: writer.stream.close() except pa.lib.ArrowInvalid: pass finally: assert writer.stream.closed def __magic_name__ ( __a : Tuple ): '''simple docstring''' UpperCamelCase__ = """mock://dataset-train.arrow""" with ArrowWriter(path=__a , storage_options=mockfs.storage_options ) as writer: assert isinstance(writer._fs , type(__a ) ) assert writer._fs.storage_options == mockfs.storage_options writer.write({"""col_1""": """foo""", """col_2""": 1} ) writer.write({"""col_1""": """bar""", """col_2""": 2} ) UpperCamelCase__ , UpperCamelCase__ = writer.finalize() assert num_examples == 2 assert num_bytes > 0 assert mockfs.exists(__a ) def __magic_name__ ( ): '''simple docstring''' UpperCamelCase__ = pa.BufferOutputStream() with ParquetWriter(stream=__a ) as writer: writer.write({"""col_1""": """foo""", """col_2""": 1} ) writer.write({"""col_1""": """bar""", """col_2""": 2} ) UpperCamelCase__ , UpperCamelCase__ = writer.finalize() assert num_examples == 2 assert num_bytes > 0 UpperCamelCase__ = pa.BufferReader(output.getvalue() ) UpperCamelCase__ = pq.read_table(__a ) assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]} @require_pil @pytest.mark.parametrize("""embed_local_files""" , [False, True] ) def __magic_name__ ( __a : str , __a : Any ): '''simple docstring''' import PIL.Image UpperCamelCase__ = str(tmp_path / """test_image_rgb.jpg""" ) PIL.Image.fromarray(np.zeros((5, 5) , dtype=np.uinta ) ).save(__a , format="""png""" ) UpperCamelCase__ = pa.BufferOutputStream() with ParquetWriter( stream=__a , features=Features({"""image""": Image()} ) , embed_local_files=__a ) as writer: writer.write({"""image""": image_path} ) writer.finalize() UpperCamelCase__ = pa.BufferReader(output.getvalue() ) UpperCamelCase__ = pq.read_table(__a ) 
UpperCamelCase__ = pa_table.to_pydict() if embed_local_files: assert isinstance(out["""image"""][0]["""path"""] , __a ) with open(__a , """rb""" ) as f: assert out["image"][0]["bytes"] == f.read() else: assert out["image"][0]["path"] == image_path assert out["image"][0]["bytes"] is None def __magic_name__ ( ): '''simple docstring''' UpperCamelCase__ = pa.schema([pa.field("""col_1""" , pa.string() , nullable=__a )] ) UpperCamelCase__ = pa.BufferOutputStream() with ArrowWriter(stream=__a ) as writer: writer._build_writer(inferred_schema=__a ) assert writer._schema == pa.schema([pa.field("""col_1""" , pa.string() )] )
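
# Illustrative sketch (not part of the test module above): the bare pyarrow
# IPC round trip that these ArrowWriter/ParquetWriter tests exercise
# indirectly -- write a record batch into an in-memory stream, then read it
# back and check rows and schema. Only public pyarrow APIs are used; the
# column names simply mirror the fixtures above, and the helper name is ours.
import pyarrow as pa


def _arrow_ipc_roundtrip_sketch() -> pa.Table:
    sink = pa.BufferOutputStream()
    schema = pa.schema({"col_1": pa.string(), "col_2": pa.int64()})
    batch = pa.RecordBatch.from_pydict(
        {"col_1": ["foo", "bar"], "col_2": [1, 2]}, schema=schema
    )
    with pa.ipc.new_stream(sink, schema) as ipc_writer:
        ipc_writer.write_batch(batch)
    # Reading the buffer back recovers both the rows and the schema.
    table = pa.ipc.open_stream(pa.BufferReader(sink.getvalue())).read_all()
    assert table.num_rows == 2 and table.schema == schema
    return table
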
import unittest from transformers import EsmConfig, is_torch_available from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel from transformers.models.esm.modeling_esm import ( ESM_PRETRAINED_MODEL_ARCHIVE_LIST, EsmEmbeddings, create_position_ids_from_input_ids, ) class __A: """simple docstring""" def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=13 , SCREAMING_SNAKE_CASE_=7 , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=33 , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=5 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=37 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=5_12 , SCREAMING_SNAKE_CASE_=16 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=0.02 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=None , ): UpperCamelCase__ = parent UpperCamelCase__ = batch_size UpperCamelCase__ = seq_length UpperCamelCase__ = is_training UpperCamelCase__ = use_input_mask UpperCamelCase__ = use_token_type_ids UpperCamelCase__ = use_labels UpperCamelCase__ = vocab_size UpperCamelCase__ = hidden_size UpperCamelCase__ = num_hidden_layers UpperCamelCase__ = num_attention_heads UpperCamelCase__ = intermediate_size UpperCamelCase__ = hidden_act UpperCamelCase__ = hidden_dropout_prob UpperCamelCase__ = attention_probs_dropout_prob UpperCamelCase__ = max_position_embeddings UpperCamelCase__ = type_vocab_size UpperCamelCase__ = type_sequence_label_size UpperCamelCase__ = initializer_range UpperCamelCase__ = num_labels UpperCamelCase__ = num_choices UpperCamelCase__ = scope def UpperCAmelCase_ (self ): UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCamelCase__ = None if self.use_input_mask: UpperCamelCase__ = random_attention_mask([self.batch_size, self.seq_length] ) UpperCamelCase__ = None UpperCamelCase__ = None UpperCamelCase__ = None if self.use_labels: UpperCamelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCamelCase__ = ids_tensor([self.batch_size] , self.num_choices ) UpperCamelCase__ = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCAmelCase_ (self ): return EsmConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = EsmModel(config=SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) 
model.eval() UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = EsmForMaskedLM(config=SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = self.num_labels UpperCamelCase__ = EsmForTokenClassification(config=SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCAmelCase_ (self ): UpperCamelCase__ = self.prepare_config_and_inputs() ( ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ) = config_and_inputs UpperCamelCase__ = {"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class __A( __lowerCamelCase , __lowerCamelCase , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = False SCREAMING_SNAKE_CASE__ = ( ( EsmForMaskedLM, EsmModel, EsmForSequenceClassification, EsmForTokenClassification, ) if is_torch_available() else () ) SCREAMING_SNAKE_CASE__ = () SCREAMING_SNAKE_CASE__ = ( { """feature-extraction""": EsmModel, """fill-mask""": EsmForMaskedLM, """text-classification""": EsmForSequenceClassification, """token-classification""": EsmForTokenClassification, """zero-shot""": EsmForSequenceClassification, } if is_torch_available() else {} ) SCREAMING_SNAKE_CASE__ = True def UpperCAmelCase_ (self ): UpperCamelCase__ = EsmModelTester(self ) UpperCamelCase__ = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , hidden_size=37 ) def UpperCAmelCase_ (self ): self.config_tester.run_common_tests() def UpperCAmelCase_ (self ): UpperCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self ): UpperCamelCase__ = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: UpperCamelCase__ = type self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self ): UpperCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self ): UpperCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*SCREAMING_SNAKE_CASE_ ) @slow def UpperCAmelCase_ (self ): for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCamelCase__ = 
EsmModel.from_pretrained(SCREAMING_SNAKE_CASE_ ) self.assertIsNotNone(SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self ): UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()[0] UpperCamelCase__ = EsmEmbeddings(config=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = torch.as_tensor([[12, 31, 13, model.padding_idx]] ) UpperCamelCase__ = torch.as_tensor( [ [ 0 + model.padding_idx + 1, 1 + model.padding_idx + 1, 2 + model.padding_idx + 1, model.padding_idx, ] ] ) UpperCamelCase__ = create_position_ids_from_input_ids(SCREAMING_SNAKE_CASE_ , model.padding_idx ) self.assertEqual(position_ids.shape , expected_positions.shape ) self.assertTrue(torch.all(torch.eq(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) ) def UpperCAmelCase_ (self ): UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()[0] UpperCamelCase__ = EsmEmbeddings(config=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = torch.empty(2 , 4 , 30 ) UpperCamelCase__ = [ 0 + embeddings.padding_idx + 1, 1 + embeddings.padding_idx + 1, 2 + embeddings.padding_idx + 1, 3 + embeddings.padding_idx + 1, ] UpperCamelCase__ = torch.as_tensor([expected_single_positions, expected_single_positions] ) UpperCamelCase__ = embeddings.create_position_ids_from_inputs_embeds(SCREAMING_SNAKE_CASE_ ) self.assertEqual(position_ids.shape , expected_positions.shape ) self.assertTrue(torch.all(torch.eq(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) ) @unittest.skip("""Esm does not support embedding resizing""" ) def UpperCAmelCase_ (self ): pass @unittest.skip("""Esm does not support embedding resizing""" ) def UpperCAmelCase_ (self ): pass @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" ) def UpperCAmelCase_ (self ): pass @require_torch class __A( __lowerCamelCase ): """simple docstring""" @slow def UpperCAmelCase_ (self ): with torch.no_grad(): UpperCamelCase__ = EsmForMaskedLM.from_pretrained("""facebook/esm2_t6_8M_UR50D""" ) model.eval() UpperCamelCase__ = torch.tensor([[0, 1, 2, 3, 4, 5]] ) UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ )[0] UpperCamelCase__ = 33 UpperCamelCase__ = torch.Size((1, 6, vocab_size) ) self.assertEqual(output.shape , SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = torch.tensor( [[[8.9215, -10.5898, -6.4671], [-6.3967, -13.9114, -1.1212], [-7.7812, -13.9516, -3.7406]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=1E-4 ) ) @slow def UpperCAmelCase_ (self ): with torch.no_grad(): UpperCamelCase__ = EsmModel.from_pretrained("""facebook/esm2_t6_8M_UR50D""" ) model.eval() UpperCamelCase__ = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] ) UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ )[0] # compare the actual values for a slice. UpperCamelCase__ = torch.tensor( [[[0.1444, 0.5413, 0.3248], [0.3034, 0.0053, 0.3108], [0.3228, -0.2499, 0.3415]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=1E-4 ) )
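
# Sketch of the padding-aware position-id scheme the two embedding tests
# above assert: non-padding tokens are numbered consecutively starting at
# padding_idx + 1, while padding positions keep padding_idx itself. This is a
# standalone illustration of that behaviour, not the library's
# create_position_ids_from_input_ids.
import torch


def _position_ids_sketch(input_ids: torch.Tensor, padding_idx: int) -> torch.Tensor:
    mask = input_ids.ne(padding_idx).int()
    incremental = torch.cumsum(mask, dim=1) * mask  # 1, 2, 3, ... on real tokens only
    return incremental.long() + padding_idx


# _position_ids_sketch(torch.tensor([[12, 31, 13, 1]]), padding_idx=1)
# -> tensor([[2, 3, 4, 1]]), matching the expected_positions built above
# for a model whose pad_token_id is 1.
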
from sklearn.metrics import matthews_corrcoef

import datasets


_DESCRIPTION = """
Compute the Matthews correlation coefficient (MCC)

The Matthews correlation coefficient is used in machine learning as a
measure of the quality of binary and multiclass classifications. It takes
into account true and false positives and negatives and is generally
regarded as a balanced measure which can be used even if the classes are of
very different sizes. The MCC is in essence a correlation coefficient value
between -1 and +1. A coefficient of +1 represents a perfect prediction, 0
an average random prediction and -1 an inverse prediction. The statistic
is also known as the phi coefficient. [source: Wikipedia]
"""

_KWARGS_DESCRIPTION = """
Args:
    predictions (list of int): Predicted labels, as returned by a model.
    references (list of int): Ground truth labels.
    sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.
Returns:
    matthews_correlation (dict containing float): Matthews correlation.
Examples:
    Example 1, a basic example with only predictions and references as inputs:
        >>> matthews_metric = datasets.load_metric("matthews_correlation")
        >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
        ...                                   predictions=[1, 2, 2, 0, 3, 3])
        >>> print(round(results['matthews_correlation'], 2))
        0.54

    Example 2, the same example as above, but also including sample weights:
        >>> matthews_metric = datasets.load_metric("matthews_correlation")
        >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
        ...                                   predictions=[1, 2, 2, 0, 3, 3],
        ...                                   sample_weight=[0.5, 3, 1, 1, 1, 2])
        >>> print(round(results['matthews_correlation'], 2))
        0.1

    Example 3, the same example as above, but with sample weights that cause a negative correlation:
        >>> matthews_metric = datasets.load_metric("matthews_correlation")
        >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
        ...                                   predictions=[1, 2, 2, 0, 3, 3],
        ...                                   sample_weight=[0.5, 1, 0, 0, 0, 1])
        >>> print(round(results['matthews_correlation'], 2))
        -0.25
"""

_CITATION = """\
@article{scikit-learn,
  title={Scikit-learn: Machine Learning in {P}ython},
  author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
          and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
          and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
          Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
  journal={Journal of Machine Learning Research},
  volume={12},
  pages={2825--2830},
  year={2011}
}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class MatthewsCorrelation(datasets.Metric):
    """Matthews correlation coefficient metric, a thin wrapper around sklearn."""

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int32"),
                    "references": datasets.Value("int32"),
                }
            ),
            reference_urls=[
                "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html"
            ],
        )

    def _compute(self, predictions, references, sample_weight=None):
        return {
            "matthews_correlation": float(
                matthews_corrcoef(references, predictions, sample_weight=sample_weight)
            ),
        }
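
# Worked illustration of the statistic wrapped above: for binary labels the
# Matthews correlation coefficient reduces to
# (TP*TN - FP*FN) / sqrt((TP+FP)(TP+FN)(TN+FP)(TN+FN)).
# This sketch is for exposition only; the metric class defers to sklearn,
# which also covers the multiclass and sample-weighted cases.
from math import sqrt


def _binary_mcc_sketch(references, predictions):
    tp = sum(r == p == 1 for r, p in zip(references, predictions))
    tn = sum(r == p == 0 for r, p in zip(references, predictions))
    fp = sum(r == 0 and p == 1 for r, p in zip(references, predictions))
    fn = sum(r == 1 and p == 0 for r, p in zip(references, predictions))
    denominator = sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
    return 0.0 if denominator == 0 else (tp * tn - fp * fn) / denominator
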
from ..utils import DummyObject, requires_backends class __A( metaclass=__lowerCamelCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = ["""torch""", """transformers""", """onnx"""] def __init__(self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ): requires_backends(self , ["""torch""", """transformers""", """onnx"""] ) @classmethod def UpperCAmelCase_ (cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ): requires_backends(cls , ["""torch""", """transformers""", """onnx"""] ) @classmethod def UpperCAmelCase_ (cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ): requires_backends(cls , ["""torch""", """transformers""", """onnx"""] ) class __A( metaclass=__lowerCamelCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = ["""torch""", """transformers""", """onnx"""] def __init__(self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ): requires_backends(self , ["""torch""", """transformers""", """onnx"""] ) @classmethod def UpperCAmelCase_ (cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ): requires_backends(cls , ["""torch""", """transformers""", """onnx"""] ) @classmethod def UpperCAmelCase_ (cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ): requires_backends(cls , ["""torch""", """transformers""", """onnx"""] ) class __A( metaclass=__lowerCamelCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = ["""torch""", """transformers""", """onnx"""] def __init__(self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ): requires_backends(self , ["""torch""", """transformers""", """onnx"""] ) @classmethod def UpperCAmelCase_ (cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ): requires_backends(cls , ["""torch""", """transformers""", """onnx"""] ) @classmethod def UpperCAmelCase_ (cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ): requires_backends(cls , ["""torch""", """transformers""", """onnx"""] ) class __A( metaclass=__lowerCamelCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = ["""torch""", """transformers""", """onnx"""] def __init__(self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ): requires_backends(self , ["""torch""", """transformers""", """onnx"""] ) @classmethod def UpperCAmelCase_ (cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ): requires_backends(cls , ["""torch""", """transformers""", """onnx"""] ) @classmethod def UpperCAmelCase_ (cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ): requires_backends(cls , ["""torch""", """transformers""", """onnx"""] ) class __A( metaclass=__lowerCamelCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = ["""torch""", """transformers""", """onnx"""] def __init__(self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ): requires_backends(self , ["""torch""", """transformers""", """onnx"""] ) @classmethod def UpperCAmelCase_ (cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ): requires_backends(cls , ["""torch""", """transformers""", """onnx"""] ) @classmethod def UpperCAmelCase_ (cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ): requires_backends(cls , ["""torch""", """transformers""", """onnx"""] ) class __A( metaclass=__lowerCamelCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = ["""torch""", """transformers""", """onnx"""] def __init__(self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ): requires_backends(self , ["""torch""", """transformers""", """onnx"""] ) @classmethod def UpperCAmelCase_ (cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ): requires_backends(cls , ["""torch""", """transformers""", """onnx"""] ) @classmethod def 
UpperCAmelCase_ (cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ): requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
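
# Minimal sketch of the pattern above: requires_backends checks each named
# backend and raises an ImportError when one is missing, so these dummy
# placeholders fail loudly at construction (or classmethod) time instead of
# at some later attribute access. The availability check below (find_spec)
# is a stand-in for the library's own per-backend predicates, not the real
# implementation.
import importlib.util


def _requires_backends_sketch(obj, backends) -> None:
    missing = [name for name in backends if importlib.util.find_spec(name) is None]
    if missing:
        owner = obj.__name__ if isinstance(obj, type) else type(obj).__name__
        raise ImportError(
            f"{owner} requires the following backends: {', '.join(missing)}"
        )
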
def validate_initial_digits(credit_card_number: str) -> bool:
    """Check that the number starts with a prefix used by the major issuers."""
    return credit_card_number.startswith(("34", "35", "37", "4", "5", "6"))


def luhn_validation(credit_card_number: str) -> bool:
    """Return True if the number passes the Luhn checksum."""
    cc_number = credit_card_number
    total = 0
    half_len = len(cc_number) - 2
    for i in range(half_len, -1, -2):
        # double the value of every second digit
        digit = int(cc_number[i])
        digit *= 2
        # If doubling of a number results in a two digit number
        # i.e greater than 9 (e.g., 6 x 2 = 12),
        # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
        # to get a single digit number.
        if digit > 9:
            digit %= 10
            digit += 1
        cc_number = cc_number[:i] + str(digit) + cc_number[i + 1 :]
        total += digit
    # Sum up the remaining digits
    for i in range(len(cc_number) - 1, -1, -2):
        total += int(cc_number[i])
    return total % 10 == 0


def validate_credit_card_number(credit_card_number: str) -> bool:
    """Print and return the verdict for ``credit_card_number``."""
    error_message = f"{credit_card_number} is an invalid credit card number because"
    if not credit_card_number.isdigit():
        print(f"{error_message} it has nonnumerical characters.")
        return False
    if not 13 <= len(credit_card_number) <= 16:
        print(f"{error_message} of its length.")
        return False
    if not validate_initial_digits(credit_card_number):
        print(f"{error_message} of its first two digits.")
        return False
    if not luhn_validation(credit_card_number):
        print(f"{error_message} it fails the Luhn check.")
        return False
    print(f"{credit_card_number} is a valid credit card number.")
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    validate_credit_card_number("4111111111111111")
    validate_credit_card_number("32323")
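
# Worked example of the Luhn pass above (illustration only; the helper names
# are ours): in "4111111111111111" every second digit from the right is
# doubled and, when the product exceeds 9, folded back to one digit
# (12 -> 1 + 2 = 3, computed as digit % 10 + 1). The grand total comes to 30,
# and 30 % 10 == 0, so the number is valid.
def _luhn_total_sketch(cc_number: str) -> int:
    total = 0
    for offset, char in enumerate(reversed(cc_number)):
        digit = int(char)
        if offset % 2 == 1:  # every second digit, counted from the right
            digit *= 2
            if digit > 9:
                digit = digit % 10 + 1
        total += digit
    return total


def _luhn_examples() -> None:
    assert _luhn_total_sketch("4111111111111111") == 30  # 30 % 10 == 0 -> valid
    assert luhn_validation("4111111111111111")
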
from __future__ import annotations

from math import gcd


def pollard_rho(
    num: int,
    seed: int = 2,
    step: int = 1,
    attempts: int = 3,
) -> int | None:
    """Return a nontrivial factor of ``num``, or None if no factor is found
    within ``attempts`` tries."""
    if num < 2:
        raise ValueError("The input value cannot be less than 2")

    # Because of the relationship between ``f(f(x))`` and ``f(x)``, this
    # algorithm struggles to find factors that are divisible by two.
    # As a workaround, we specifically check for two and even inputs.
    # See: https://math.stackexchange.com/a/2856214/165820
    if num > 2 and num % 2 == 0:
        return 2

    # Pollard's Rho algorithm requires a function that returns pseudorandom
    # values between 0 <= X < ``num``. It doesn't need to be random in the
    # sense that the output value is cryptographically secure or difficult
    # to calculate, it only needs to be random in the sense that all output
    # values should be equally likely to appear.
    # For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``
    # However, the success of Pollard's algorithm isn't guaranteed and is
    # determined in part by the initial seed and the chosen random function.
    # To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
    # where ``C`` is a value that we can modify between each attempt.
    def rand_fn(value: int, step: int, modulus: int) -> int:
        return (pow(value, 2) + step) % modulus

    for _ in range(attempts):
        # These track the position within the cycle detection logic.
        tortoise = seed
        hare = seed

        while True:
            # At each iteration, the tortoise moves one step and the hare moves two.
            tortoise = rand_fn(tortoise, step, num)
            hare = rand_fn(hare, step, num)
            hare = rand_fn(hare, step, num)

            # At some point both the tortoise and the hare will enter a cycle whose
            # length ``p`` is a divisor of ``num``. Once in that cycle, at some point
            # the tortoise and hare will end up on the same value modulo ``p``.
            # We can detect when this happens because the position difference between
            # the tortoise and the hare will share a common divisor with ``num``.
            divisor = gcd(hare - tortoise, num)

            if divisor == 1:
                # No common divisor yet, just keep searching.
                continue
            else:
                # We found a common divisor!
                if divisor == num:
                    # Unfortunately, the divisor is ``num`` itself and is useless.
                    break
                else:
                    # The divisor is a nontrivial factor of ``num``!
                    return divisor

        # If we made it here, then this attempt failed.
        # We need to pick a new starting seed for the tortoise and hare
        # in addition to a new step value for the random function.
        # To keep this example implementation deterministic, the
        # new values will be generated based on currently available
        # values instead of using something like ``random.randint``.

        # We can use the hare's position as the new seed.
        # This is actually what Richard Brent's "optimized" variant does.
        seed = hare

        # The new step value for the random function can just be incremented.
        # At first the results will be similar to what the old function would
        # have produced, but the value will quickly diverge after a bit.
        step += 1

    # We haven't found a divisor within the requested number of attempts.
    # We were unlucky or ``num`` itself is actually prime.
    return None


if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "num",
        type=int,
        help="The value to find a divisor of",
    )
    parser.add_argument(
        "--attempts",
        type=int,
        default=3,
        help="The number of attempts before giving up",
    )
    args = parser.parse_args()

    divisor = pollard_rho(args.num, attempts=args.attempts)
    if divisor is None:
        print(f"{args.num} is probably prime")
    else:
        quotient = args.num // divisor
        print(f"{args.num} = {divisor} * {quotient}")
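
# Usage illustration for pollard_rho above (wrapped in a function so nothing
# runs at import time): 8051 = 83 * 97 is the classic worked example for
# f(x) = (x**2 + 1) % num with seed 2, while a prime input can only ever
# produce gcd values of 1 or num, so it exhausts the attempts and yields
# None rather than raising.
def _pollard_rho_examples() -> None:
    assert pollard_rho(8051) in (83, 97)
    assert pollard_rho(101) is None  # 101 is prime: no nontrivial factor exists
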
def solution(length: int = 50) -> int:
    """Count the ways a row of ``length`` units can be filled with unit
    squares and tiles of length two, three or four, via dynamic programming
    over the position of the first tile."""
    ways_number = [1] * (length + 1)

    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                ways_number[row_length] += ways_number[
                    row_length - tile_start - tile_length
                ]

    return ways_number[length]


if __name__ == "__main__":
    print(f'{solution() = }')
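
# Sanity check of the recurrence above (illustration): the first few counts
# can be verified by hand -- one way for a single unit (a lone square), two
# for length 2 (two squares, or one 2-tile), four for length 3, eight for
# length 4.
def _solution_examples() -> None:
    assert [solution(n) for n in range(1, 5)] == [1, 2, 4, 8]
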
import tempfile import unittest from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, ) from ...test_tokenization_common import TokenizerTesterMixin lowerCamelCase_ = get_tests_dir('''fixtures/test_sentencepiece.model''') if is_torch_available(): from transformers.models.plbart.modeling_plbart import shift_tokens_right lowerCamelCase_ = 5_00_03 lowerCamelCase_ = 5_00_02 @require_sentencepiece @require_tokenizers class __A( __lowerCamelCase , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = PLBartTokenizer SCREAMING_SNAKE_CASE__ = None SCREAMING_SNAKE_CASE__ = False def UpperCAmelCase_ (self ): super().setUp() # We have a SentencePiece fixture for testing UpperCamelCase__ = PLBartTokenizer(SCREAMING_SNAKE_CASE_ , language_codes="""base""" , keep_accents=SCREAMING_SNAKE_CASE_ ) tokenizer.save_pretrained(self.tmpdirname ) def UpperCAmelCase_ (self ): UpperCamelCase__ = PLBartTokenizer(SCREAMING_SNAKE_CASE_ , language_codes="""base""" , keep_accents=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = tokenizer.tokenize("""This is a test""" ) self.assertListEqual(SCREAMING_SNAKE_CASE_ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , ) UpperCamelCase__ = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( SCREAMING_SNAKE_CASE_ , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """.""", ] , ) UpperCamelCase__ = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ) self.assertListEqual( SCREAMING_SNAKE_CASE_ , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4] ] , ) UpperCamelCase__ = tokenizer.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_ ) self.assertListEqual( SCREAMING_SNAKE_CASE_ , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """.""", ] , ) UpperCamelCase__ = tokenizer.vocab_size UpperCamelCase__ = [tokenizer.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_ ) for x in range(end - 4 , SCREAMING_SNAKE_CASE_ )] self.assertListEqual(SCREAMING_SNAKE_CASE_ , ["""__java__""", """__python__""", """__en_XX__""", """<mask>"""] ) UpperCamelCase__ = """java.lang.Exception, python.lang.Exception, javascript, php, ruby, go""" UpperCamelCase__ = tokenizer(SCREAMING_SNAKE_CASE_ ).input_ids self.assertEqual( tokenizer.decode(SCREAMING_SNAKE_CASE_ , skip_special_tokens=SCREAMING_SNAKE_CASE_ , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ , ) def UpperCAmelCase_ (self ): UpperCamelCase__ = PLBartTokenizer(SCREAMING_SNAKE_CASE_ , language_codes="""multi""" , keep_accents=SCREAMING_SNAKE_CASE_ ) 
UpperCamelCase__ = tokenizer.tokenize("""This is a test""" ) self.assertListEqual(SCREAMING_SNAKE_CASE_ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , ) UpperCamelCase__ = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( SCREAMING_SNAKE_CASE_ , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """.""", ] , ) UpperCamelCase__ = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ) self.assertListEqual( SCREAMING_SNAKE_CASE_ , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4] ] , ) UpperCamelCase__ = tokenizer.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_ ) self.assertListEqual( SCREAMING_SNAKE_CASE_ , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """.""", ] , ) UpperCamelCase__ = tokenizer.vocab_size UpperCamelCase__ = [tokenizer.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_ ) for x in range(end - 7 , SCREAMING_SNAKE_CASE_ )] self.assertListEqual( SCREAMING_SNAKE_CASE_ , ["""__java__""", """__python__""", """__en_XX__""", """__javascript__""", """__php__""", """__ruby__""", """__go__"""] ) UpperCamelCase__ = """java.lang.Exception, python.lang.Exception, javascript, php, ruby, go""" UpperCamelCase__ = tokenizer(SCREAMING_SNAKE_CASE_ ).input_ids self.assertEqual( tokenizer.decode(SCREAMING_SNAKE_CASE_ , skip_special_tokens=SCREAMING_SNAKE_CASE_ , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ , ) @require_torch @require_sentencepiece @require_tokenizers class __A( unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = """uclanlp/plbart-python-en_XX""" SCREAMING_SNAKE_CASE__ = [ """def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])""", """def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])""", ] SCREAMING_SNAKE_CASE__ = [ """Returns the maximum value of a b c.""", """Sums the values of a b c.""", ] SCREAMING_SNAKE_CASE__ = [ 134, 5452, 33460, 33441, 33463, 33465, 33463, 33449, 988, 20, 33456, 19, 33456, 771, 39, 4258, 889, 3318, 33441, 33463, 33465, 33463, 33449, 2471, 2, PYTHON_CODE, ] @classmethod def UpperCAmelCase_ (cls ): UpperCamelCase__ = PLBartTokenizer.from_pretrained( cls.checkpoint_name , language_codes="""base""" , src_lang="""python""" , tgt_lang="""en_XX""" ) UpperCamelCase__ = 1 return cls def UpperCAmelCase_ (self ): self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""__java__"""] , 5_00_01 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""__python__"""] , 5_00_02 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""__en_XX__"""] , 5_00_03 ) def UpperCAmelCase_ (self ): UpperCamelCase__ = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens , 
SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self ): self.assertIn(SCREAMING_SNAKE_CASE_ , self.tokenizer.all_special_ids ) UpperCamelCase__ = [EN_CODE, 90_37, 3_34_42, 57, 7_52, 1_53, 14, 56, 18, 9, 2] UpperCamelCase__ = self.tokenizer.decode(SCREAMING_SNAKE_CASE_ , skip_special_tokens=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=SCREAMING_SNAKE_CASE_ ) self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) self.assertNotIn(self.tokenizer.eos_token , SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self ): UpperCamelCase__ = ["""def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])""" * 20] self.assertIsInstance(src_text[0] , SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = 10 UpperCamelCase__ = self.tokenizer(SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ ).input_ids[0] self.assertEqual(ids[-2] , 2 ) self.assertEqual(ids[-1] , SCREAMING_SNAKE_CASE_ ) self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self ): self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """__java__"""] ) , [5_00_04, 5_00_01] ) def UpperCAmelCase_ (self ): UpperCamelCase__ = tempfile.mkdtemp() UpperCamelCase__ = self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = PLBartTokenizer.from_pretrained(SCREAMING_SNAKE_CASE_ ) self.assertDictEqual(new_tok.fairseq_tokens_to_ids , SCREAMING_SNAKE_CASE_ ) @require_torch def UpperCAmelCase_ (self ): UpperCamelCase__ = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=SCREAMING_SNAKE_CASE_ , return_tensors="""pt""" ) UpperCamelCase__ = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id ) # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 self.assertEqual(batch.input_ids[1][-2:].tolist() , [2, PYTHON_CODE] ) self.assertEqual(batch.decoder_input_ids[1][0] , SCREAMING_SNAKE_CASE_ ) self.assertEqual(batch.decoder_input_ids[1][-1] , 2 ) self.assertEqual(batch.labels[1][-2:].tolist() , [2, EN_CODE] ) @require_torch def UpperCAmelCase_ (self ): UpperCamelCase__ = self.tokenizer( self.src_text , text_target=self.tgt_text , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , max_length=len(self.expected_src_tokens ) , return_tensors="""pt""" , ) UpperCamelCase__ = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id ) self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) self.assertEqual((2, 26) , batch.input_ids.shape ) self.assertEqual((2, 26) , batch.attention_mask.shape ) UpperCamelCase__ = batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens , SCREAMING_SNAKE_CASE_ ) self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS # Test that special tokens are reset self.assertEqual(self.tokenizer.prefix_tokens , [] ) self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, PYTHON_CODE] ) def UpperCAmelCase_ (self ): UpperCamelCase__ = self.tokenizer(self.src_text , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , max_length=3 , return_tensors="""pt""" ) UpperCamelCase__ = self.tokenizer( text_target=self.tgt_text , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , max_length=10 , return_tensors="""pt""" ) UpperCamelCase__ = targets["""input_ids"""] UpperCamelCase__ = shift_tokens_right(SCREAMING_SNAKE_CASE_ , self.tokenizer.pad_token_id ) self.assertEqual(batch.input_ids.shape[1] 
, 3 ) self.assertEqual(batch.decoder_input_ids.shape[1] , 10 ) @require_torch def UpperCAmelCase_ (self ): UpperCamelCase__ = self.tokenizer._build_translation_inputs( """A test""" , return_tensors="""pt""" , src_lang="""en_XX""" , tgt_lang="""java""" ) self.assertEqual( nested_simplify(SCREAMING_SNAKE_CASE_ ) , { # A, test, EOS, en_XX """input_ids""": [[1_50, 2_42, 2, 5_00_03]], """attention_mask""": [[1, 1, 1, 1]], # java """forced_bos_token_id""": 5_00_01, } , )
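
# Standalone sketch of the mBART-style shift_tokens_right the batch tests
# above rely on: the language code sitting at the last non-padding position
# is rotated to the front so the decoder starts from it. This re-implements
# the documented wrap-around behaviour for exposition; it is not the library
# function itself, and the helper name is ours.
import torch


def _shift_tokens_right_sketch(input_ids: torch.Tensor, pad_token_id: int) -> torch.Tensor:
    shifted = input_ids.clone()
    last_real = (input_ids.ne(pad_token_id).sum(dim=1) - 1).unsqueeze(-1)
    start_tokens = shifted.gather(1, last_real).squeeze(-1)
    shifted[:, 1:] = shifted[:, :-1].clone()  # shift everything one step right
    shifted[:, 0] = start_tokens  # language code becomes the decoder start token
    return shifted


# _shift_tokens_right_sketch(torch.tensor([[150, 242, 2, 50003]]), pad_token_id=1)
# -> tensor([[50003, 150, 242, 2]])  (language code first, then tokens, then EOS),
# mirroring the "A test" translation-inputs assertion above.
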
import itertools import json import os import unittest from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class __A( __lowerCamelCase , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = RobertaTokenizer SCREAMING_SNAKE_CASE__ = RobertaTokenizerFast SCREAMING_SNAKE_CASE__ = True SCREAMING_SNAKE_CASE__ = {"""cls_token""": """<s>"""} def UpperCAmelCase_ (self ): super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt UpperCamelCase__ = [ """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """\u0120""", """\u0120l""", """\u0120n""", """\u0120lo""", """\u0120low""", """er""", """\u0120lowest""", """\u0120newer""", """\u0120wider""", """<unk>""", ] UpperCamelCase__ = dict(zip(SCREAMING_SNAKE_CASE_ , range(len(SCREAMING_SNAKE_CASE_ ) ) ) ) UpperCamelCase__ = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""] UpperCamelCase__ = {"""unk_token""": """<unk>"""} UpperCamelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) UpperCamelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp: fp.write(json.dumps(SCREAMING_SNAKE_CASE_ ) + """\n""" ) with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp: fp.write("""\n""".join(SCREAMING_SNAKE_CASE_ ) ) def UpperCAmelCase_ (self , **SCREAMING_SNAKE_CASE_ ): kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self , **SCREAMING_SNAKE_CASE_ ): kwargs.update(self.special_tokens_map ) return RobertaTokenizerFast.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = """lower newer""" UpperCamelCase__ = """lower newer""" return input_text, output_text def UpperCAmelCase_ (self ): UpperCamelCase__ = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map ) UpperCamelCase__ = """lower newer""" UpperCamelCase__ = ["""l""", """o""", """w""", """er""", """\u0120""", """n""", """e""", """w""", """er"""] UpperCamelCase__ = tokenizer.tokenize(SCREAMING_SNAKE_CASE_ ) # , add_prefix_space=True) self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = tokens + [tokenizer.unk_token] UpperCamelCase__ = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self ): UpperCamelCase__ = self.get_tokenizer() self.assertListEqual(tokenizer.encode("""Hello world!""" , add_special_tokens=SCREAMING_SNAKE_CASE_ ) , [0, 3_14_14, 2_32, 3_28, 2] ) self.assertListEqual( tokenizer.encode("""Hello world! 
cécé herlolip 418""" , add_special_tokens=SCREAMING_SNAKE_CASE_ ) , [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2] , ) @slow def UpperCAmelCase_ (self ): UpperCamelCase__ = self.tokenizer_class.from_pretrained("""roberta-base""" ) UpperCamelCase__ = tokenizer.encode("""sequence builders""" , add_special_tokens=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = tokenizer.encode("""multi-sequence build""" , add_special_tokens=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = tokenizer.encode( """sequence builders""" , add_special_tokens=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = tokenizer.encode( """sequence builders""" , """multi-sequence build""" , add_special_tokens=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) assert encoded_sentence == encoded_text_from_decode assert encoded_pair == encoded_pair_from_decode def UpperCAmelCase_ (self ): UpperCamelCase__ = self.get_tokenizer() UpperCamelCase__ = """Encode this sequence.""" UpperCamelCase__ = tokenizer.byte_encoder[""" """.encode("""utf-8""" )[0]] # Testing encoder arguments UpperCamelCase__ = tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertNotEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) tokenizer.add_special_tokens({"""bos_token""": """<s>"""} ) UpperCamelCase__ = tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = tokenizer.convert_ids_to_tokens(encoded[1] )[0] self.assertNotEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # Testing spaces after special tokens UpperCamelCase__ = """<mask>""" tokenizer.add_special_tokens( {"""mask_token""": AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ )} ) # mask token has a left space UpperCamelCase__ = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = """Encode <mask> sequence""" UpperCamelCase__ = """Encode <mask>sequence""" UpperCamelCase__ = tokenizer.encode(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = encoded.index(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = tokenizer.encode(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = encoded.index(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertNotEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self ): pass def UpperCAmelCase_ (self ): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ): UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = self.tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = """A, <mask> 
AllenNLP sentence.""" UpperCamelCase__ = tokenizer_r.encode_plus(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ , return_token_type_ids=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = tokenizer_p.encode_plus(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ , return_token_type_ids=SCREAMING_SNAKE_CASE_ ) # token_type_ids should put 0 everywhere self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , sum(tokens_p["""token_type_ids"""] ) ) # attention_mask should put 1 everywhere, so sum over length should be 1 self.assertEqual( sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , ) UpperCamelCase__ = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] ) UpperCamelCase__ = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] ) # Rust correctly handles the space before the mask while python doesnt self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] ) self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] ) self.assertSequenceEqual( SCREAMING_SNAKE_CASE_ , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] ) self.assertSequenceEqual( SCREAMING_SNAKE_CASE_ , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] ) def UpperCAmelCase_ (self ): for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ): UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained( self.tmpdirname , use_fast=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ , trim_offsets=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() ) UpperCamelCase__ = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() ) self.assertEqual(pre_tokenizer_state["""add_prefix_space"""] , SCREAMING_SNAKE_CASE_ ) self.assertEqual(post_processor_state["""add_prefix_space"""] , SCREAMING_SNAKE_CASE_ ) self.assertEqual(post_processor_state["""trim_offsets"""] , SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self ): # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and # `trim_offsets` for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ): UpperCamelCase__ = """hello""" # `hello` is a token in the vocabulary of `pretrained_name` UpperCamelCase__ = F"{text_of_1_token} {text_of_1_token}" UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained( SCREAMING_SNAKE_CASE_ , use_fast=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ , trim_offsets=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = tokenizer_r(SCREAMING_SNAKE_CASE_ , return_offsets_mapping=SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ ) self.assertEqual(encoding.offset_mapping[0] , (0, len(SCREAMING_SNAKE_CASE_ )) ) self.assertEqual( encoding.offset_mapping[1] , (len(SCREAMING_SNAKE_CASE_ ) + 1, len(SCREAMING_SNAKE_CASE_ ) + 1 + len(SCREAMING_SNAKE_CASE_ )) , ) UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained( SCREAMING_SNAKE_CASE_ , use_fast=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ , trim_offsets=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = tokenizer_r(SCREAMING_SNAKE_CASE_ , 
return_offsets_mapping=SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ ) self.assertEqual(encoding.offset_mapping[0] , (0, len(SCREAMING_SNAKE_CASE_ )) ) self.assertEqual( encoding.offset_mapping[1] , (len(SCREAMING_SNAKE_CASE_ ) + 1, len(SCREAMING_SNAKE_CASE_ ) + 1 + len(SCREAMING_SNAKE_CASE_ )) , ) UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained( SCREAMING_SNAKE_CASE_ , use_fast=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ , trim_offsets=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = tokenizer_r(SCREAMING_SNAKE_CASE_ , return_offsets_mapping=SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ ) self.assertEqual(encoding.offset_mapping[0] , (0, len(SCREAMING_SNAKE_CASE_ )) ) self.assertEqual( encoding.offset_mapping[1] , (len(SCREAMING_SNAKE_CASE_ ), len(SCREAMING_SNAKE_CASE_ ) + 1 + len(SCREAMING_SNAKE_CASE_ )) , ) UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained( SCREAMING_SNAKE_CASE_ , use_fast=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ , trim_offsets=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = tokenizer_r(SCREAMING_SNAKE_CASE_ , return_offsets_mapping=SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ ) self.assertEqual(encoding.offset_mapping[0] , (0, len(SCREAMING_SNAKE_CASE_ )) ) self.assertEqual( encoding.offset_mapping[1] , (len(SCREAMING_SNAKE_CASE_ ), len(SCREAMING_SNAKE_CASE_ ) + 1 + len(SCREAMING_SNAKE_CASE_ )) , ) UpperCamelCase__ = F" {text}" # tokenizer_r = self.rust_tokenizer_class.from_pretrained( # pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True # ) # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False) # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token))) # self.assertEqual( # encoding.offset_mapping[1], # (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), # ) UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained( SCREAMING_SNAKE_CASE_ , use_fast=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ , trim_offsets=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = tokenizer_r(SCREAMING_SNAKE_CASE_ , return_offsets_mapping=SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ ) self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(SCREAMING_SNAKE_CASE_ )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(SCREAMING_SNAKE_CASE_ ) + 1, 1 + len(SCREAMING_SNAKE_CASE_ ) + 1 + len(SCREAMING_SNAKE_CASE_ )) , ) UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained( SCREAMING_SNAKE_CASE_ , use_fast=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ , trim_offsets=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = tokenizer_r(SCREAMING_SNAKE_CASE_ , return_offsets_mapping=SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(SCREAMING_SNAKE_CASE_ )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(SCREAMING_SNAKE_CASE_ ), 1 + len(SCREAMING_SNAKE_CASE_ ) + 1 + len(SCREAMING_SNAKE_CASE_ )) , ) UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained( SCREAMING_SNAKE_CASE_ , use_fast=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ , trim_offsets=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = tokenizer_r(SCREAMING_SNAKE_CASE_ , return_offsets_mapping=SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(SCREAMING_SNAKE_CASE_ )) ) self.assertEqual( 
encoding.offset_mapping[1] , (1 + len(SCREAMING_SNAKE_CASE_ ), 1 + len(SCREAMING_SNAKE_CASE_ ) + 1 + len(SCREAMING_SNAKE_CASE_ )) , )
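
# Plain-Python restatement of the offset arithmetic the "hello hello" cases
# above assert, where each word is a single token of length n: with
# trim_offsets=True the second token's span excludes the separating space,
# (n + 1, 2n + 1); with trim_offsets=False it includes it, (n, 2n + 1).
# Illustration only -- the real mapping comes from the Rust backend, and the
# exact flag combinations in the calls above are not restated here.
def _offset_examples() -> None:
    n = len("hello")
    trimmed = [(0, n), (n + 1, 2 * n + 1)]  # trim_offsets=True
    untrimmed = [(0, n), (n, 2 * n + 1)]  # trim_offsets=False
    assert trimmed[1][0] - untrimmed[1][0] == 1  # trimming drops the space
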
def greatest_common_divisor(a: int, b: int) -> int:
    """Euclid's algorithm, recursive form."""
    return abs(b) if a == 0 else greatest_common_divisor(b % a, a)


def gcd_by_iterative(x: int, y: int) -> int:
    """Euclid's algorithm, iterative form."""
    while y:  # --> when y=0 then loop will terminate and return x as final GCD.
        x, y = y, x % y
    return abs(x)


def main():
    """Read two integers from the user and print both GCD computations."""
    try:
        nums = input("Enter two integers separated by comma (,): ").split(",")
        num_1 = int(nums[0])
        num_2 = int(nums[1])
        print(
            f"greatest_common_divisor({num_1}, {num_2}) = "
            f"{greatest_common_divisor(num_1, num_2)}"
        )
        print(f"By iterative gcd({num_1}, {num_2}) = {gcd_by_iterative(num_1, num_2)}")
    except (IndexError, UnboundLocalError, ValueError):
        print("Wrong input")


if __name__ == "__main__":
    main()
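
# Worked illustration of the shared remainder sequence (uses the functions
# above): gcd(24, 40) walks (24, 40) -> (16, 24) -> (8, 16) -> (0, 8) and
# returns 8; abs() keeps the result non-negative for negative inputs.
def _gcd_examples() -> None:
    assert greatest_common_divisor(24, 40) == 8
    assert gcd_by_iterative(24, 40) == 8
    assert gcd_by_iterative(-3, 9) == 3
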
import argparse import json import os import pickle import shutil import numpy as np import torch from distiller import Distiller from lm_seqs_dataset import LmSeqsDataset from transformers import ( BertConfig, BertForMaskedLM, BertTokenizer, DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer, GPTaConfig, GPTaLMHeadModel, GPTaTokenizer, RobertaConfig, RobertaForMaskedLM, RobertaTokenizer, ) from utils import git_log, init_gpu_params, logger, set_seed lowerCamelCase_ = { '''distilbert''': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer), '''roberta''': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer), '''bert''': (BertConfig, BertForMaskedLM, BertTokenizer), '''gpt2''': (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer), } def __magic_name__ ( __a : Any ): '''simple docstring''' assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0) assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0) if args.mlm: assert os.path.isfile(args.token_counts ) assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"]) else: assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"]) assert args.teacher_type == args.student_type or ( args.student_type == "distilbert" and args.teacher_type == "bert" ) assert os.path.isfile(args.student_config ) if args.student_pretrained_weights is not None: assert os.path.isfile(args.student_pretrained_weights ) if args.freeze_token_type_embds: assert args.student_type in ["roberta"] assert args.alpha_ce >= 0.0 assert args.alpha_mlm >= 0.0 assert args.alpha_clm >= 0.0 assert args.alpha_mse >= 0.0 assert args.alpha_cos >= 0.0 assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0 def __magic_name__ ( __a : List[Any] , __a : Any ): '''simple docstring''' if args.student_type == "roberta": UpperCamelCase__ = False elif args.student_type == "gpt2": UpperCamelCase__ = False def __magic_name__ ( __a : int , __a : Dict ): '''simple docstring''' if args.student_type == "roberta": UpperCamelCase__ = False def __magic_name__ ( ): '''simple docstring''' UpperCamelCase__ = argparse.ArgumentParser(description="""Training""" ) parser.add_argument("""--force""" , action="""store_true""" , help="""Overwrite dump_path if it already exists.""" ) parser.add_argument( """--dump_path""" , type=__a , required=__a , help="""The output directory (log, checkpoints, parameters, etc.)""" ) parser.add_argument( """--data_file""" , type=__a , required=__a , help="""The binarized file (tokenized + tokens_to_ids) and grouped by sequence.""" , ) parser.add_argument( """--student_type""" , type=__a , choices=["""distilbert""", """roberta""", """gpt2"""] , required=__a , help="""The student type (DistilBERT, RoBERTa).""" , ) parser.add_argument("""--student_config""" , type=__a , required=__a , help="""Path to the student configuration.""" ) parser.add_argument( """--student_pretrained_weights""" , default=__a , type=__a , help="""Load student initialization checkpoint.""" ) parser.add_argument( """--teacher_type""" , choices=["""bert""", """roberta""", """gpt2"""] , required=__a , help="""Teacher type (BERT, RoBERTa).""" ) parser.add_argument("""--teacher_name""" , type=__a , required=__a , help="""The teacher model.""" ) parser.add_argument("""--temperature""" , default=2.0 , type=__a , help="""Temperature for the softmax temperature.""" ) parser.add_argument( """--alpha_ce""" , default=0.5 , type=__a , 
help="""Linear weight for the distillation loss. Must be >=0.""" ) parser.add_argument( """--alpha_mlm""" , default=0.0 , type=__a , help="""Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.""" , ) parser.add_argument("""--alpha_clm""" , default=0.5 , type=__a , help="""Linear weight for the CLM loss. Must be >=0.""" ) parser.add_argument("""--alpha_mse""" , default=0.0 , type=__a , help="""Linear weight of the MSE loss. Must be >=0.""" ) parser.add_argument( """--alpha_cos""" , default=0.0 , type=__a , help="""Linear weight of the cosine embedding loss. Must be >=0.""" ) parser.add_argument( """--mlm""" , action="""store_true""" , help="""The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.""" ) parser.add_argument( """--mlm_mask_prop""" , default=0.15 , type=__a , help="""Proportion of tokens for which we need to make a prediction.""" , ) parser.add_argument("""--word_mask""" , default=0.8 , type=__a , help="""Proportion of tokens to mask out.""" ) parser.add_argument("""--word_keep""" , default=0.1 , type=__a , help="""Proportion of tokens to keep.""" ) parser.add_argument("""--word_rand""" , default=0.1 , type=__a , help="""Proportion of tokens to randomly replace.""" ) parser.add_argument( """--mlm_smoothing""" , default=0.7 , type=__a , help="""Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).""" , ) parser.add_argument("""--token_counts""" , type=__a , help="""The token counts in the data_file for MLM.""" ) parser.add_argument( """--restrict_ce_to_mask""" , action="""store_true""" , help="""If true, compute the distillation loss only the [MLM] prediction distribution.""" , ) parser.add_argument( """--freeze_pos_embs""" , action="""store_true""" , help="""Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only.""" , ) parser.add_argument( """--freeze_token_type_embds""" , action="""store_true""" , help="""Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only.""" , ) parser.add_argument("""--n_epoch""" , type=__a , default=3 , help="""Number of pass on the whole dataset.""" ) parser.add_argument("""--batch_size""" , type=__a , default=5 , help="""Batch size (for each process).""" ) parser.add_argument( """--group_by_size""" , action="""store_false""" , help="""If true, group sequences that have similar length into the same batch. 
Default is true.""" , ) parser.add_argument( """--gradient_accumulation_steps""" , type=__a , default=50 , help="""Gradient accumulation for larger training batches.""" , ) parser.add_argument("""--warmup_prop""" , default=0.05 , type=__a , help="""Linear warmup proportion.""" ) parser.add_argument("""--weight_decay""" , default=0.0 , type=__a , help="""Weight decay if we apply some.""" ) parser.add_argument("""--learning_rate""" , default=5E-4 , type=__a , help="""The initial learning rate for Adam.""" ) parser.add_argument("""--adam_epsilon""" , default=1E-6 , type=__a , help="""Epsilon for Adam optimizer.""" ) parser.add_argument("""--max_grad_norm""" , default=5.0 , type=__a , help="""Max gradient norm.""" ) parser.add_argument("""--initializer_range""" , default=0.02 , type=__a , help="""Random initialization range.""" ) parser.add_argument( """--fp16""" , action="""store_true""" , help="""Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit""" , ) parser.add_argument( """--fp16_opt_level""" , type=__a , default="""O1""" , help=( """For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3'].""" """See details at https://nvidia.github.io/apex/amp.html""" ) , ) parser.add_argument("""--n_gpu""" , type=__a , default=1 , help="""Number of GPUs in the node.""" ) parser.add_argument("""--local_rank""" , type=__a , default=-1 , help="""Distributed training - Local rank""" ) parser.add_argument("""--seed""" , type=__a , default=56 , help="""Random seed""" ) parser.add_argument("""--log_interval""" , type=__a , default=500 , help="""Tensorboard logging interval.""" ) parser.add_argument("""--checkpoint_interval""" , type=__a , default=4_000 , help="""Checkpoint interval.""" ) UpperCamelCase__ = parser.parse_args() sanity_checks(__a ) # ARGS # init_gpu_params(__a ) set_seed(__a ) if args.is_master: if os.path.exists(args.dump_path ): if not args.force: raise ValueError( f"Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite" """ itUse `--force` if you want to overwrite it""" ) else: shutil.rmtree(args.dump_path ) if not os.path.exists(args.dump_path ): os.makedirs(args.dump_path ) logger.info(f"Experiment will be dumped and logged in {args.dump_path}" ) # SAVE PARAMS # logger.info(f"Param: {args}" ) with open(os.path.join(args.dump_path , """parameters.json""" ) , """w""" ) as f: json.dump(vars(__a ) , __a , indent=4 ) git_log(args.dump_path ) UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = MODEL_CLASSES[args.student_type] UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = MODEL_CLASSES[args.teacher_type] # TOKENIZER # UpperCamelCase__ = teacher_tokenizer_class.from_pretrained(args.teacher_name ) UpperCamelCase__ = {} for tok_name, tok_symbol in tokenizer.special_tokens_map.items(): UpperCamelCase__ = tokenizer.all_special_tokens.index(__a ) UpperCamelCase__ = tokenizer.all_special_ids[idx] logger.info(f"Special tokens {special_tok_ids}" ) UpperCamelCase__ = special_tok_ids UpperCamelCase__ = tokenizer.max_model_input_sizes[args.teacher_name] # DATA LOADER # logger.info(f"Loading data from {args.data_file}" ) with open(args.data_file , """rb""" ) as fp: UpperCamelCase__ = pickle.load(__a ) if args.mlm: logger.info(f"Loading token counts from {args.token_counts} (already pre-computed)" ) with open(args.token_counts , """rb""" ) as fp: UpperCamelCase__ = pickle.load(__a ) UpperCamelCase__ = np.maximum(__a , 1 ) ** -args.mlm_smoothing for idx in special_tok_ids.values(): UpperCamelCase__ = 0.0 
# do not predict special tokens UpperCamelCase__ = torch.from_numpy(__a ) else: UpperCamelCase__ = None UpperCamelCase__ = LmSeqsDataset(params=__a , data=__a ) logger.info("""Data loader created.""" ) # STUDENT # logger.info(f"Loading student config from {args.student_config}" ) UpperCamelCase__ = student_config_class.from_pretrained(args.student_config ) UpperCamelCase__ = True if args.student_pretrained_weights is not None: logger.info(f"Loading pretrained weights from {args.student_pretrained_weights}" ) UpperCamelCase__ = student_model_class.from_pretrained(args.student_pretrained_weights , config=__a ) else: UpperCamelCase__ = student_model_class(__a ) if args.n_gpu > 0: student.to(f"cuda:{args.local_rank}" ) logger.info("""Student loaded.""" ) # TEACHER # UpperCamelCase__ = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=__a ) if args.n_gpu > 0: teacher.to(f"cuda:{args.local_rank}" ) logger.info(f"Teacher loaded from {args.teacher_name}." ) # FREEZING # if args.freeze_pos_embs: freeze_pos_embeddings(__a , __a ) if args.freeze_token_type_embds: freeze_token_type_embeddings(__a , __a ) # SANITY CHECKS # assert student.config.vocab_size == teacher.config.vocab_size assert student.config.hidden_size == teacher.config.hidden_size assert student.config.max_position_embeddings == teacher.config.max_position_embeddings if args.mlm: assert token_probs.size(0 ) == stu_architecture_config.vocab_size # DISTILLER # torch.cuda.empty_cache() UpperCamelCase__ = Distiller( params=__a , dataset=__a , token_probs=__a , student=__a , teacher=__a ) distiller.train() logger.info("""Let's go get some drinks.""" ) if __name__ == "__main__": main()
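# A minimal, self-contained sketch of the token-probability smoothing performed
# above with `np.maximum(counts, 1) ** -args.mlm_smoothing`; the variable names
# and counts below are illustrative, not taken from the script. Raising raw
# token counts to a negative power flattens the frequency distribution, so rare
# tokens are selected for masking more often (as in XLM / word2vec subsampling).
import numpy as np

counts = np.array([1_000_000, 10_000, 100, 1])  # hypothetical corpus token counts
smoothing = 0.7                                 # the script's --mlm_smoothing default
weights = np.maximum(counts, 1).astype(np.float64) ** -smoothing
weights /= weights.sum()                        # normalize into a sampling distribution
print(weights)  # the rarest token gets a far larger share than its raw frequency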
86
1
import unittest import numpy as np from transformers.testing_utils import require_pytesseract, require_torch from transformers.utils import is_pytesseract_available, is_torch_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_pytesseract_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class __A( unittest.TestCase ): """simple docstring""" def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=7 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=18 , SCREAMING_SNAKE_CASE_=30 , SCREAMING_SNAKE_CASE_=4_00 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=True , ): UpperCamelCase__ = size if size is not None else {"""height""": 18, """width""": 18} UpperCamelCase__ = parent UpperCamelCase__ = batch_size UpperCamelCase__ = num_channels UpperCamelCase__ = image_size UpperCamelCase__ = min_resolution UpperCamelCase__ = max_resolution UpperCamelCase__ = do_resize UpperCamelCase__ = size UpperCamelCase__ = apply_ocr def UpperCAmelCase_ (self ): return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr} @require_torch @require_pytesseract class __A( __lowerCamelCase , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = LayoutLMvaImageProcessor if is_pytesseract_available() else None def UpperCAmelCase_ (self ): UpperCamelCase__ = LayoutLMvaImageProcessingTester(self ) @property def UpperCAmelCase_ (self ): return self.image_processor_tester.prepare_image_processor_dict() def UpperCAmelCase_ (self ): UpperCamelCase__ = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , """do_resize""" ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , """size""" ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , """apply_ocr""" ) ) def UpperCAmelCase_ (self ): UpperCamelCase__ = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"""height""": 18, """width""": 18} ) UpperCamelCase__ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 ) self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} ) def UpperCAmelCase_ (self ): pass def UpperCAmelCase_ (self ): # Initialize image_processing UpperCamelCase__ = self.image_processing_class(**self.image_processor_dict ) # create random PIL images UpperCamelCase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE_ ) for image in image_inputs: self.assertIsInstance(SCREAMING_SNAKE_CASE_ , Image.Image ) # Test not batched input UpperCamelCase__ = image_processing(image_inputs[0] , return_tensors="""pt""" ) self.assertEqual( encoding.pixel_values.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) self.assertIsInstance(encoding.words , SCREAMING_SNAKE_CASE_ ) self.assertIsInstance(encoding.boxes , SCREAMING_SNAKE_CASE_ ) # Test batched UpperCamelCase__ = image_processing(SCREAMING_SNAKE_CASE_ , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) def UpperCAmelCase_ (self ): # Initialize image_processing UpperCamelCase__ = 
self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors UpperCamelCase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE_ , numpify=SCREAMING_SNAKE_CASE_ ) for image in image_inputs: self.assertIsInstance(SCREAMING_SNAKE_CASE_ , np.ndarray ) # Test not batched input UpperCamelCase__ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) # Test batched UpperCamelCase__ = image_processing(SCREAMING_SNAKE_CASE_ , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) def UpperCAmelCase_ (self ): # Initialize image_processing UpperCamelCase__ = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors UpperCamelCase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE_ , torchify=SCREAMING_SNAKE_CASE_ ) for image in image_inputs: self.assertIsInstance(SCREAMING_SNAKE_CASE_ , torch.Tensor ) # Test not batched input UpperCamelCase__ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) # Test batched UpperCamelCase__ = image_processing(SCREAMING_SNAKE_CASE_ , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) def UpperCAmelCase_ (self ): # with apply_OCR = True UpperCamelCase__ = LayoutLMvaImageProcessor() from datasets import load_dataset UpperCamelCase__ = load_dataset("""hf-internal-testing/fixtures_docvqa""" , split="""test""" ) UpperCamelCase__ = Image.open(ds[0]["""file"""] ).convert("""RGB""" ) UpperCamelCase__ = image_processing(SCREAMING_SNAKE_CASE_ , return_tensors="""pt""" ) self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_24, 2_24) ) self.assertEqual(len(encoding.words ) , len(encoding.boxes ) ) # fmt: off # the words and boxes were obtained with Tesseract 4.1.1 UpperCamelCase__ = [["""11:14""", """to""", """11:39""", """a.m""", """11:39""", """to""", """11:44""", """a.m.""", """11:44""", """a.m.""", """to""", """12:25""", """p.m.""", """12:25""", """to""", """12:58""", """p.m.""", """12:58""", """to""", """4:00""", """p.m.""", """2:00""", """to""", """5:00""", """p.m.""", """Coffee""", """Break""", """Coffee""", """will""", """be""", """served""", """for""", """men""", """and""", """women""", """in""", """the""", """lobby""", """adjacent""", """to""", """exhibit""", """area.""", """Please""", """move""", """into""", """exhibit""", """area.""", """(Exhibits""", """Open)""", """TRRF""", """GENERAL""", """SESSION""", """(PART""", """|)""", """Presiding:""", """Lee""", """A.""", """Waller""", """TRRF""", """Vice""", """President""", """“Introductory""", """Remarks”""", """Lee""", """A.""", """Waller,""", """TRRF""", """Vice""", """Presi-""", """dent""", """Individual""", """Interviews""", """with""", 
"""TRRF""", """Public""", """Board""", """Members""", """and""", """Sci-""", """entific""", """Advisory""", """Council""", """Mem-""", """bers""", """Conducted""", """by""", """TRRF""", """Treasurer""", """Philip""", """G.""", """Kuehn""", """to""", """get""", """answers""", """which""", """the""", """public""", """refrigerated""", """warehousing""", """industry""", """is""", """looking""", """for.""", """Plus""", """questions""", """from""", """the""", """floor.""", """Dr.""", """Emil""", """M.""", """Mrak,""", """University""", """of""", """Cal-""", """ifornia,""", """Chairman,""", """TRRF""", """Board;""", """Sam""", """R.""", """Cecil,""", """University""", """of""", """Georgia""", """College""", """of""", """Agriculture;""", """Dr.""", """Stanley""", """Charm,""", """Tufts""", """University""", """School""", """of""", """Medicine;""", """Dr.""", """Robert""", """H.""", """Cotton,""", """ITT""", """Continental""", """Baking""", """Company;""", """Dr.""", """Owen""", """Fennema,""", """University""", """of""", """Wis-""", """consin;""", """Dr.""", """Robert""", """E.""", """Hardenburg,""", """USDA.""", """Questions""", """and""", """Answers""", """Exhibits""", """Open""", """Capt.""", """Jack""", """Stoney""", """Room""", """TRRF""", """Scientific""", """Advisory""", """Council""", """Meeting""", """Ballroom""", """Foyer"""]] # noqa: E231 UpperCamelCase__ = [[[1_41, 57, 2_14, 69], [2_28, 58, 2_52, 69], [1_41, 75, 2_16, 88], [2_30, 79, 2_80, 88], [1_42, 2_60, 2_18, 2_73], [2_30, 2_61, 2_55, 2_73], [1_43, 2_79, 2_18, 2_90], [2_31, 2_82, 2_90, 2_91], [1_43, 3_42, 2_18, 3_54], [2_31, 3_45, 2_89, 3_55], [2_02, 3_62, 2_27, 3_73], [1_43, 3_79, 2_20, 3_92], [2_31, 3_82, 2_91, 3_94], [1_44, 7_14, 2_20, 7_26], [2_31, 7_15, 2_56, 7_26], [1_44, 7_32, 2_20, 7_45], [2_32, 7_36, 2_91, 7_47], [1_44, 7_69, 2_18, 7_82], [2_31, 7_70, 2_56, 7_82], [1_41, 7_88, 2_02, 8_01], [2_15, 7_91, 2_74, 8_04], [1_43, 8_26, 2_04, 8_38], [2_15, 8_26, 2_40, 8_38], [1_42, 8_44, 2_02, 8_57], [2_15, 8_47, 2_74, 8_59], [3_34, 57, 4_27, 69], [4_40, 57, 5_22, 69], [3_69, 75, 4_61, 88], [4_69, 75, 5_16, 88], [5_28, 76, 5_62, 88], [5_70, 76, 6_67, 88], [6_75, 75, 7_11, 87], [7_21, 79, 7_78, 88], [7_89, 75, 8_40, 88], [3_69, 97, 4_70, 1_07], [4_84, 94, 5_07, 1_06], [5_18, 94, 5_62, 1_07], [5_76, 94, 6_55, 1_10], [6_68, 94, 7_92, 1_09], [8_04, 95, 8_29, 1_07], [3_69, 1_13, 4_65, 1_25], [4_77, 1_16, 5_47, 1_25], [5_62, 1_13, 6_58, 1_25], [6_71, 1_16, 7_48, 1_25], [7_61, 1_13, 8_11, 1_25], [3_69, 1_31, 4_65, 1_43], [4_77, 1_33, 5_48, 1_43], [5_63, 1_30, 6_98, 1_45], [7_10, 1_30, 8_02, 1_46], [3_36, 1_71, 4_12, 1_83], [4_23, 1_71, 5_72, 1_83], [5_82, 1_70, 7_16, 1_84], [7_28, 1_71, 8_17, 1_87], [8_29, 1_71, 8_44, 1_86], [3_38, 1_97, 4_82, 2_12], [5_07, 1_96, 5_57, 2_09], [5_69, 1_96, 5_95, 2_08], [6_10, 1_96, 7_02, 2_09], [5_05, 2_14, 5_83, 2_26], [5_95, 2_14, 6_56, 2_27], [6_70, 2_15, 8_07, 2_27], [3_35, 2_59, 5_43, 2_74], [5_56, 2_59, 7_08, 2_72], [3_72, 2_79, 4_22, 2_91], [4_35, 2_79, 4_60, 2_91], [4_74, 2_79, 5_74, 2_92], [5_87, 2_78, 6_64, 2_91], [6_76, 2_78, 7_38, 2_91], [7_51, 2_79, 8_34, 2_91], [3_72, 2_98, 4_34, 3_10], [3_35, 3_41, 4_83, 3_54], [4_97, 3_41, 6_55, 3_54], [6_67, 3_41, 7_28, 3_54], [7_40, 3_41, 8_25, 3_54], [3_35, 3_60, 4_30, 3_72], [4_42, 3_60, 5_34, 3_72], [5_45, 3_59, 6_87, 3_72], [6_97, 3_60, 7_54, 3_72], [7_65, 3_60, 8_23, 3_73], [3_34, 3_78, 4_28, 3_91], [4_40, 3_78, 5_77, 3_94], [5_90, 3_78, 7_05, 3_91], [7_20, 3_78, 8_01, 3_91], [3_34, 3_97, 4_00, 4_09], [3_70, 4_16, 5_29, 4_29], [5_44, 4_16, 5_76, 
4_32], [5_87, 4_16, 6_65, 4_28], [6_77, 4_16, 8_14, 4_29], [3_72, 4_35, 4_52, 4_50], [4_65, 4_34, 4_95, 4_47], [5_11, 4_34, 6_00, 4_47], [6_11, 4_36, 6_37, 4_47], [6_49, 4_36, 6_94, 4_51], [7_05, 4_38, 8_24, 4_47], [3_69, 4_53, 4_52, 4_66], [4_64, 4_54, 5_09, 4_66], [5_22, 4_53, 6_11, 4_69], [6_25, 4_53, 7_92, 4_69], [3_70, 4_72, 5_56, 4_88], [5_70, 4_72, 6_84, 4_87], [6_97, 4_72, 7_18, 4_85], [7_32, 4_72, 8_35, 4_88], [3_69, 4_90, 4_11, 5_03], [4_25, 4_90, 4_84, 5_03], [4_96, 4_90, 6_35, 5_06], [6_45, 4_90, 7_07, 5_03], [7_18, 4_91, 7_61, 5_03], [7_71, 4_90, 8_40, 5_03], [3_36, 5_10, 3_74, 5_21], [3_88, 5_10, 4_47, 5_22], [4_60, 5_10, 4_89, 5_21], [5_03, 5_10, 5_80, 5_22], [5_92, 5_09, 7_36, 5_25], [7_45, 5_09, 7_70, 5_22], [7_81, 5_09, 8_40, 5_22], [3_38, 5_28, 4_34, 5_41], [4_48, 5_28, 5_96, 5_41], [6_09, 5_27, 6_87, 5_40], [7_00, 5_28, 7_92, 5_41], [3_36, 5_46, 3_97, 5_59], [4_07, 5_46, 4_31, 5_59], [4_43, 5_46, 5_25, 5_60], [5_37, 5_46, 6_80, 5_62], [6_88, 5_46, 7_14, 5_59], [7_22, 5_46, 8_37, 5_62], [3_36, 5_65, 4_49, 5_81], [4_61, 5_65, 4_85, 5_77], [4_97, 5_65, 6_65, 5_81], [6_81, 5_65, 7_18, 5_77], [7_32, 5_65, 8_37, 5_80], [3_37, 5_84, 4_38, 5_97], [4_52, 5_83, 5_21, 5_96], [5_35, 5_84, 6_77, 5_99], [6_90, 5_83, 7_87, 5_96], [8_01, 5_83, 8_25, 5_96], [3_38, 6_02, 4_78, 6_15], [4_92, 6_02, 5_30, 6_14], [5_43, 6_02, 6_38, 6_15], [6_50, 6_02, 6_76, 6_14], [6_88, 6_02, 7_88, 6_15], [8_02, 6_02, 8_43, 6_14], [3_37, 6_21, 5_02, 6_33], [5_16, 6_21, 6_15, 6_37], [6_29, 6_21, 7_74, 6_36], [7_89, 6_21, 8_27, 6_33], [3_37, 6_39, 4_18, 6_52], [4_32, 6_40, 5_71, 6_53], [5_87, 6_39, 7_31, 6_55], [7_43, 6_39, 7_69, 6_52], [7_80, 6_39, 8_41, 6_52], [3_38, 6_58, 4_40, 6_73], [4_55, 6_58, 4_91, 6_70], [5_08, 6_58, 6_02, 6_71], [6_16, 6_58, 6_38, 6_70], [6_54, 6_58, 8_35, 6_74], [3_37, 6_77, 4_29, 6_89], [3_37, 7_14, 4_82, 7_26], [4_95, 7_14, 5_48, 7_26], [5_61, 7_14, 6_83, 7_26], [3_38, 7_70, 4_61, 7_82], [4_74, 7_69, 5_54, 7_85], [4_89, 7_88, 5_62, 8_03], [5_76, 7_88, 6_43, 8_01], [6_56, 7_87, 7_51, 8_04], [7_64, 7_88, 8_44, 8_01], [3_34, 8_25, 4_21, 8_38], [4_30, 8_24, 5_74, 8_38], [5_84, 8_24, 7_23, 8_41], [3_35, 8_44, 4_50, 8_57], [4_64, 8_43, 5_83, 8_60], [6_28, 8_62, 7_55, 8_75], [7_69, 8_61, 8_48, 8_78]]] # noqa: E231 # fmt: on self.assertListEqual(encoding.words , SCREAMING_SNAKE_CASE_ ) self.assertListEqual(encoding.boxes , SCREAMING_SNAKE_CASE_ ) # with apply_OCR = False UpperCamelCase__ = LayoutLMvaImageProcessor(apply_ocr=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = image_processing(SCREAMING_SNAKE_CASE_ , return_tensors="""pt""" ) self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_24, 2_24) )
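# A hedged usage sketch for the image processor exercised by the tests above
# (the mangled name `LayoutLMvaImageProcessor` corresponds to transformers'
# `LayoutLMv3ImageProcessor`). With apply_ocr=False the processor only resizes
# and normalizes the image; with apply_ocr=True (and pytesseract installed) it
# additionally returns recognized words and normalized bounding boxes.
# Requires Pillow and torch; the blank image below is a placeholder input.
from PIL import Image
from transformers import LayoutLMv3ImageProcessor

processor = LayoutLMv3ImageProcessor(apply_ocr=False)
image = Image.new("RGB", (640, 480), color="white")  # placeholder document page
encoding = processor(image, return_tensors="pt")
print(encoding.pixel_values.shape)  # torch.Size([1, 3, 224, 224]) with the default size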
86
from .glue import GlueDataset, GlueDataTrainingArguments from .language_modeling import ( LineByLineTextDataset, LineByLineWithRefDataset, LineByLineWithSOPTextDataset, TextDataset, TextDatasetForNextSentencePrediction, ) from .squad import SquadDataset, SquadDataTrainingArguments
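# A hedged sketch of how these legacy re-exports are typically consumed; the
# data_dir below is illustrative and must contain the standard GLUE TSV files
# for the chosen task before this runs.
from transformers import AutoTokenizer
from transformers.data.datasets import GlueDataset, GlueDataTrainingArguments

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
data_args = GlueDataTrainingArguments(task_name="mrpc", data_dir="./glue_data/MRPC", max_seq_length=128)
dataset = GlueDataset(data_args, tokenizer=tokenizer)
print(len(dataset), dataset[0].input_ids[:10])  # features are pre-tokenized InputFeatures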
86
1
import os import tempfile import unittest from transformers import NezhaConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, NezhaForMaskedLM, NezhaForMultipleChoice, NezhaForNextSentencePrediction, NezhaForPreTraining, NezhaForQuestionAnswering, NezhaForSequenceClassification, NezhaForTokenClassification, NezhaModel, ) from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST class __A: """simple docstring""" def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=13 , SCREAMING_SNAKE_CASE_=7 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=99 , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=5 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=37 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=1_28 , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=16 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=0.02 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=None , ): UpperCamelCase__ = parent UpperCamelCase__ = batch_size UpperCamelCase__ = seq_length UpperCamelCase__ = is_training UpperCamelCase__ = use_input_mask UpperCamelCase__ = use_token_type_ids UpperCamelCase__ = use_labels UpperCamelCase__ = vocab_size UpperCamelCase__ = hidden_size UpperCamelCase__ = num_hidden_layers UpperCamelCase__ = num_attention_heads UpperCamelCase__ = intermediate_size UpperCamelCase__ = hidden_act UpperCamelCase__ = hidden_dropout_prob UpperCamelCase__ = attention_probs_dropout_prob UpperCamelCase__ = max_position_embeddings UpperCamelCase__ = type_vocab_size UpperCamelCase__ = type_sequence_label_size UpperCamelCase__ = initializer_range UpperCamelCase__ = num_labels UpperCamelCase__ = num_choices UpperCamelCase__ = scope def UpperCAmelCase_ (self ): UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCamelCase__ = None if self.use_input_mask: UpperCamelCase__ = random_attention_mask([self.batch_size, self.seq_length] ) UpperCamelCase__ = None if self.use_token_type_ids: UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) UpperCamelCase__ = None UpperCamelCase__ = None UpperCamelCase__ = None if self.use_labels: UpperCamelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCamelCase__ = ids_tensor([self.batch_size] , self.num_choices ) UpperCamelCase__ = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCAmelCase_ (self ): return NezhaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , 
attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=SCREAMING_SNAKE_CASE_ , initializer_range=self.initializer_range , ) def UpperCAmelCase_ (self ): ( ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ) = self.prepare_config_and_inputs() UpperCamelCase__ = True UpperCamelCase__ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = NezhaModel(config=SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ): UpperCamelCase__ = True UpperCamelCase__ = NezhaModel(SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() UpperCamelCase__ = model( SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , encoder_hidden_states=SCREAMING_SNAKE_CASE_ , encoder_attention_mask=SCREAMING_SNAKE_CASE_ , ) UpperCamelCase__ = model( SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , encoder_hidden_states=SCREAMING_SNAKE_CASE_ , ) UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = NezhaForMaskedLM(config=SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = NezhaForNextSentencePrediction(config=SCREAMING_SNAKE_CASE_ ) 
model.to(SCREAMING_SNAKE_CASE_ ) model.eval() UpperCamelCase__ = model( SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) ) def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = NezhaForPreTraining(config=SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() UpperCamelCase__ = model( SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ , next_sentence_label=SCREAMING_SNAKE_CASE_ , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) ) def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = NezhaForQuestionAnswering(config=SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() UpperCamelCase__ = model( SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , start_positions=SCREAMING_SNAKE_CASE_ , end_positions=SCREAMING_SNAKE_CASE_ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = self.num_labels UpperCamelCase__ = NezhaForSequenceClassification(SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = self.num_labels UpperCamelCase__ = NezhaForTokenClassification(config=SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = self.num_choices UpperCamelCase__ = NezhaForMultipleChoice(config=SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() UpperCamelCase__ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() UpperCamelCase__ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() UpperCamelCase__ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() UpperCamelCase__ = 
model( SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def UpperCAmelCase_ (self ): UpperCamelCase__ = self.prepare_config_and_inputs() ( ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ) = config_and_inputs UpperCamelCase__ = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class __A( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = ( ( NezhaModel, NezhaForMaskedLM, NezhaForMultipleChoice, NezhaForNextSentencePrediction, NezhaForPreTraining, NezhaForQuestionAnswering, NezhaForSequenceClassification, NezhaForTokenClassification, ) if is_torch_available() else () ) SCREAMING_SNAKE_CASE__ = ( { """feature-extraction""": NezhaModel, """fill-mask""": NezhaForMaskedLM, """question-answering""": NezhaForQuestionAnswering, """text-classification""": NezhaForSequenceClassification, """token-classification""": NezhaForTokenClassification, """zero-shot""": NezhaForSequenceClassification, } if is_torch_available() else {} ) SCREAMING_SNAKE_CASE__ = True def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=False ): UpperCamelCase__ = super()._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_labels=SCREAMING_SNAKE_CASE_ ) if return_labels: if model_class in get_values(SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=SCREAMING_SNAKE_CASE_ ) return inputs_dict def UpperCAmelCase_ (self ): UpperCamelCase__ = NezhaModelTester(self ) UpperCamelCase__ = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , hidden_size=37 ) def UpperCAmelCase_ (self ): self.config_tester.run_common_tests() def UpperCAmelCase_ (self ): UpperCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self ): UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self ): # This regression test was failing with PyTorch < 1.3 ( ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ) = self.model_tester.prepare_config_and_inputs_for_decoder() UpperCamelCase__ = None self.model_tester.create_and_check_model_as_decoder( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ) def UpperCAmelCase_ (self ): UpperCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self ): UpperCamelCase__ = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_for_multiple_choice(*SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self ): UpperCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_next_sequence_prediction(*SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self ): UpperCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self ): UpperCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self ): UpperCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self ): UpperCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*SCREAMING_SNAKE_CASE_ ) @slow def UpperCAmelCase_ (self ): for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCamelCase__ = NezhaModel.from_pretrained(SCREAMING_SNAKE_CASE_ ) self.assertIsNotNone(SCREAMING_SNAKE_CASE_ ) @slow @require_torch_gpu def UpperCAmelCase_ (self ): UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # NezhaForMultipleChoice behaves incorrectly in JIT environments. if model_class == NezhaForMultipleChoice: return UpperCamelCase__ = True UpperCamelCase__ = model_class(config=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = torch.jit.trace( SCREAMING_SNAKE_CASE_ , (inputs_dict["""input_ids"""].to("""cpu""" ), inputs_dict["""attention_mask"""].to("""cpu""" )) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(SCREAMING_SNAKE_CASE_ , os.path.join(SCREAMING_SNAKE_CASE_ , """bert.pt""" ) ) UpperCamelCase__ = torch.jit.load(os.path.join(SCREAMING_SNAKE_CASE_ , """bert.pt""" ) , map_location=SCREAMING_SNAKE_CASE_ ) loaded(inputs_dict["""input_ids"""].to(SCREAMING_SNAKE_CASE_ ) , inputs_dict["""attention_mask"""].to(SCREAMING_SNAKE_CASE_ ) ) @require_torch class __A( unittest.TestCase ): """simple docstring""" @slow def UpperCAmelCase_ (self ): UpperCamelCase__ = NezhaModel.from_pretrained("""sijunhe/nezha-cn-base""" ) UpperCamelCase__ = torch.tensor([[0, 1, 2, 3, 4, 5]] ) UpperCamelCase__ = torch.tensor([[0, 1, 1, 1, 1, 1]] ) with torch.no_grad(): UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ )[0] UpperCamelCase__ = torch.Size((1, 6, 7_68) ) self.assertEqual(output.shape , SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = torch.tensor([[[0.0685, 0.2441, 0.1102], [0.0600, 0.1906, 0.1349], [0.0221, 0.0819, 0.0586]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , SCREAMING_SNAKE_CASE_ , atol=1E-4 ) ) @slow def UpperCAmelCase_ (self ): UpperCamelCase__ = NezhaForMaskedLM.from_pretrained("""sijunhe/nezha-cn-base""" ) UpperCamelCase__ = torch.tensor([[0, 1, 2, 3, 4, 5]] ) UpperCamelCase__ = torch.tensor([[1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ )[0] UpperCamelCase__ = torch.Size((1, 6, 2_11_28) ) self.assertEqual(output.shape , SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = torch.tensor( [[-2.7939, -1.7902, -2.2189], [-2.8585, -1.8908, -2.3723], [-2.6499, -1.7750, -2.2558]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , 
SCREAMING_SNAKE_CASE_ , atol=1E-4 ) )
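# The inference pattern the integration test above verifies, reduced to a short
# standalone sketch; the checkpoint name comes from the test itself, and running
# this downloads the model.
import torch
from transformers import NezhaModel

model = NezhaModel.from_pretrained("sijunhe/nezha-cn-base")
input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
attention_mask = torch.ones_like(input_ids)
with torch.no_grad():
    output = model(input_ids, attention_mask=attention_mask)[0]
print(output.shape)  # torch.Size([1, 6, 768]), matching the expected_shape assertion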
86
import math from typing import Callable, List, Optional, Union import numpy as np import PIL import torch from PIL import Image from transformers import CLIPTextModel, CLIPTokenizer from diffusers.models import AutoencoderKL, UNetaDConditionModel from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler def __magic_name__ ( __a : int , __a : List[str] , __a : str=[] ): '''simple docstring''' UpperCamelCase__ = size[0] - overlap_pixels * 2 UpperCamelCase__ = size[1] - overlap_pixels * 2 for letter in ["l", "r"]: if letter in remove_borders: size_x += overlap_pixels for letter in ["t", "b"]: if letter in remove_borders: size_y += overlap_pixels UpperCamelCase__ = np.ones((size_y, size_x) , dtype=np.uinta ) * 255 UpperCamelCase__ = np.pad(__a , mode="""linear_ramp""" , pad_width=__a , end_values=0 ) if "l" in remove_borders: UpperCamelCase__ = mask[:, overlap_pixels : mask.shape[1]] if "r" in remove_borders: UpperCamelCase__ = mask[:, 0 : mask.shape[1] - overlap_pixels] if "t" in remove_borders: UpperCamelCase__ = mask[overlap_pixels : mask.shape[0], :] if "b" in remove_borders: UpperCamelCase__ = mask[0 : mask.shape[0] - overlap_pixels, :] return mask def __magic_name__ ( __a : int , __a : Dict , __a : Optional[int] ): '''simple docstring''' return max(__a , min(__a , __a ) ) def __magic_name__ ( __a : [int] , __a : [int] , __a : [int] ): '''simple docstring''' return ( clamp(rect[0] , min[0] , max[0] ), clamp(rect[1] , min[1] , max[1] ), clamp(rect[2] , min[0] , max[0] ), clamp(rect[3] , min[1] , max[1] ), ) def __magic_name__ ( __a : [int] , __a : int , __a : [int] ): '''simple docstring''' UpperCamelCase__ = list(__a ) rect[0] -= overlap rect[1] -= overlap rect[2] += overlap rect[3] += overlap UpperCamelCase__ = clamp_rect(__a , [0, 0] , [image_size[0], image_size[1]] ) return rect def __magic_name__ ( __a : Optional[int] , __a : Tuple , __a : str , __a : List[Any] ): '''simple docstring''' UpperCamelCase__ = Image.new("""RGB""" , (tile.size[0] + original_slice, tile.size[1]) ) result.paste( original_image.resize((tile.size[0], tile.size[1]) , Image.BICUBIC ).crop( (slice_x, 0, slice_x + original_slice, tile.size[1]) ) , (0, 0) , ) result.paste(__a , (original_slice, 0) ) return result def __magic_name__ ( __a : int , __a : int ): '''simple docstring''' UpperCamelCase__ = (original_image_slice * 4, 0, tile.size[0], tile.size[1]) UpperCamelCase__ = tile.crop(__a ) return tile def __magic_name__ ( __a : List[str] , __a : Any ): '''simple docstring''' UpperCamelCase__ = n % d return n - divisor class __A( __lowerCamelCase ): """simple docstring""" def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 3_50 , ): super().__init__( vae=SCREAMING_SNAKE_CASE_ , text_encoder=SCREAMING_SNAKE_CASE_ , tokenizer=SCREAMING_SNAKE_CASE_ , unet=SCREAMING_SNAKE_CASE_ , low_res_scheduler=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ , max_noise_level=SCREAMING_SNAKE_CASE_ , ) def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ): torch.manual_seed(0 ) UpperCamelCase__ = ( min(image.size[0] - (tile_size + original_image_slice) , x * tile_size ), 
min(image.size[1] - (tile_size + original_image_slice) , y * tile_size ), min(image.size[0] , (x + 1) * tile_size ), min(image.size[1] , (y + 1) * tile_size ), ) UpperCamelCase__ = add_overlap_rect(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , image.size ) UpperCamelCase__ = image.crop(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0] UpperCamelCase__ = translated_slice_x - (original_image_slice / 2) UpperCamelCase__ = max(0 , SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = squeeze_tile(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = to_input.size UpperCamelCase__ = to_input.resize((tile_size, tile_size) , Image.BICUBIC ) UpperCamelCase__ = super(SCREAMING_SNAKE_CASE_ , self ).__call__(image=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ).images[0] UpperCamelCase__ = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4) , Image.BICUBIC ) UpperCamelCase__ = unsqueeze_tile(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4) , Image.BICUBIC ) UpperCamelCase__ = [] if x == 0: remove_borders.append("""l""" ) elif crop_rect[2] == image.size[0]: remove_borders.append("""r""" ) if y == 0: remove_borders.append("""t""" ) elif crop_rect[3] == image.size[1]: remove_borders.append("""b""" ) UpperCamelCase__ = Image.fromarray( make_transparency_mask( (upscaled_tile.size[0], upscaled_tile.size[1]) , tile_border * 4 , remove_borders=SCREAMING_SNAKE_CASE_ ) , mode="""L""" , ) final_image.paste( SCREAMING_SNAKE_CASE_ , (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4) , SCREAMING_SNAKE_CASE_ ) @torch.no_grad() def __call__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 75 , SCREAMING_SNAKE_CASE_ = 9.0 , SCREAMING_SNAKE_CASE_ = 50 , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 1 , SCREAMING_SNAKE_CASE_ = 0.0 , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 1 , SCREAMING_SNAKE_CASE_ = 1_28 , SCREAMING_SNAKE_CASE_ = 32 , SCREAMING_SNAKE_CASE_ = 32 , ): UpperCamelCase__ = Image.new("""RGB""" , (image.size[0] * 4, image.size[1] * 4) ) UpperCamelCase__ = math.ceil(image.size[0] / tile_size ) UpperCamelCase__ = math.ceil(image.size[1] / tile_size ) UpperCamelCase__ = tcx * tcy UpperCamelCase__ = 0 for y in range(SCREAMING_SNAKE_CASE_ ): for x in range(SCREAMING_SNAKE_CASE_ ): self._process_tile( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , prompt=SCREAMING_SNAKE_CASE_ , num_inference_steps=SCREAMING_SNAKE_CASE_ , guidance_scale=SCREAMING_SNAKE_CASE_ , noise_level=SCREAMING_SNAKE_CASE_ , negative_prompt=SCREAMING_SNAKE_CASE_ , num_images_per_prompt=SCREAMING_SNAKE_CASE_ , eta=SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , latents=SCREAMING_SNAKE_CASE_ , ) current_count += 1 if callback is not None: callback({"""progress""": current_count / total_tile_count, """image""": final_image} ) return final_image def __magic_name__ ( ): '''simple docstring''' UpperCamelCase__ = """stabilityai/stable-diffusion-x4-upscaler""" UpperCamelCase__ = StableDiffusionTiledUpscalePipeline.from_pretrained(__a , revision="""fp16""" , torch_dtype=torch.floataa ) UpperCamelCase__ = pipe.to("""cuda""" ) UpperCamelCase__ = 
Image.open("""../../docs/source/imgs/diffusers_library.jpg""" ) def callback(__a : Optional[int] ): print(f"progress: {obj['progress']:.4f}" ) obj["image"].save("""diffusers_library_progress.jpg""" ) UpperCamelCase__ = pipe(image=__a , prompt="""Black font, white background, vector""" , noise_level=40 , callback=__a ) final_image.save("""diffusers_library.jpg""" ) if __name__ == "__main__": main()
86
1
import unittest from queue import Empty from threading import Thread from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available from transformers.testing_utils import CaptureStdout, require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers import AutoModelForCausalLM @require_torch class __A( unittest.TestCase ): """simple docstring""" def UpperCAmelCase_ (self ): UpperCamelCase__ = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ) UpperCamelCase__ = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = -1 UpperCamelCase__ = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = model.generate(SCREAMING_SNAKE_CASE_ , max_new_tokens=10 , do_sample=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = tokenizer.decode(greedy_ids[0] ) with CaptureStdout() as cs: UpperCamelCase__ = TextStreamer(SCREAMING_SNAKE_CASE_ ) model.generate(SCREAMING_SNAKE_CASE_ , max_new_tokens=10 , do_sample=SCREAMING_SNAKE_CASE_ , streamer=SCREAMING_SNAKE_CASE_ ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer UpperCamelCase__ = cs.out[:-1] self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self ): UpperCamelCase__ = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ) UpperCamelCase__ = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = -1 UpperCamelCase__ = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = model.generate(SCREAMING_SNAKE_CASE_ , max_new_tokens=10 , do_sample=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = tokenizer.decode(greedy_ids[0] ) UpperCamelCase__ = TextIteratorStreamer(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = {"""input_ids""": input_ids, """max_new_tokens""": 10, """do_sample""": False, """streamer""": streamer} UpperCamelCase__ = Thread(target=model.generate , kwargs=SCREAMING_SNAKE_CASE_ ) thread.start() UpperCamelCase__ = """""" for new_text in streamer: streamer_text += new_text self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self ): UpperCamelCase__ = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ) UpperCamelCase__ = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = -1 UpperCamelCase__ = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = model.generate(SCREAMING_SNAKE_CASE_ , max_new_tokens=10 , do_sample=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = greedy_ids[:, input_ids.shape[1] :] UpperCamelCase__ = tokenizer.decode(new_greedy_ids[0] ) with CaptureStdout() as cs: UpperCamelCase__ = TextStreamer(SCREAMING_SNAKE_CASE_ , skip_prompt=SCREAMING_SNAKE_CASE_ ) model.generate(SCREAMING_SNAKE_CASE_ , max_new_tokens=10 , do_sample=SCREAMING_SNAKE_CASE_ , streamer=SCREAMING_SNAKE_CASE_ ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer UpperCamelCase__ = cs.out[:-1] self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self ): # Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. 
Must be tested # with actual models -- the dummy models' tokenizers are not aligned with their models, and # `skip_special_tokens=True` has no effect on them UpperCamelCase__ = AutoTokenizer.from_pretrained("""distilgpt2""" ) UpperCamelCase__ = AutoModelForCausalLM.from_pretrained("""distilgpt2""" ).to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = -1 UpperCamelCase__ = torch.ones((1, 5) , device=SCREAMING_SNAKE_CASE_ ).long() * model.config.bos_token_id with CaptureStdout() as cs: UpperCamelCase__ = TextStreamer(SCREAMING_SNAKE_CASE_ , skip_special_tokens=SCREAMING_SNAKE_CASE_ ) model.generate(SCREAMING_SNAKE_CASE_ , max_new_tokens=1 , do_sample=SCREAMING_SNAKE_CASE_ , streamer=SCREAMING_SNAKE_CASE_ ) # The prompt contains a special token, so the streamer should not print it. As such, the output text, when # re-tokenized, must only contain one token UpperCamelCase__ = cs.out[:-1] # Remove the final "\n" UpperCamelCase__ = tokenizer(SCREAMING_SNAKE_CASE_ , return_tensors="""pt""" ) self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) ) def UpperCAmelCase_ (self ): UpperCamelCase__ = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ) UpperCamelCase__ = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = -1 UpperCamelCase__ = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = TextIteratorStreamer(SCREAMING_SNAKE_CASE_ , timeout=0.001 ) UpperCamelCase__ = {"""input_ids""": input_ids, """max_new_tokens""": 10, """do_sample""": False, """streamer""": streamer} UpperCamelCase__ = Thread(target=model.generate , kwargs=SCREAMING_SNAKE_CASE_ ) thread.start() # The streamer will timeout after 0.001 seconds, so an exception will be raised with self.assertRaises(SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = """""" for new_text in streamer: streamer_text += new_text
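# The streaming pattern these tests exercise, reduced to its core: generation
# runs in a background thread while the main thread consumes decoded text from
# the TextIteratorStreamer (the tiny test checkpoint keeps this sketch fast).
from threading import Thread
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
inputs = tokenizer("Hello", return_tensors="pt")
streamer = TextIteratorStreamer(tokenizer)
thread = Thread(target=model.generate,
                kwargs={**inputs, "max_new_tokens": 10, "do_sample": False, "streamer": streamer})
thread.start()
generated = "".join(chunk for chunk in streamer)  # iteration blocks until generation ends
thread.join()
print(generated)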
86
import inspect from typing import Callable, List, Optional, Union import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from diffusers import DiffusionPipeline from diffusers.models import AutoencoderKL, UNetaDConditionModel from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler from diffusers.utils import logging lowerCamelCase_ = logging.get_logger(__name__) # pylint: disable=invalid-name class __A( __lowerCamelCase ): """simple docstring""" def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ): super().__init__() self.register_modules( vae=SCREAMING_SNAKE_CASE_ , text_encoder=SCREAMING_SNAKE_CASE_ , tokenizer=SCREAMING_SNAKE_CASE_ , unet=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ , safety_checker=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , ) def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ = "auto" ): if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory UpperCamelCase__ = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self ): self.enable_attention_slicing(SCREAMING_SNAKE_CASE_ ) @torch.no_grad() def __call__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 5_12 , SCREAMING_SNAKE_CASE_ = 5_12 , SCREAMING_SNAKE_CASE_ = 50 , SCREAMING_SNAKE_CASE_ = 7.5 , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 1 , SCREAMING_SNAKE_CASE_ = 0.0 , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = "pil" , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 1 , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , ): if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = 1 elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = len(SCREAMING_SNAKE_CASE_ ) else: raise ValueError(F"`prompt` has to be of type `str` or `list` but is {type(SCREAMING_SNAKE_CASE_ )}" ) if height % 8 != 0 or width % 8 != 0: raise ValueError(F"`height` and `width` have to be divisible by 8 but are {height} and {width}." ) if (callback_steps is None) or ( callback_steps is not None and (not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) or callback_steps <= 0) ): raise ValueError( F"`callback_steps` has to be a positive integer but is {callback_steps} of type" F" {type(SCREAMING_SNAKE_CASE_ )}." 
) # get prompt text embeddings UpperCamelCase__ = self.tokenizer( SCREAMING_SNAKE_CASE_ , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , ) UpperCamelCase__ = text_inputs.input_ids if text_input_ids.shape[-1] > self.tokenizer.model_max_length: UpperCamelCase__ = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] ) logger.warning( """The following part of your input was truncated because CLIP can only handle sequences up to""" F" {self.tokenizer.model_max_length} tokens: {removed_text}" ) UpperCamelCase__ = text_input_ids[:, : self.tokenizer.model_max_length] if text_embeddings is None: UpperCamelCase__ = self.text_encoder(text_input_ids.to(self.device ) )[0] # duplicate text embeddings for each generation per prompt, using mps friendly method UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = text_embeddings.shape UpperCamelCase__ = text_embeddings.repeat(1 , SCREAMING_SNAKE_CASE_ , 1 ) UpperCamelCase__ = text_embeddings.view(bs_embed * num_images_per_prompt , SCREAMING_SNAKE_CASE_ , -1 ) # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. UpperCamelCase__ = guidance_scale > 1.0 # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: UpperCamelCase__ = 42 if negative_prompt is None: UpperCamelCase__ = [""""""] elif type(SCREAMING_SNAKE_CASE_ ) is not type(SCREAMING_SNAKE_CASE_ ): raise TypeError( F"`negative_prompt` should be the same type to `prompt`, but got {type(SCREAMING_SNAKE_CASE_ )} !=" F" {type(SCREAMING_SNAKE_CASE_ )}." ) elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = [negative_prompt] elif batch_size != len(SCREAMING_SNAKE_CASE_ ): raise ValueError( F"`negative_prompt`: {negative_prompt} has batch size {len(SCREAMING_SNAKE_CASE_ )}, but `prompt`:" F" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" """ the batch size of `prompt`.""" ) else: UpperCamelCase__ = negative_prompt UpperCamelCase__ = text_input_ids.shape[-1] UpperCamelCase__ = self.tokenizer( SCREAMING_SNAKE_CASE_ , padding="""max_length""" , max_length=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , return_tensors="""pt""" , ) UpperCamelCase__ = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0] # duplicate unconditional embeddings for each generation per prompt, using mps friendly method UpperCamelCase__ = uncond_embeddings.shape[1] UpperCamelCase__ = uncond_embeddings.repeat(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , 1 ) UpperCamelCase__ = uncond_embeddings.view(batch_size * num_images_per_prompt , SCREAMING_SNAKE_CASE_ , -1 ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes UpperCamelCase__ = torch.cat([uncond_embeddings, text_embeddings] ) # get the initial random noise unless the user supplied it # Unlike in other pipelines, latents need to be generated in the target device # for 1-to-1 results reproducibility with the CompVis implementation. # However this currently doesn't work in `mps`. 
UpperCamelCase__ = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8) UpperCamelCase__ = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64) UpperCamelCase__ = text_embeddings.dtype if latents is None: if self.device.type == "mps": # randn does not exist on mps UpperCamelCase__ = torch.randn( SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , device="""cpu""" , dtype=SCREAMING_SNAKE_CASE_ ).to(self.device ) UpperCamelCase__ = torch.randn(SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , device="""cpu""" , dtype=SCREAMING_SNAKE_CASE_ ).to( self.device ) else: UpperCamelCase__ = torch.randn( SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , device=self.device , dtype=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = torch.randn(SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , device=self.device , dtype=SCREAMING_SNAKE_CASE_ ) else: if latents_reference.shape != latents_shape: raise ValueError(F"Unexpected latents shape, got {latents_reference.shape}, expected {latents_shape}" ) UpperCamelCase__ = latents_reference.to(self.device ) UpperCamelCase__ = latents.to(self.device ) # This is the key part of the pipeline where we # try to ensure that the generated images w/ the same seed # but different sizes actually result in similar images UpperCamelCase__ = (latents_shape[3] - latents_shape_reference[3]) // 2 UpperCamelCase__ = (latents_shape[2] - latents_shape_reference[2]) // 2 UpperCamelCase__ = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx UpperCamelCase__ = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy UpperCamelCase__ = 0 if dx < 0 else dx UpperCamelCase__ = 0 if dy < 0 else dy UpperCamelCase__ = max(-dx , 0 ) UpperCamelCase__ = max(-dy , 0 ) UpperCamelCase__ = latents_reference[:, :, dy : dy + h, dx : dx + w] # set timesteps self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ ) # Some schedulers like PNDM have timesteps as arrays # It's more optimized to move all timesteps to correct device beforehand UpperCamelCase__ = self.scheduler.timesteps.to(self.device ) # scale the initial noise by the standard deviation required by the scheduler UpperCamelCase__ = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] UpperCamelCase__ = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() ) UpperCamelCase__ = {} if accepts_eta: UpperCamelCase__ = eta for i, t in enumerate(self.progress_bar(SCREAMING_SNAKE_CASE_ ) ): # expand the latents if we are doing classifier free guidance UpperCamelCase__ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents UpperCamelCase__ = self.scheduler.scale_model_input(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # predict the noise residual UpperCamelCase__ = self.unet(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , encoder_hidden_states=SCREAMING_SNAKE_CASE_ ).sample # perform guidance if do_classifier_free_guidance: UpperCamelCase__ , UpperCamelCase__ = noise_pred.chunk(2 ) UpperCamelCase__ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 UpperCamelCase__ = self.scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ).prev_sample # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = 1 / 0.1_8215 * latents UpperCamelCase__ = self.vae.decode(SCREAMING_SNAKE_CASE_ ).sample UpperCamelCase__ = (image / 2 + 0.5).clamp(0 , 1 ) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 UpperCamelCase__ = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if self.safety_checker is not None: UpperCamelCase__ = self.feature_extractor(self.numpy_to_pil(SCREAMING_SNAKE_CASE_ ) , return_tensors="""pt""" ).to( self.device ) UpperCamelCase__ , UpperCamelCase__ = self.safety_checker( images=SCREAMING_SNAKE_CASE_ , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) ) else: UpperCamelCase__ = None if output_type == "pil": UpperCamelCase__ = self.numpy_to_pil(SCREAMING_SNAKE_CASE_ ) if not return_dict: return (image, has_nsfw_concept) return StableDiffusionPipelineOutput(images=SCREAMING_SNAKE_CASE_ , nsfw_content_detected=SCREAMING_SNAKE_CASE_ )
86
1
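# A readable sketch of the classifier-free guidance update performed in the
# denoising loop above. Tensor shapes and the guidance_scale default are
# assumptions chosen for illustration.
import torch

def cfg_update(noise_pred_uncond: torch.Tensor,
               noise_pred_text: torch.Tensor,
               guidance_scale: float = 7.5) -> torch.Tensor:
    # guidance_scale == 1.0 returns the text-conditioned prediction unchanged
    # (no classifier-free guidance); larger values push the prediction away
    # from the unconditional branch.
    return noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

uncond, text = torch.zeros(1, 4, 8, 8), torch.ones(1, 4, 8, 8)
assert torch.allclose(cfg_update(uncond, text, guidance_scale=1.0), text)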
import os from datetime import datetime as dt from github import Github lowerCamelCase_ = [ '''good first issue''', '''good second issue''', '''good difficult issue''', '''enhancement''', '''new pipeline/model''', '''new scheduler''', '''wip''', ] def __magic_name__ ( ): '''simple docstring''' UpperCamelCase__ = Github(os.environ["""GITHUB_TOKEN"""] ) UpperCamelCase__ = g.get_repo("""huggingface/diffusers""" ) UpperCamelCase__ = repo.get_issues(state="""open""" ) for issue in open_issues: UpperCamelCase__ = sorted(issue.get_comments() , key=lambda __a : i.created_at , reverse=__a ) UpperCamelCase__ = comments[0] if len(__a ) > 0 else None if ( last_comment is not None and last_comment.user.login == "github-actions[bot]" and (dt.utcnow() - issue.updated_at).days > 7 and (dt.utcnow() - issue.created_at).days >= 30 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() ) ): # Closes the issue after 7 days of inactivity since the Stalebot notification. issue.edit(state="""closed""" ) elif ( "stale" in issue.get_labels() and last_comment is not None and last_comment.user.login != "github-actions[bot]" ): # Opens the issue if someone other than Stalebot commented. issue.edit(state="""open""" ) issue.remove_from_labels("""stale""" ) elif ( (dt.utcnow() - issue.updated_at).days > 23 and (dt.utcnow() - issue.created_at).days >= 30 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() ) ): # Post a Stalebot notification after 23 days of inactivity. issue.create_comment( """This issue has been automatically marked as stale because it has not had """ """recent activity. If you think this still needs to be addressed """ """please comment on this thread.\n\nPlease note that issues that do not follow the """ """[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) """ """are likely to be ignored.""" ) issue.add_to_labels("""stale""" ) if __name__ == "__main__": main()
86
from ..utils import DummyObject, requires_backends class __A( metaclass=__lowerCamelCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = ["""torch""", """torchsde"""] def __init__(self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ): requires_backends(self , ["""torch""", """torchsde"""] ) @classmethod def UpperCAmelCase_ (cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ): requires_backends(cls , ["""torch""", """torchsde"""] ) @classmethod def UpperCAmelCase_ (cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ): requires_backends(cls , ["""torch""", """torchsde"""] )
86
1
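# A minimal sketch of the requires_backends guard used by the dummy objects
# above, assuming importlib-based availability checks; the real diffusers
# helper differs in detail, and the class name below is a hypothetical
# placeholder.
import importlib.util

def requires_backends(obj, backends):
    missing = [b for b in backends if importlib.util.find_spec(b) is None]
    if missing:
        name = obj if isinstance(obj, str) else type(obj).__name__
        raise ImportError(f"{name} requires backends that are not installed: {missing}")

class SdeSchedulerPlaceholder:  # hypothetical stand-in for the real class
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "torchsde"])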
from typing import List, Optional, Tuple, Union import torch from ...utils import logging, randn_tensor from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline lowerCamelCase_ = logging.get_logger(__name__) # pylint: disable=invalid-name class __A( __lowerCamelCase ): """simple docstring""" def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): super().__init__() self.register_modules(unet=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ ) @torch.no_grad() def __call__(self , SCREAMING_SNAKE_CASE_ = 1 , SCREAMING_SNAKE_CASE_ = 1_00 , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = True , ): if audio_length_in_s is None: UpperCamelCase__ = self.unet.config.sample_size / self.unet.config.sample_rate UpperCamelCase__ = audio_length_in_s * self.unet.config.sample_rate UpperCamelCase__ = 2 ** len(self.unet.up_blocks ) if sample_size < 3 * down_scale_factor: raise ValueError( F"{audio_length_in_s} is too small. Make sure it's bigger or equal to" F" {3 * down_scale_factor / self.unet.config.sample_rate}." ) UpperCamelCase__ = int(SCREAMING_SNAKE_CASE_ ) if sample_size % down_scale_factor != 0: UpperCamelCase__ = ( (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1 ) * down_scale_factor logger.info( F"{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled" F" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising" """ process.""" ) UpperCamelCase__ = int(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = next(iter(self.unet.parameters() ) ).dtype UpperCamelCase__ = (batch_size, self.unet.config.in_channels, sample_size) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) and len(SCREAMING_SNAKE_CASE_ ) != batch_size: raise ValueError( F"You have passed a list of generators of length {len(SCREAMING_SNAKE_CASE_ )}, but requested an effective batch" F" size of {batch_size}. Make sure the batch size matches the length of the generators." ) UpperCamelCase__ = randn_tensor(SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , device=self.device , dtype=SCREAMING_SNAKE_CASE_ ) # set step values self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ , device=audio.device ) UpperCamelCase__ = self.scheduler.timesteps.to(SCREAMING_SNAKE_CASE_ ) for t in self.progress_bar(self.scheduler.timesteps ): # 1. predict noise model_output UpperCamelCase__ = self.unet(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).sample # 2. compute previous image: x_t -> t_t-1 UpperCamelCase__ = self.scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).prev_sample UpperCamelCase__ = audio.clamp(-1 , 1 ).float().cpu().numpy() UpperCamelCase__ = audio[:, :, :original_sample_size] if not return_dict: return (audio,) return AudioPipelineOutput(audios=SCREAMING_SNAKE_CASE_ )
86
from __future__ import annotations from typing import TypedDict class __A( __lowerCamelCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = 42 SCREAMING_SNAKE_CASE__ = 42 def __magic_name__ ( __a : str ): '''simple docstring''' if not isinstance(__a , __a ): raise TypeError("""The parameter s type must be str.""" ) return [s[i:] + s[:i] for i in range(len(__a ) )] def __magic_name__ ( __a : str ): '''simple docstring''' if not isinstance(__a , __a ): raise TypeError("""The parameter s type must be str.""" ) if not s: raise ValueError("""The parameter s must not be empty.""" ) UpperCamelCase__ = all_rotations(__a ) rotations.sort() # sort the list of rotations in alphabetically order # make a string composed of the last char of each rotation UpperCamelCase__ = { "bwt_string": "".join([word[-1] for word in rotations] ), "idx_original_string": rotations.index(__a ), } return response def __magic_name__ ( __a : str , __a : int ): '''simple docstring''' if not isinstance(__a , __a ): raise TypeError("""The parameter bwt_string type must be str.""" ) if not bwt_string: raise ValueError("""The parameter bwt_string must not be empty.""" ) try: UpperCamelCase__ = int(__a ) except ValueError: raise TypeError( """The parameter idx_original_string type must be int or passive""" """ of cast to int.""" ) if idx_original_string < 0: raise ValueError("""The parameter idx_original_string must not be lower than 0.""" ) if idx_original_string >= len(__a ): raise ValueError( """The parameter idx_original_string must be lower than""" """ len(bwt_string).""" ) UpperCamelCase__ = [""""""] * len(__a ) for _ in range(len(__a ) ): for i in range(len(__a ) ): UpperCamelCase__ = bwt_string[i] + ordered_rotations[i] ordered_rotations.sort() return ordered_rotations[idx_original_string] if __name__ == "__main__": lowerCamelCase_ = '''Provide a string that I will generate its BWT transform: ''' lowerCamelCase_ = input(entry_msg).strip() lowerCamelCase_ = bwt_transform(s) print( f'Burrows Wheeler transform for string \'{s}\' results ' f'in \'{result["bwt_string"]}\'' ) lowerCamelCase_ = reverse_bwt(result['''bwt_string'''], result['''idx_original_string''']) print( f'Reversing Burrows Wheeler transform for entry \'{result["bwt_string"]}\' ' f'we get original string \'{original_string}\'' )
86
1
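# De-obfuscated sketch of the Burrows-Wheeler transform from the
# style_context above, with readable names restored.
def bwt_transform(s: str) -> tuple[str, int]:
    # sort all rotations, then read off the last column
    rotations = sorted(s[i:] + s[:i] for i in range(len(s)))
    bwt_string = "".join(word[-1] for word in rotations)
    return bwt_string, rotations.index(s)

assert bwt_transform("banana") == ("nnbaaa", 3)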
def __magic_name__ ( __a : str ): '''simple docstring''' return credit_card_number.startswith(("""34""", """35""", """37""", """4""", """5""", """6""") ) def __magic_name__ ( __a : str ): '''simple docstring''' UpperCamelCase__ = credit_card_number UpperCamelCase__ = 0 UpperCamelCase__ = len(__a ) - 2 for i in range(__a , -1 , -2 ): # double the value of every second digit UpperCamelCase__ = int(cc_number[i] ) digit *= 2 # If doubling of a number results in a two digit number # i.e greater than 9(e.g., 6 × 2 = 12), # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6), # to get a single digit number. if digit > 9: digit %= 10 digit += 1 UpperCamelCase__ = cc_number[:i] + str(__a ) + cc_number[i + 1 :] total += digit # Sum up the remaining digits for i in range(len(__a ) - 1 , -1 , -2 ): total += int(cc_number[i] ) return total % 10 == 0 def __magic_name__ ( __a : str ): '''simple docstring''' UpperCamelCase__ = f"{credit_card_number} is an invalid credit card number because" if not credit_card_number.isdigit(): print(f"{error_message} it has nonnumerical characters." ) return False if not 13 <= len(__a ) <= 16: print(f"{error_message} of its length." ) return False if not validate_initial_digits(__a ): print(f"{error_message} of its first two digits." ) return False if not luhn_validation(__a ): print(f"{error_message} it fails the Luhn check." ) return False print(f"{credit_card_number} is a valid credit card number." ) return True if __name__ == "__main__": import doctest doctest.testmod() validate_credit_card_number('''4111111111111111''') validate_credit_card_number('''32323''')
86
import os from datetime import datetime as dt from github import Github lowerCamelCase_ = [ '''good first issue''', '''good second issue''', '''good difficult issue''', '''enhancement''', '''new pipeline/model''', '''new scheduler''', '''wip''', ] def __magic_name__ ( ): '''simple docstring''' UpperCamelCase__ = Github(os.environ["""GITHUB_TOKEN"""] ) UpperCamelCase__ = g.get_repo("""huggingface/diffusers""" ) UpperCamelCase__ = repo.get_issues(state="""open""" ) for issue in open_issues: UpperCamelCase__ = sorted(issue.get_comments() , key=lambda __a : i.created_at , reverse=__a ) UpperCamelCase__ = comments[0] if len(__a ) > 0 else None if ( last_comment is not None and last_comment.user.login == "github-actions[bot]" and (dt.utcnow() - issue.updated_at).days > 7 and (dt.utcnow() - issue.created_at).days >= 30 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() ) ): # Closes the issue after 7 days of inactivity since the Stalebot notification. issue.edit(state="""closed""" ) elif ( "stale" in issue.get_labels() and last_comment is not None and last_comment.user.login != "github-actions[bot]" ): # Opens the issue if someone other than Stalebot commented. issue.edit(state="""open""" ) issue.remove_from_labels("""stale""" ) elif ( (dt.utcnow() - issue.updated_at).days > 23 and (dt.utcnow() - issue.created_at).days >= 30 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() ) ): # Post a Stalebot notification after 23 days of inactivity. issue.create_comment( """This issue has been automatically marked as stale because it has not had """ """recent activity. If you think this still needs to be addressed """ """please comment on this thread.\n\nPlease note that issues that do not follow the """ """[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) """ """are likely to be ignored.""" ) issue.add_to_labels("""stale""" ) if __name__ == "__main__": main()
86
1
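# De-obfuscated sketch of the Luhn check implemented above; subtracting 9
# from a doubled two-digit value is equivalent to the row's
# `digit %= 10; digit += 1` digit-sum step.
def luhn_validation(cc_number: str) -> bool:
    total = 0
    for i, digit_char in enumerate(reversed(cc_number)):
        digit = int(digit_char)
        if i % 2 == 1:  # double every second digit from the right
            digit *= 2
            if digit > 9:
                digit -= 9
        total += digit
    return total % 10 == 0

assert luhn_validation("4111111111111111")
assert not luhn_validation("4111111111111112")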
import unittest from transformers import AutoTokenizer, is_flax_available from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow if is_flax_available(): import jax.numpy as jnp from transformers import FlaxXLMRobertaModel @require_sentencepiece @require_tokenizers @require_flax class __A( unittest.TestCase ): """simple docstring""" @slow def UpperCAmelCase_ (self ): UpperCamelCase__ = FlaxXLMRobertaModel.from_pretrained("""xlm-roberta-base""" ) UpperCamelCase__ = AutoTokenizer.from_pretrained("""xlm-roberta-base""" ) UpperCamelCase__ = """The dog is cute and lives in the garden house""" UpperCamelCase__ = jnp.array([tokenizer.encode(SCREAMING_SNAKE_CASE_ )] ) UpperCamelCase__ = (1, 12, 7_68) # batch_size, sequence_length, embedding_vector_dim UpperCamelCase__ = jnp.array( [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]] ) UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ )["""last_hidden_state"""] self.assertEqual(output.shape , SCREAMING_SNAKE_CASE_ ) # compare the actual values for a slice of last dim self.assertTrue(jnp.allclose(output[:, :, -1] , SCREAMING_SNAKE_CASE_ , atol=1E-3 ) )
86
import inspect from typing import List, Optional, Tuple, Union import numpy as np import PIL import torch import torch.utils.checkpoint from ...models import UNetaDModel, VQModel from ...schedulers import ( DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, ) from ...utils import PIL_INTERPOLATION, randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput def __magic_name__ ( __a : Optional[Any] ): '''simple docstring''' UpperCamelCase__ , UpperCamelCase__ = image.size UpperCamelCase__ , UpperCamelCase__ = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32 UpperCamelCase__ = image.resize((w, h) , resample=PIL_INTERPOLATION["""lanczos"""] ) UpperCamelCase__ = np.array(__a ).astype(np.floataa ) / 255.0 UpperCamelCase__ = image[None].transpose(0 , 3 , 1 , 2 ) UpperCamelCase__ = torch.from_numpy(__a ) return 2.0 * image - 1.0 class __A( __lowerCamelCase ): """simple docstring""" def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ): super().__init__() self.register_modules(vqvae=SCREAMING_SNAKE_CASE_ , unet=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ ) @torch.no_grad() def __call__(self , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 1 , SCREAMING_SNAKE_CASE_ = 1_00 , SCREAMING_SNAKE_CASE_ = 0.0 , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = "pil" , SCREAMING_SNAKE_CASE_ = True , ): if isinstance(SCREAMING_SNAKE_CASE_ , PIL.Image.Image ): UpperCamelCase__ = 1 elif isinstance(SCREAMING_SNAKE_CASE_ , torch.Tensor ): UpperCamelCase__ = image.shape[0] else: raise ValueError(F"`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(SCREAMING_SNAKE_CASE_ )}" ) if isinstance(SCREAMING_SNAKE_CASE_ , PIL.Image.Image ): UpperCamelCase__ = preprocess(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ , UpperCamelCase__ = image.shape[-2:] # in_channels should be 6: 3 for latents, 3 for low resolution image UpperCamelCase__ = (batch_size, self.unet.config.in_channels // 2, height, width) UpperCamelCase__ = next(self.unet.parameters() ).dtype UpperCamelCase__ = randn_tensor(SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , device=self.device , dtype=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = image.to(device=self.device , dtype=SCREAMING_SNAKE_CASE_ ) # set timesteps and move to the correct device self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ , device=self.device ) UpperCamelCase__ = self.scheduler.timesteps # scale the initial noise by the standard deviation required by the scheduler UpperCamelCase__ = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature. # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] UpperCamelCase__ = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() ) UpperCamelCase__ = {} if accepts_eta: UpperCamelCase__ = eta for t in self.progress_bar(SCREAMING_SNAKE_CASE_ ): # concat latents and low resolution image in the channel dimension. 
UpperCamelCase__ = torch.cat([latents, image] , dim=1 ) UpperCamelCase__ = self.scheduler.scale_model_input(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # predict the noise residual UpperCamelCase__ = self.unet(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).sample # compute the previous noisy sample x_t -> x_t-1 UpperCamelCase__ = self.scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ).prev_sample # decode the image latents with the VQVAE UpperCamelCase__ = self.vqvae.decode(SCREAMING_SNAKE_CASE_ ).sample UpperCamelCase__ = torch.clamp(SCREAMING_SNAKE_CASE_ , -1.0 , 1.0 ) UpperCamelCase__ = image / 2 + 0.5 UpperCamelCase__ = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": UpperCamelCase__ = self.numpy_to_pil(SCREAMING_SNAKE_CASE_ ) if not return_dict: return (image,) return ImagePipelineOutput(images=SCREAMING_SNAKE_CASE_ )
86
1
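# Minimal sketch of the channel-concat conditioning used by the
# super-resolution pipeline above: noisy latents and the low-resolution
# image are stacked along the channel axis before the UNet call, which is
# why the code expects in_channels == 6. Shapes below are illustrative.
import torch

latents = torch.randn(1, 3, 64, 64)
low_res_image = torch.randn(1, 3, 64, 64)
latents_input = torch.cat([latents, low_res_image], dim=1)
assert latents_input.shape == (1, 6, 64, 64)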
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) lowerCamelCase_ = { '''configuration_vision_encoder_decoder''': ['''VisionEncoderDecoderConfig''', '''VisionEncoderDecoderOnnxConfig'''] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase_ = ['''VisionEncoderDecoderModel'''] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase_ = ['''TFVisionEncoderDecoderModel'''] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase_ = ['''FlaxVisionEncoderDecoderModel'''] if TYPE_CHECKING: from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel else: import sys lowerCamelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
86
def __magic_name__ ( __a : str , __a : str ): '''simple docstring''' UpperCamelCase__ = len(__a ) UpperCamelCase__ = len(__a ) UpperCamelCase__ = [[False for _ in range(m + 1 )] for _ in range(n + 1 )] UpperCamelCase__ = True for i in range(__a ): for j in range(m + 1 ): if dp[i][j]: if j < m and a[i].upper() == b[j]: UpperCamelCase__ = True if a[i].islower(): UpperCamelCase__ = True return dp[n][m] if __name__ == "__main__": import doctest doctest.testmod()
86
1
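# De-obfuscated sketch of the abbreviation DP above: dp[i][j] is True when
# the first i characters of `a` can produce the first j characters of `b`
# by upper-casing matching letters and deleting leftover lowercase ones.
def can_abbreviate(a: str, b: str) -> bool:
    n, m = len(a), len(b)
    dp = [[False] * (m + 1) for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]

assert can_abbreviate("daBcd", "ABC")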
from typing import Any, Dict, List, Optional, Tuple, Union import torch from torch import nn from torch.utils.data import DistributedSampler, RandomSampler from transformers import PreTrainedModel, Trainer, logging from transformers.integrations import is_fairscale_available from transformers.models.fsmt.configuration_fsmt import FSMTConfig from transformers.optimization import ( Adafactor, AdamW, get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) from transformers.trainer_pt_utils import get_tpu_sampler from transformers.training_args import ParallelMode from transformers.utils import is_torch_tpu_available if is_fairscale_available(): from fairscale.optim import OSS lowerCamelCase_ = logging.get_logger(__name__) lowerCamelCase_ = { '''linear''': get_linear_schedule_with_warmup, '''cosine''': get_cosine_schedule_with_warmup, '''cosine_w_restarts''': get_cosine_with_hard_restarts_schedule_with_warmup, '''polynomial''': get_polynomial_decay_schedule_with_warmup, '''constant''': get_constant_schedule, '''constant_w_warmup''': get_constant_schedule_with_warmup, } class __A( __lowerCamelCase ): """simple docstring""" def __init__(self , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ): super().__init__(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) if config is None: assert isinstance(self.model , SCREAMING_SNAKE_CASE_ ), ( "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is" F" {self.model.__class__}" ) UpperCamelCase__ = self.model.config else: UpperCamelCase__ = config UpperCamelCase__ = data_args UpperCamelCase__ = self.config.tgt_vocab_size if isinstance(self.config , SCREAMING_SNAKE_CASE_ ) else self.config.vocab_size if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss): assert self.config.pad_token_id is not None, ( "Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss" " calculation or doing label smoothing." ) if self.config.pad_token_id is None and self.config.eos_token_id is not None: logger.warning( F"The `config.pad_token_id` is `None`. 
Using `config.eos_token_id` = {self.config.eos_token_id} for" """ padding..""" ) if self.args.label_smoothing == 0: UpperCamelCase__ = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id ) else: # dynamically import label_smoothed_nll_loss from utils import label_smoothed_nll_loss UpperCamelCase__ = label_smoothed_nll_loss def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ ): if self.optimizer is None: UpperCamelCase__ = ["""bias""", """LayerNorm.weight"""] UpperCamelCase__ = [ { """params""": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )], """weight_decay""": self.args.weight_decay, }, { """params""": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )], """weight_decay""": 0.0, }, ] UpperCamelCase__ = Adafactor if self.args.adafactor else AdamW if self.args.adafactor: UpperCamelCase__ = Adafactor UpperCamelCase__ = {"""scale_parameter""": False, """relative_step""": False} else: UpperCamelCase__ = AdamW UpperCamelCase__ = { """betas""": (self.args.adam_betaa, self.args.adam_betaa), """eps""": self.args.adam_epsilon, } UpperCamelCase__ = self.args.learning_rate if self.sharded_ddp: UpperCamelCase__ = OSS( params=SCREAMING_SNAKE_CASE_ , optim=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , ) else: UpperCamelCase__ = optimizer_cls(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) if self.lr_scheduler is None: UpperCamelCase__ = self._get_lr_scheduler(SCREAMING_SNAKE_CASE_ ) else: # ignoring --lr_scheduler logger.warning("""scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.""" ) def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = arg_to_scheduler[self.args.lr_scheduler] if self.args.lr_scheduler == "constant": UpperCamelCase__ = schedule_func(self.optimizer ) elif self.args.lr_scheduler == "constant_w_warmup": UpperCamelCase__ = schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps ) else: UpperCamelCase__ = schedule_func( self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=SCREAMING_SNAKE_CASE_ ) return scheduler def UpperCAmelCase_ (self ): if isinstance(self.train_dataset , torch.utils.data.IterableDataset ): return None elif is_torch_tpu_available(): return get_tpu_sampler(self.train_dataset ) else: if self.args.sortish_sampler: self.train_dataset.make_sortish_sampler( self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , ) return ( RandomSampler(self.train_dataset ) if self.args.local_rank == -1 else DistributedSampler(self.train_dataset ) ) def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): if self.args.label_smoothing == 0: if self.data_args is not None and self.data_args.ignore_pad_token_for_loss: # force training to ignore pad token UpperCamelCase__ = model(**SCREAMING_SNAKE_CASE_ , use_cache=SCREAMING_SNAKE_CASE_ )[0] UpperCamelCase__ = self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) ) else: # compute usual loss via models UpperCamelCase__ , UpperCamelCase__ = model(**SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ , use_cache=SCREAMING_SNAKE_CASE_ )[:2] else: # compute label smoothed loss UpperCamelCase__ = model(**SCREAMING_SNAKE_CASE_ , use_cache=SCREAMING_SNAKE_CASE_ )[0] UpperCamelCase__ = torch.nn.functional.log_softmax(SCREAMING_SNAKE_CASE_ , dim=-1 ) UpperCamelCase__ , UpperCamelCase__ = self.loss_fn(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , self.args.label_smoothing , 
ignore_index=self.config.pad_token_id ) return loss, logits def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = inputs.pop("""labels""" ) UpperCamelCase__ , UpperCamelCase__ = self._compute_loss(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) return loss def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , ): UpperCamelCase__ = self._prepare_inputs(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = { """max_length""": self.data_args.val_max_target_length if self.data_args is not None else self.config.max_length, """num_beams""": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams, } if self.args.predict_with_generate and not self.args.prediction_loss_only: UpperCamelCase__ = self.model.generate( inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , **SCREAMING_SNAKE_CASE_ , ) # in case the batch is shorter than max length, the output should be padded if generated_tokens.shape[-1] < gen_kwargs["max_length"]: UpperCamelCase__ = self._pad_tensors_to_max_len(SCREAMING_SNAKE_CASE_ , gen_kwargs["""max_length"""] ) UpperCamelCase__ = inputs.pop("""labels""" ) with torch.no_grad(): # compute loss on predict data UpperCamelCase__ , UpperCamelCase__ = self._compute_loss(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = loss.mean().detach() if self.args.prediction_loss_only: return (loss, None, None) UpperCamelCase__ = generated_tokens if self.args.predict_with_generate else logits if labels.shape[-1] < gen_kwargs["max_length"]: UpperCamelCase__ = self._pad_tensors_to_max_len(SCREAMING_SNAKE_CASE_ , gen_kwargs["""max_length"""] ) return (loss, logits, labels) def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): # If PAD token is not defined at least EOS token has to be defined UpperCamelCase__ = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id if pad_token_id is None: raise ValueError( """Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be""" F" padded to `max_length`={max_length}" ) UpperCamelCase__ = pad_token_id * torch.ones( (tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device ) UpperCamelCase__ = tensor return padded_tensor
86
from __future__ import annotations lowerCamelCase_ = '''#''' class __A: """simple docstring""" def __init__(self ): UpperCamelCase__ = {} def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = self._trie for char in text: if char not in trie: UpperCamelCase__ = {} UpperCamelCase__ = trie[char] UpperCamelCase__ = True def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = self._trie for char in prefix: if char in trie: UpperCamelCase__ = trie[char] else: return [] return self._elements(SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = [] for c, v in d.items(): UpperCamelCase__ = [""" """] if c == END else [(c + s) for s in self._elements(SCREAMING_SNAKE_CASE_ )] result.extend(SCREAMING_SNAKE_CASE_ ) return tuple(SCREAMING_SNAKE_CASE_ ) lowerCamelCase_ = Trie() lowerCamelCase_ = ('''depart''', '''detergent''', '''daring''', '''dog''', '''deer''', '''deal''') for word in words: trie.insert_word(word) def __magic_name__ ( __a : str ): '''simple docstring''' UpperCamelCase__ = trie.find_word(__a ) return tuple(string + word for word in suffixes ) def __magic_name__ ( ): '''simple docstring''' print(autocomplete_using_trie("""de""" ) ) if __name__ == "__main__": import doctest doctest.testmod() main()
86
1
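# De-obfuscated sketch of the prefix-trie autocomplete from the
# style_context above, with readable names restored.
END = "#"

class Trie:
    def __init__(self) -> None:
        self._trie: dict = {}

    def insert_word(self, text: str) -> None:
        node = self._trie
        for char in text:
            node = node.setdefault(char, {})
        node[END] = True

    def find_words(self, prefix: str) -> list[str]:
        node = self._trie
        for char in prefix:
            if char not in node:
                return []
            node = node[char]
        return [prefix + suffix for suffix in self._suffixes(node)]

    def _suffixes(self, node: dict) -> list[str]:
        out: list[str] = []
        for char, child in node.items():
            if char == END:
                out.append("")
            else:
                out.extend(char + s for s in self._suffixes(child))
        return out

trie = Trie()
for word in ("depart", "detergent", "daring"):
    trie.insert_word(word)
assert sorted(trie.find_words("de")) == ["depart", "detergent"]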
import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class __A( unittest.TestCase ): """simple docstring""" @property def UpperCAmelCase_ (self ): torch.manual_seed(0 ) UpperCamelCase__ = UNetaDModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , ) return model @property def UpperCAmelCase_ (self ): torch.manual_seed(0 ) UpperCamelCase__ = VQModel( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=3 , ) return model @property def UpperCAmelCase_ (self ): torch.manual_seed(0 ) UpperCamelCase__ = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) return CLIPTextModel(SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self ): UpperCamelCase__ = self.dummy_uncond_unet UpperCamelCase__ = DDIMScheduler() UpperCamelCase__ = self.dummy_vq_model UpperCamelCase__ = LDMPipeline(unet=SCREAMING_SNAKE_CASE_ , vqvae=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ ) ldm.to(SCREAMING_SNAKE_CASE_ ) ldm.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = torch.manual_seed(0 ) UpperCamelCase__ = ldm(generator=SCREAMING_SNAKE_CASE_ , num_inference_steps=2 , output_type="""numpy""" ).images UpperCamelCase__ = torch.manual_seed(0 ) UpperCamelCase__ = ldm(generator=SCREAMING_SNAKE_CASE_ , num_inference_steps=2 , output_type="""numpy""" , return_dict=SCREAMING_SNAKE_CASE_ )[0] UpperCamelCase__ = image[0, -3:, -3:, -1] UpperCamelCase__ = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) UpperCamelCase__ = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172] ) UpperCamelCase__ = 1E-2 if torch_device != """mps""" else 3E-2 assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance @slow @require_torch class __A( unittest.TestCase ): """simple docstring""" def UpperCAmelCase_ (self ): UpperCamelCase__ = LDMPipeline.from_pretrained("""CompVis/ldm-celebahq-256""" ) ldm.to(SCREAMING_SNAKE_CASE_ ) ldm.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = torch.manual_seed(0 ) UpperCamelCase__ = ldm(generator=SCREAMING_SNAKE_CASE_ , num_inference_steps=5 , output_type="""numpy""" ).images UpperCamelCase__ = image[0, -3:, -3:, -1] assert image.shape == (1, 2_56, 2_56, 3) UpperCamelCase__ = np.array([0.4399, 0.4_4975, 0.4_6825, 0.474, 0.4359, 0.4581, 0.4_5095, 0.4341, 0.4447] ) UpperCamelCase__ = 1E-2 if torch_device != """mps""" else 3E-2 assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
86
import math import unittest from transformers import BioGptConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification, BioGptModel, BioGptTokenizer, ) from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST class __A: """simple docstring""" def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=13 , SCREAMING_SNAKE_CASE_=7 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=99 , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=5 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=37 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=5_12 , SCREAMING_SNAKE_CASE_=16 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=0.02 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=None , ): UpperCamelCase__ = parent UpperCamelCase__ = batch_size UpperCamelCase__ = seq_length UpperCamelCase__ = is_training UpperCamelCase__ = use_input_mask UpperCamelCase__ = use_token_type_ids UpperCamelCase__ = use_labels UpperCamelCase__ = vocab_size UpperCamelCase__ = hidden_size UpperCamelCase__ = num_hidden_layers UpperCamelCase__ = num_attention_heads UpperCamelCase__ = intermediate_size UpperCamelCase__ = hidden_act UpperCamelCase__ = hidden_dropout_prob UpperCamelCase__ = attention_probs_dropout_prob UpperCamelCase__ = max_position_embeddings UpperCamelCase__ = type_vocab_size UpperCamelCase__ = type_sequence_label_size UpperCamelCase__ = initializer_range UpperCamelCase__ = num_labels UpperCamelCase__ = num_choices UpperCamelCase__ = scope def UpperCAmelCase_ (self ): UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCamelCase__ = None if self.use_input_mask: UpperCamelCase__ = random_attention_mask([self.batch_size, self.seq_length] ) UpperCamelCase__ = None if self.use_token_type_ids: UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) UpperCamelCase__ = None UpperCamelCase__ = None UpperCamelCase__ = None if self.use_labels: UpperCamelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCamelCase__ = ids_tensor([self.batch_size] , self.num_choices ) UpperCamelCase__ = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCAmelCase_ (self ): return BioGptConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=SCREAMING_SNAKE_CASE_ , initializer_range=self.initializer_range , ) def UpperCAmelCase_ (self , 
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = BioGptModel(config=SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ): UpperCamelCase__ = BioGptForCausalLM(config=SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = BioGptModel(config=SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() # create attention mask UpperCamelCase__ = torch.ones(input_ids.shape , dtype=torch.long , device=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = self.seq_length // 2 UpperCamelCase__ = 0 # first forward pass UpperCamelCase__ , UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ ).to_tuple() # create hypothetical next token and extent to next_input_ids UpperCamelCase__ = ids_tensor((self.batch_size, 1) , config.vocab_size ) # change a random masked slice from input_ids UpperCamelCase__ = ids_tensor((1,) , SCREAMING_SNAKE_CASE_ ).item() + 1 UpperCamelCase__ = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 ) UpperCamelCase__ = random_other_next_tokens # append to next input_ids and attn_mask UpperCamelCase__ = torch.cat([input_ids, next_tokens] , dim=-1 ) UpperCamelCase__ = torch.cat( [attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=SCREAMING_SNAKE_CASE_ )] , dim=1 , ) # get two different outputs UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ )["""last_hidden_state"""] UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , past_key_values=SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ )["""last_hidden_state"""] # select random slice UpperCamelCase__ = ids_tensor((1,) , output_from_past.shape[-1] ).item() UpperCamelCase__ = output_from_no_past[:, -1, random_slice_idx].detach() UpperCamelCase__ = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1E-3 ) ) def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = BioGptModel(config=SCREAMING_SNAKE_CASE_ ).to(SCREAMING_SNAKE_CASE_ ).eval() UpperCamelCase__ = torch.ones(input_ids.shape , dtype=torch.long , device=SCREAMING_SNAKE_CASE_ ) # first forward pass UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , 
use_cache=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ , UpperCamelCase__ = outputs.to_tuple() # create hypothetical multiple next token and extent to next_input_ids UpperCamelCase__ = ids_tensor((self.batch_size, 3) , config.vocab_size ) UpperCamelCase__ = ids_tensor((self.batch_size, 3) , 2 ) # append to next input_ids and UpperCamelCase__ = torch.cat([input_ids, next_tokens] , dim=-1 ) UpperCamelCase__ = torch.cat([attention_mask, next_attn_mask] , dim=-1 ) UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ )["""last_hidden_state"""] UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , past_key_values=SCREAMING_SNAKE_CASE_ )[ """last_hidden_state""" ] # select random slice UpperCamelCase__ = ids_tensor((1,) , output_from_past.shape[-1] ).item() UpperCamelCase__ = output_from_no_past[:, -3:, random_slice_idx].detach() UpperCamelCase__ = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1E-3 ) ) def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=False ): UpperCamelCase__ = BioGptForCausalLM(SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) if gradient_checkpointing: model.gradient_checkpointing_enable() UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) result.loss.backward() def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = BioGptModel(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers ) for key in model.state_dict().keys(): if "c_proj" in key and "weight" in key: self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.001 ) self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.01 ) def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ ): UpperCamelCase__ = self.num_labels UpperCamelCase__ = BioGptForTokenClassification(SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCAmelCase_ (self ): UpperCamelCase__ = self.prepare_config_and_inputs() ( ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ( UpperCamelCase__ ) , ) = config_and_inputs UpperCamelCase__ = {"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class __A( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = ( (BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification) if is_torch_available() else () ) SCREAMING_SNAKE_CASE__ = (BioGptForCausalLM,) if 
is_torch_available() else () SCREAMING_SNAKE_CASE__ = ( { """feature-extraction""": BioGptModel, """text-classification""": BioGptForSequenceClassification, """text-generation""": BioGptForCausalLM, """token-classification""": BioGptForTokenClassification, """zero-shot""": BioGptForSequenceClassification, } if is_torch_available() else {} ) SCREAMING_SNAKE_CASE__ = False def UpperCAmelCase_ (self ): UpperCamelCase__ = BioGptModelTester(self ) UpperCamelCase__ = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , hidden_size=37 ) def UpperCAmelCase_ (self ): self.config_tester.run_common_tests() def UpperCAmelCase_ (self ): UpperCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self ): UpperCamelCase__ = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: UpperCamelCase__ = type self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self ): UpperCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_model_attention_mask_past(*SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self ): UpperCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_forward_and_backwards(*SCREAMING_SNAKE_CASE_ , gradient_checkpointing=SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self ): UpperCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_model_past_large_inputs(*SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self ): UpperCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_weight_initialization(*SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self ): UpperCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_for_token_classification(*SCREAMING_SNAKE_CASE_ ) @slow def UpperCAmelCase_ (self ): UpperCamelCase__ = BioGptForCausalLM.from_pretrained("""microsoft/biogpt""" ) model.to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = BioGptTokenizer.from_pretrained("""microsoft/biogpt""" ) UpperCamelCase__ = """left""" # Define PAD Token = EOS Token = 50256 UpperCamelCase__ = tokenizer.eos_token UpperCamelCase__ = model.config.eos_token_id # use different length sentences to test batching UpperCamelCase__ = [ """Hello, my dog is a little""", """Today, I""", ] UpperCamelCase__ = tokenizer(SCREAMING_SNAKE_CASE_ , return_tensors="""pt""" , padding=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = inputs["""input_ids"""].to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = model.generate( input_ids=SCREAMING_SNAKE_CASE_ , attention_mask=inputs["""attention_mask"""].to(SCREAMING_SNAKE_CASE_ ) , ) UpperCamelCase__ = tokenizer(sentences[0] , return_tensors="""pt""" ).input_ids.to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = model.generate(input_ids=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = inputs_non_padded.shape[-1] - inputs["""attention_mask"""][-1].long().sum().cpu().item() UpperCamelCase__ = tokenizer(sentences[1] , return_tensors="""pt""" ).input_ids.to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = model.generate(input_ids=SCREAMING_SNAKE_CASE_ , max_length=model.config.max_length - num_paddings ) UpperCamelCase__ = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ , skip_special_tokens=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = tokenizer.decode(output_non_padded[0] , skip_special_tokens=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = 
tokenizer.decode(output_padded[0] , skip_special_tokens=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = [ """Hello, my dog is a little bit bigger than a little bit.""", """Today, I have a good idea of how to use the information""", ] self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) self.assertListEqual(SCREAMING_SNAKE_CASE_ , [non_padded_sentence, padded_sentence] ) @slow def UpperCAmelCase_ (self ): for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCamelCase__ = BioGptModel.from_pretrained(SCREAMING_SNAKE_CASE_ ) self.assertIsNotNone(SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self ): UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() UpperCamelCase__ = 3 UpperCamelCase__ = input_dict["""input_ids"""] UpperCamelCase__ = input_ids.ne(1 ).to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) UpperCamelCase__ = BioGptForSequenceClassification(SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def UpperCAmelCase_ (self ): UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() UpperCamelCase__ = 3 UpperCamelCase__ = """multi_label_classification""" UpperCamelCase__ = input_dict["""input_ids"""] UpperCamelCase__ = input_ids.ne(1 ).to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = ids_tensor( [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float ) UpperCamelCase__ = BioGptForSequenceClassification(SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) @require_torch class __A( unittest.TestCase ): """simple docstring""" @slow def UpperCAmelCase_ (self ): UpperCamelCase__ = BioGptForCausalLM.from_pretrained("""microsoft/biogpt""" ) UpperCamelCase__ = torch.tensor([[2, 48_05, 9, 6_56, 21]] ) UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ )[0] UpperCamelCase__ = 4_23_84 UpperCamelCase__ = torch.Size((1, 5, vocab_size) ) self.assertEqual(output.shape , SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = torch.tensor( [[[-9.5236, -9.8918, 10.4557], [-11.0469, -9.6423, 8.1022], [-8.8664, -7.8826, 5.5325]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=1E-4 ) ) @slow def UpperCAmelCase_ (self ): UpperCamelCase__ = BioGptTokenizer.from_pretrained("""microsoft/biogpt""" ) UpperCamelCase__ = BioGptForCausalLM.from_pretrained("""microsoft/biogpt""" ) model.to(SCREAMING_SNAKE_CASE_ ) torch.manual_seed(0 ) UpperCamelCase__ = tokenizer("""COVID-19 is""" , return_tensors="""pt""" ).to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = model.generate( **SCREAMING_SNAKE_CASE_ , min_length=1_00 , max_length=10_24 , num_beams=5 , early_stopping=SCREAMING_SNAKE_CASE_ , ) UpperCamelCase__ = tokenizer.decode(output_ids[0] , skip_special_tokens=SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = ( """COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the""" """ causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 
200 countries and""" """ territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),""" """ and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and""" """ more than 800,000 deaths.""" ) self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
86
1
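# Sketch of why the batched-generation test above sets padding_side="left"
# for a decoder-only model: generation continues from the final position,
# so real tokens must sit at the right edge of every row. The token ids
# below are made up for illustration.
pad = 0
prompts = [[5, 6, 7, 8], [9, 10]]
max_len = max(len(p) for p in prompts)
left_padded = [[pad] * (max_len - len(p)) + p for p in prompts]
assert left_padded == [[5, 6, 7, 8], [0, 0, 9, 10]]
# position -1 now holds a real token in every row, which is what
# autoregressive generation reads to produce the next token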
from __future__ import annotations from statistics import mean def __magic_name__ ( __a : list[int] , __a : list[int] , __a : int ): '''simple docstring''' UpperCamelCase__ = [0] * no_of_processes UpperCamelCase__ = [0] * no_of_processes # Initialize remaining_time to waiting_time. for i in range(__a ): UpperCamelCase__ = burst_time[i] UpperCamelCase__ = [] UpperCamelCase__ = 0 UpperCamelCase__ = 0 # When processes are not completed, # A process whose arrival time has passed \ # and has remaining execution time is put into the ready_process. # The shortest process in the ready_process, target_process is executed. while completed != no_of_processes: UpperCamelCase__ = [] UpperCamelCase__ = -1 for i in range(__a ): if (arrival_time[i] <= total_time) and (remaining_time[i] > 0): ready_process.append(__a ) if len(__a ) > 0: UpperCamelCase__ = ready_process[0] for i in ready_process: if remaining_time[i] < remaining_time[target_process]: UpperCamelCase__ = i total_time += burst_time[target_process] completed += 1 UpperCamelCase__ = 0 UpperCamelCase__ = ( total_time - arrival_time[target_process] - burst_time[target_process] ) else: total_time += 1 return waiting_time def __magic_name__ ( __a : list[int] , __a : int , __a : list[int] ): '''simple docstring''' UpperCamelCase__ = [0] * no_of_processes for i in range(__a ): UpperCamelCase__ = burst_time[i] + waiting_time[i] return turn_around_time if __name__ == "__main__": print('''[TEST CASE 01]''') lowerCamelCase_ = 4 lowerCamelCase_ = [2, 5, 3, 7] lowerCamelCase_ = [0, 0, 0, 0] lowerCamelCase_ = calculate_waitingtime(arrival_time, burst_time, no_of_processes) lowerCamelCase_ = calculate_turnaroundtime( burst_time, no_of_processes, waiting_time ) # Printing the Result print('''PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time''') for i, process_id in enumerate(list(range(1, 5))): print( f'{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t' f'{waiting_time[i]}\t\t\t\t{turn_around_time[i]}' ) print(f'\nAverage waiting time = {mean(waiting_time):.5f}') print(f'Average turnaround time = {mean(turn_around_time):.5f}')
86
from PIL import Image def __magic_name__ ( __a : Image , __a : float ): '''simple docstring''' def brightness(__a : int ) -> float: return 128 + level + (c - 128) if not -255.0 <= level <= 255.0: raise ValueError("""level must be between -255.0 (black) and 255.0 (white)""" ) return img.point(__a ) if __name__ == "__main__": # Load image with Image.open('''image_data/lena.jpg''') as img: # Change brightness to 100 lowerCamelCase_ = change_brightness(img, 1_00) brigt_img.save('''image_data/lena_brightness.png''', format='''png''')
86
1
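# De-obfuscated sketch of the per-pixel brightness transform from the
# style_context above; note that 128 + level + (c - 128) simplifies to
# c + level per channel (PIL then clamps results to the 0-255 range).
def brightness(c: int, level: float) -> float:
    return 128 + level + (c - 128)

assert brightness(100, 50) == 150
assert brightness(200, -30) == 170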
import gc
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    EulerAncestralDiscreteScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
    StableDiffusionPanoramaPipeline,
    UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps

from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin


enable_full_determinism()


@skip_mps
class __A( __lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
    """simple docstring"""

    SCREAMING_SNAKE_CASE__ = StableDiffusionPanoramaPipeline
    SCREAMING_SNAKE_CASE__ = TEXT_TO_IMAGE_PARAMS
    SCREAMING_SNAKE_CASE__ = TEXT_TO_IMAGE_BATCH_PARAMS
    SCREAMING_SNAKE_CASE__ = TEXT_TO_IMAGE_IMAGE_PARAMS
    SCREAMING_SNAKE_CASE__ = TEXT_TO_IMAGE_IMAGE_PARAMS

    def UpperCAmelCase_ (self ):
        torch.manual_seed(0 )
        UpperCamelCase__ = UNetaDConditionModel(
            block_out_channels=(32, 64) ,
            layers_per_block=1 ,
            sample_size=32 ,
            in_channels=4 ,
            out_channels=4 ,
            down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") ,
            up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") ,
            cross_attention_dim=32 ,
        )
        UpperCamelCase__ = DDIMScheduler()
        torch.manual_seed(0 )
        UpperCamelCase__ = AutoencoderKL(
            block_out_channels=[32, 64] ,
            in_channels=3 ,
            out_channels=3 ,
            down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] ,
            up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] ,
            latent_channels=4 ,
        )
        torch.manual_seed(0 )
        UpperCamelCase__ = CLIPTextConfig(
            bos_token_id=0 ,
            eos_token_id=2 ,
            hidden_size=32 ,
            intermediate_size=37 ,
            layer_norm_eps=1E-05 ,
            num_attention_heads=4 ,
            num_hidden_layers=5 ,
            pad_token_id=1 ,
            vocab_size=10_00 ,
        )
        UpperCamelCase__ = CLIPTextModel(SCREAMING_SNAKE_CASE_ )
        UpperCamelCase__ = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        UpperCamelCase__ = {
            """unet""": unet,
            """scheduler""": scheduler,
            """vae""": vae,
            """text_encoder""": text_encoder,
            """tokenizer""": tokenizer,
            """safety_checker""": None,
            """feature_extractor""": None,
        }
        return components

    def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=0 ):
        UpperCamelCase__ = torch.manual_seed(SCREAMING_SNAKE_CASE_ )
        UpperCamelCase__ = {
            """prompt""": """a photo of the dolomites""",
            """generator""": generator,
            # Setting height and width to None to prevent OOMs on CPU.
            """height""": None,
            """width""": None,
            """num_inference_steps""": 1,
            """guidance_scale""": 6.0,
            """output_type""": """numpy""",
        }
        return inputs

    def UpperCAmelCase_ (self ):
        UpperCamelCase__ = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        UpperCamelCase__ = self.get_dummy_components()
        UpperCamelCase__ = StableDiffusionPanoramaPipeline(**SCREAMING_SNAKE_CASE_ )
        UpperCamelCase__ = sd_pipe.to(SCREAMING_SNAKE_CASE_ )
        sd_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
        UpperCamelCase__ = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ )
        UpperCamelCase__ = sd_pipe(**SCREAMING_SNAKE_CASE_ ).images
        UpperCamelCase__ = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        UpperCamelCase__ = np.array([0.6186, 0.5374, 0.4915, 0.4135, 0.4114, 0.4563, 0.5128, 0.4977, 0.4757] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2

    def UpperCAmelCase_ (self ):
        super().test_inference_batch_consistent(batch_sizes=[1, 2] )

    def UpperCAmelCase_ (self ):
        super().test_inference_batch_single_identical(batch_size=2 , expected_max_diff=3.25E-3 )

    def UpperCAmelCase_ (self ):
        UpperCamelCase__ = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        UpperCamelCase__ = self.get_dummy_components()
        UpperCamelCase__ = StableDiffusionPanoramaPipeline(**SCREAMING_SNAKE_CASE_ )
        UpperCamelCase__ = sd_pipe.to(SCREAMING_SNAKE_CASE_ )
        sd_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
        UpperCamelCase__ = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ )
        UpperCamelCase__ = """french fries"""
        UpperCamelCase__ = sd_pipe(**SCREAMING_SNAKE_CASE_ , negative_prompt=SCREAMING_SNAKE_CASE_ )
        UpperCamelCase__ = output.images
        UpperCamelCase__ = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        UpperCamelCase__ = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2

    def UpperCAmelCase_ (self ):
        UpperCamelCase__ = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        UpperCamelCase__ = self.get_dummy_components()
        UpperCamelCase__ = StableDiffusionPanoramaPipeline(**SCREAMING_SNAKE_CASE_ )
        UpperCamelCase__ = sd_pipe.to(SCREAMING_SNAKE_CASE_ )
        sd_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
        UpperCamelCase__ = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ )
        UpperCamelCase__ = sd_pipe(**SCREAMING_SNAKE_CASE_ , view_batch_size=2 )
        UpperCamelCase__ = output.images
        UpperCamelCase__ = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        UpperCamelCase__ = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2

    def UpperCAmelCase_ (self ):
        UpperCamelCase__ = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        UpperCamelCase__ = self.get_dummy_components()
        UpperCamelCase__ = EulerAncestralDiscreteScheduler(
            beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="""scaled_linear""" )
        UpperCamelCase__ = StableDiffusionPanoramaPipeline(**SCREAMING_SNAKE_CASE_ )
        UpperCamelCase__ = sd_pipe.to(SCREAMING_SNAKE_CASE_ )
        sd_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
        UpperCamelCase__ = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ )
        UpperCamelCase__ = sd_pipe(**SCREAMING_SNAKE_CASE_ ).images
        UpperCamelCase__ = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        UpperCamelCase__ = np.array([0.4024, 0.6510, 0.4901, 0.5378, 0.5813, 0.5622, 0.4795, 0.4467, 0.4952] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2

    def UpperCAmelCase_ (self ):
        UpperCamelCase__ = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        UpperCamelCase__ = self.get_dummy_components()
        UpperCamelCase__ = PNDMScheduler(
            beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , skip_prk_steps=SCREAMING_SNAKE_CASE_ )
        UpperCamelCase__ = StableDiffusionPanoramaPipeline(**SCREAMING_SNAKE_CASE_ )
        UpperCamelCase__ = sd_pipe.to(SCREAMING_SNAKE_CASE_ )
        sd_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
        UpperCamelCase__ = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ )
        UpperCamelCase__ = sd_pipe(**SCREAMING_SNAKE_CASE_ ).images
        UpperCamelCase__ = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        UpperCamelCase__ = np.array([0.6391, 0.6291, 0.4861, 0.5134, 0.5552, 0.4578, 0.5032, 0.5023, 0.4539] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2


@slow
@require_torch_gpu
class __A( unittest.TestCase ):
    """simple docstring"""

    def UpperCAmelCase_ (self ):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_=0 ):
        UpperCamelCase__ = torch.manual_seed(SCREAMING_SNAKE_CASE_ )
        UpperCamelCase__ = {
            """prompt""": """a photo of the dolomites""",
            """generator""": generator,
            """num_inference_steps""": 3,
            """guidance_scale""": 7.5,
            """output_type""": """numpy""",
        }
        return inputs

    def UpperCAmelCase_ (self ):
        UpperCamelCase__ = """stabilityai/stable-diffusion-2-base"""
        UpperCamelCase__ = DDIMScheduler.from_pretrained(SCREAMING_SNAKE_CASE_ , subfolder="""scheduler""" )
        UpperCamelCase__ = StableDiffusionPanoramaPipeline.from_pretrained(
            SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ , safety_checker=SCREAMING_SNAKE_CASE_ )
        pipe.to(SCREAMING_SNAKE_CASE_ )
        pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
        pipe.enable_attention_slicing()
        UpperCamelCase__ = self.get_inputs()
        UpperCamelCase__ = pipe(**SCREAMING_SNAKE_CASE_ ).images
        UpperCamelCase__ = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 5_12, 20_48, 3)
        UpperCamelCase__ = np.array(
            [
                0.3696_8392,
                0.2702_5372,
                0.3244_6766,
                0.2837_9387,
                0.3636_3274,
                0.3073_3347,
                0.2710_0027,
                0.2705_4125,
                0.2553_6096,
            ]
        )
        assert np.abs(expected_slice - image_slice ).max() < 1E-2

    def UpperCAmelCase_ (self ):
        UpperCamelCase__ = StableDiffusionPanoramaPipeline.from_pretrained(
            """stabilityai/stable-diffusion-2-base""" , safety_checker=SCREAMING_SNAKE_CASE_ )
        UpperCamelCase__ = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.to(SCREAMING_SNAKE_CASE_ )
        pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
        pipe.enable_attention_slicing()
        UpperCamelCase__ = self.get_inputs()
        UpperCamelCase__ = pipe(**SCREAMING_SNAKE_CASE_ ).images
        UpperCamelCase__ = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 5_12, 20_48, 3)
        UpperCamelCase__ = np.array(
            [
                [
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                ]
            ]
        )
        assert np.abs(expected_slice - image_slice ).max() < 1E-3

    def UpperCAmelCase_ (self ):
        UpperCamelCase__ = 0

        def callback_fn(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> None:
            UpperCamelCase__ = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                UpperCamelCase__ = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 2_56)
                UpperCamelCase__ = latents[0, -3:, -3:, -1]
                UpperCamelCase__ = np.array(
                    [
                        0.1868_1869,
                        0.3390_7816,
                        0.536_1276,
                        0.1443_2865,
                        -0.0285_6611,
                        -0.7394_1123,
                        0.2339_7987,
                        0.4732_2682,
                        -0.3782_3164,
                    ]
                )
                assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
            elif step == 2:
                UpperCamelCase__ = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 2_56)
                UpperCamelCase__ = latents[0, -3:, -3:, -1]
                UpperCamelCase__ = np.array(
                    [
                        0.1853_9645,
                        0.3398_7248,
                        0.537_8559,
                        0.1443_7142,
                        -0.0245_5261,
                        -0.733_8317,
                        0.2399_0755,
                        0.4735_6272,
                        -0.378_6505,
                    ]
                )
                assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2

        UpperCamelCase__ = False
        UpperCamelCase__ = """stabilityai/stable-diffusion-2-base"""
        UpperCamelCase__ = DDIMScheduler.from_pretrained(SCREAMING_SNAKE_CASE_ , subfolder="""scheduler""" )
        UpperCamelCase__ = StableDiffusionPanoramaPipeline.from_pretrained(
            SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ , safety_checker=SCREAMING_SNAKE_CASE_ )
        UpperCamelCase__ = pipe.to(SCREAMING_SNAKE_CASE_ )
        pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
        pipe.enable_attention_slicing()
        UpperCamelCase__ = self.get_inputs()
        pipe(**SCREAMING_SNAKE_CASE_ , callback=SCREAMING_SNAKE_CASE_ , callback_steps=1 )
        assert callback_fn.has_been_called
        assert number_of_steps == 3

    def UpperCAmelCase_ (self ):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        UpperCamelCase__ = """stabilityai/stable-diffusion-2-base"""
        UpperCamelCase__ = DDIMScheduler.from_pretrained(SCREAMING_SNAKE_CASE_ , subfolder="""scheduler""" )
        UpperCamelCase__ = StableDiffusionPanoramaPipeline.from_pretrained(
            SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ , safety_checker=SCREAMING_SNAKE_CASE_ )
        UpperCamelCase__ = pipe.to(SCREAMING_SNAKE_CASE_ )
        pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
        pipe.enable_attention_slicing(1 )
        pipe.enable_sequential_cpu_offload()
        UpperCamelCase__ = self.get_inputs()
        UpperCamelCase__ = pipe(**SCREAMING_SNAKE_CASE_ )
        UpperCamelCase__ = torch.cuda.max_memory_allocated()
        # make sure that less than 5.5 GB is allocated
        assert mem_bytes < 5.5 * 10**9
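For context, the slow tests above exercise the public pipeline roughly like this (a sketch using the same model id and scheduler setup as the tests; the fp16/CUDA choices are my assumptions, not part of the test file):

import torch
from diffusers import DDIMScheduler, StableDiffusionPanoramaPipeline

model_id = "stabilityai/stable-diffusion-2-base"
scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
pipe = StableDiffusionPanoramaPipeline.from_pretrained(
    model_id, scheduler=scheduler, torch_dtype=torch.float16
).to("cuda")
# Panorama generation denoises overlapping views and fuses them into one wide image.
image = pipe("a photo of the dolomites").images[0]
image.save("dolomites_panorama.png")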
86
lowerCamelCase_ = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(10_00_00)]


def __magic_name__ ( __a : int ):
    '''simple docstring'''
    UpperCamelCase__ = 0
    while number:
        # Increased speed slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 100_000]
        number //= 100_000
    return sum_of_digits_squared


# There are 2 chains made:
# one ends with 89, and its member 58 is the one which, when declared first,
# gives the least number of iterations for all the members to be checked;
# the other one ends with 1 and has only one element, 1.
# So 58 and 1 are chosen to be declared at the start.
# Changed dictionary to an array to quicken the solution.
lowerCamelCase_ = [None] * 10_00_00_00
lowerCamelCase_ = True
lowerCamelCase_ = False


def __magic_name__ ( __a : int ):
    '''simple docstring'''
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore
    UpperCamelCase__ = chain(next_number(__a ) )
    UpperCamelCase__ = number_chain
    while number < 10_000_000:
        UpperCamelCase__ = number_chain
        number *= 10
    return number_chain


def __magic_name__ ( __a : int = 10_000_000 ):
    '''simple docstring'''
    for i in range(1 , __a ):
        if CHAINS[i] is None:
            chain(i + 1 )
    return CHAINS[:number].count(__a )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f'{solution() = }')
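This row memoizes Project Euler 92: every chain of repeated digit-square sums terminates at 1 or 89. A compact readable equivalent (the names are mine) that caches the terminal value per chain member:

from functools import lru_cache


def digit_squares(n: int) -> int:
    return sum(int(d) ** 2 for d in str(n))


@lru_cache(maxsize=None)
def ends_in_89(n: int) -> bool:
    if n == 1:
        return False
    if n == 89:
        return True
    return ends_in_89(digit_squares(n))


def count_chains_to_89(limit: int = 10_000_000) -> int:
    # Chains are short and digit_squares collapses the range, so the cache stays small.
    return sum(ends_in_89(digit_squares(n)) for n in range(1, limit))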
86
1
import argparse
import json
import os

import fairseq
import torch
from torch import nn

from transformers import (
    SpeechaTextaConfig,
    SpeechaTextaForCausalLM,
    SpeechaTextaTokenizer,
    SpeechEncoderDecoderConfig,
    SpeechEncoderDecoderModel,
    WavaVecaConfig,
    WavaVecaFeatureExtractor,
    WavaVecaModel,
    logging,
)


logging.set_verbosity_info()
lowerCamelCase_ = logging.get_logger(__name__)

lowerCamelCase_ = {
    '''post_extract_proj''': '''feature_projection.projection''',
    '''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
    '''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
    '''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
    '''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
    '''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
    '''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
    '''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
    '''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
    '''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
    '''encoder.layer_norm''': '''encoder.layer_norm''',
    '''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
    '''quantizer.weight_proj''': '''quantizer.weight_proj''',
    '''quantizer.vars''': '''quantizer.codevectors''',
    '''project_q''': '''project_q''',
    '''final_proj''': '''project_hid''',
    '''w2v_encoder.proj''': '''lm_head''',
    '''mask_emb''': '''masked_spec_embed''',
}
lowerCamelCase_ = [
    '''lm_head''',
    '''quantizer.weight_proj''',
    '''quantizer.codevectors''',
    '''project_q''',
    '''project_hid''',
]


def __magic_name__ ( __a : Tuple , __a : Optional[int] , __a : List[str] , __a : Optional[int] , __a : Dict ):
    '''simple docstring'''
    for attribute in key.split(""".""" ):
        UpperCamelCase__ = getattr(__a , __a )
    if weight_type is not None:
        UpperCamelCase__ = getattr(__a , __a ).shape
    else:
        UpperCamelCase__ = hf_pointer.shape
    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )
    if weight_type == "weight":
        UpperCamelCase__ = value
    elif weight_type == "weight_g":
        UpperCamelCase__ = value
    elif weight_type == "weight_v":
        UpperCamelCase__ = value
    elif weight_type == "bias":
        UpperCamelCase__ = value
    else:
        UpperCamelCase__ = value
    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )


def __magic_name__ ( __a : List[str] , __a : Any ):
    '''simple docstring'''
    UpperCamelCase__ = []
    UpperCamelCase__ = fairseq_model.state_dict()
    UpperCamelCase__ = hf_model.feature_extractor
    # if encoder has different dim to decoder -> use proj_weight
    UpperCamelCase__ = None
    for name, value in fairseq_dict.items():
        UpperCamelCase__ = False
        if "conv_layers" in name:
            load_conv_layer(
                __a ,
                __a ,
                __a ,
                __a ,
                hf_model.config.feat_extract_norm == """group""" ,
            )
            UpperCamelCase__ = True
        elif name.split(""".""" )[0] == "proj":
            UpperCamelCase__ = fairseq_model.proj
            UpperCamelCase__ = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
                    UpperCamelCase__ = True
                    if "*" in mapped_key:
                        UpperCamelCase__ = name.split(__a )[0].split(""".""" )[-2]
                        UpperCamelCase__ = mapped_key.replace("""*""" , __a )
                    if "weight_g" in name:
                        UpperCamelCase__ = """weight_g"""
                    elif "weight_v" in name:
                        UpperCamelCase__ = """weight_v"""
                    elif "bias" in name:
                        UpperCamelCase__ = """bias"""
                    elif "weight" in name:
                        UpperCamelCase__ = """weight"""
                    else:
                        UpperCamelCase__ = None
                    set_recursively(__a , __a , __a , __a , __a )
                continue
        if not is_used:
            unused_weights.append(__a )
    logger.warning(f"Unused weights: {unused_weights}" )
    return proj_weight


def __magic_name__ ( __a : Dict , __a : Union[str, Any] , __a : Tuple , __a : Dict , __a : int ):
    '''simple docstring'''
    UpperCamelCase__ = full_name.split("""conv_layers.""" )[-1]
    UpperCamelCase__ = name.split(""".""" )
    UpperCamelCase__ = int(items[0] )
    UpperCamelCase__ = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            UpperCamelCase__ = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            UpperCamelCase__ = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            UpperCamelCase__ = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            UpperCamelCase__ = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
    else:
        unused_weights.append(__a )


def __magic_name__ ( __a : int ):
    '''simple docstring'''
    UpperCamelCase__ , UpperCamelCase__ = emb.weight.shape
    UpperCamelCase__ = nn.Linear(__a , __a , bias=__a )
    UpperCamelCase__ = emb.weight.data
    return lin_layer


def __magic_name__ ( __a : Optional[Any] ):
    '''simple docstring'''
    with open(__a , """r""" , encoding="""utf-8""" ) as f:
        UpperCamelCase__ = f.readlines()
        UpperCamelCase__ = [line.split(""" """ )[0] for line in lines]
    UpperCamelCase__ = len(__a )
    UpperCamelCase__ = {
        """<s>""": 0,
        """<pad>""": 1,
        """</s>""": 2,
        """<unk>""": 3,
    }
    vocab_dict.update(dict(zip(__a , range(4 , num_words + 4 ) ) ) )
    return vocab_dict


@torch.no_grad()
def __magic_name__ (
    __a : Optional[int] ,
    __a : int ,
    __a : Union[str, Any] ,
    __a : Dict ,
    __a : Dict ,
    __a : str ,
    __a : List[str] ,
):
    '''simple docstring'''
    UpperCamelCase__ = WavaVecaConfig.from_pretrained(__a )
    UpperCamelCase__ = SpeechaTextaConfig.from_pretrained(
        __a , vocab_size=__a , decoder_layers=__a , do_stable_layer_norm=__a )
    UpperCamelCase__ = WavaVecaFeatureExtractor(
        feature_size=1 ,
        sampling_rate=16_000 ,
        padding_value=0 ,
        do_normalize=__a ,
        return_attention_mask=__a ,
    )
    UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
    UpperCamelCase__ = model[0].eval()
    # set weights for wav2vec2 encoder
    UpperCamelCase__ = WavaVecaModel(__a )
    UpperCamelCase__ = recursively_load_weights_wavaveca(model.encoder , __a )
    UpperCamelCase__ = SpeechaTextaForCausalLM(__a )
    UpperCamelCase__ , UpperCamelCase__ = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=__a )
    # set output linear layer
    unexpected_keys.remove("""embed_out""" )
    UpperCamelCase__ = nn.Parameter(model.decoder.embed_out.detach() )
    # layer norm is init to identity matrix so leaving it is fine
    logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}" )
    logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}" )
    UpperCamelCase__ = SpeechEncoderDecoderModel(encoder=__a , decoder=__a )
    UpperCamelCase__ = False
    # add projection layer
    UpperCamelCase__ = nn.Parameter(projection_layer.weight )
    UpperCamelCase__ = nn.Parameter(projection_layer.bias )
    UpperCamelCase__ = create_vocab_dict(__a )
    with open(os.path.join(__a , """vocab.json""" ) , """w""" ) as fp:
        json.dump(__a , __a )
    UpperCamelCase__ = SpeechaTextaTokenizer(os.path.join(__a , """vocab.json""" ) )
    tokenizer.save_pretrained(__a )
    UpperCamelCase__ = hf_wavavec.config.to_dict()
    UpperCamelCase__ = tokenizer.pad_token_id
    UpperCamelCase__ = tokenizer.bos_token_id
    UpperCamelCase__ = tokenizer.eos_token_id
    UpperCamelCase__ = """speech_to_text_2"""
    UpperCamelCase__ = """wav2vec2"""
    UpperCamelCase__ = SpeechEncoderDecoderConfig.from_dict(__a )
    hf_wavavec.save_pretrained(__a )
    feature_extractor.save_pretrained(__a )


if __name__ == "__main__":
    lowerCamelCase_ = argparse.ArgumentParser()
    parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
    parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
    parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
    parser.add_argument(
        '''--encoder_config_path''',
        default='''facebook/wav2vec2-large-lv60''',
        type=str,
        help='''Path to hf encoder wav2vec2 checkpoint config''',
    )
    parser.add_argument(
        '''--decoder_config_path''',
        default='''facebook/s2t-small-mustc-en-fr-st''',
        type=str,
        help='''Path to hf decoder s2t checkpoint config''',
    )
    parser.add_argument('''--vocab_size''', default=1_02_24, type=int, help='''Vocab size of decoder''')
    parser.add_argument('''--num_decoder_layers''', default=7, type=int, help='''Number of decoder layers''')
    lowerCamelCase_ = parser.parse_args()
    convert_wavaveca_checkpoint(
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.dict_path,
        encoder_config_path=args.encoder_config_path,
        decoder_config_path=args.decoder_config_path,
        vocab_size=args.vocab_size,
        num_decoder_layers=args.num_decoder_layers,
    )
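The heart of this conversion is the MAPPING-driven key rename; stripped of the checkpoint plumbing, it is just this (a schematic sketch with my own names; the '*' placeholder is filled with the fairseq layer index parsed from the key):

from __future__ import annotations


def rename_key(name: str, mapping: dict[str, str]) -> str | None:
    # Return the HF destination for a fairseq parameter name, or None if unmapped.
    for key, mapped in mapping.items():
        if key in name:
            if "*" in mapped:
                layer = name.split(key)[0].split(".")[-2]  # e.g. "3" from "encoder.layers.3."
                mapped = mapped.replace("*", layer)
            return mapped
    return None


example = {"self_attn.k_proj": "encoder.layers.*.attention.k_proj"}
assert rename_key("encoder.layers.3.self_attn.k_proj.weight", example) == "encoder.layers.3.attention.k_proj"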
86
import argparse
import hashlib
import os
import urllib
import warnings

import torch
from torch import nn
from tqdm import tqdm

from transformers import WhisperConfig, WhisperForConditionalGeneration


lowerCamelCase_ = {
    '''tiny.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt''',
    '''tiny''': '''https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt''',
    '''base.en''': '''https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt''',
    '''base''': '''https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt''',
    '''small.en''': '''https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt''',
    '''small''': '''https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt''',
    '''medium.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt''',
    '''medium''': '''https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt''',
    '''large''': '''https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt''',
    '''large-v2''': '''https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt''',
}


def __magic_name__ ( __a : List[str] ):
    '''simple docstring'''
    UpperCamelCase__ = ["""layers""", """blocks"""]
    for k in ignore_keys:
        state_dict.pop(__a , __a )


lowerCamelCase_ = {
    '''blocks''': '''layers''',
    '''mlp.0''': '''fc1''',
    '''mlp.2''': '''fc2''',
    '''mlp_ln''': '''final_layer_norm''',
    '''.attn.query''': '''.self_attn.q_proj''',
    '''.attn.key''': '''.self_attn.k_proj''',
    '''.attn.value''': '''.self_attn.v_proj''',
    '''.attn_ln''': '''.self_attn_layer_norm''',
    '''.attn.out''': '''.self_attn.out_proj''',
    '''.cross_attn.query''': '''.encoder_attn.q_proj''',
    '''.cross_attn.key''': '''.encoder_attn.k_proj''',
    '''.cross_attn.value''': '''.encoder_attn.v_proj''',
    '''.cross_attn_ln''': '''.encoder_attn_layer_norm''',
    '''.cross_attn.out''': '''.encoder_attn.out_proj''',
    '''decoder.ln.''': '''decoder.layer_norm.''',
    '''encoder.ln.''': '''encoder.layer_norm.''',
    '''token_embedding''': '''embed_tokens''',
    '''encoder.positional_embedding''': '''encoder.embed_positions.weight''',
    '''decoder.positional_embedding''': '''decoder.embed_positions.weight''',
    '''ln_post''': '''layer_norm''',
}


def __magic_name__ ( __a : Dict ):
    '''simple docstring'''
    UpperCamelCase__ = list(s_dict.keys() )
    for key in keys:
        UpperCamelCase__ = key
        for k, v in WHISPER_MAPPING.items():
            if k in key:
                UpperCamelCase__ = new_key.replace(__a , __a )
        print(f"{key} -> {new_key}" )
        UpperCamelCase__ = s_dict.pop(__a )
    return s_dict


def __magic_name__ ( __a : Optional[Any] ):
    '''simple docstring'''
    UpperCamelCase__ , UpperCamelCase__ = emb.weight.shape
    UpperCamelCase__ = nn.Linear(__a , __a , bias=__a )
    UpperCamelCase__ = emb.weight.data
    return lin_layer


def __magic_name__ ( __a : str , __a : str ):
    '''simple docstring'''
    os.makedirs(__a , exist_ok=__a )
    UpperCamelCase__ = os.path.basename(__a )
    UpperCamelCase__ = url.split("""/""" )[-2]
    UpperCamelCase__ = os.path.join(__a , __a )
    if os.path.exists(__a ) and not os.path.isfile(__a ):
        raise RuntimeError(f"{download_target} exists and is not a regular file" )
    if os.path.isfile(__a ):
        UpperCamelCase__ = open(__a , """rb""" ).read()
        if hashlib.shaaaa(__a ).hexdigest() == expected_shaaaa:
            return model_bytes
        else:
            warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file" )
    with urllib.request.urlopen(__a ) as source, open(__a , """wb""" ) as output:
        with tqdm(
            total=int(source.info().get("""Content-Length""" ) ) , ncols=80 , unit="""iB""" , unit_scale=__a , unit_divisor=1_024
        ) as loop:
            while True:
                UpperCamelCase__ = source.read(8_192 )
                if not buffer:
                    break
                output.write(__a )
                loop.update(len(__a ) )
    UpperCamelCase__ = open(__a , """rb""" ).read()
    if hashlib.shaaaa(__a ).hexdigest() != expected_shaaaa:
        raise RuntimeError(
            """Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model."""
        )
    return model_bytes


def __magic_name__ ( __a : Union[str, Any] , __a : Optional[int] ):
    '''simple docstring'''
    if ".pt" not in checkpoint_path:
        UpperCamelCase__ = _download(_MODELS[checkpoint_path] )
    else:
        UpperCamelCase__ = torch.load(__a , map_location="""cpu""" )
    UpperCamelCase__ = original_checkpoint["""dims"""]
    UpperCamelCase__ = original_checkpoint["""model_state_dict"""]
    UpperCamelCase__ = state_dict["""decoder.token_embedding.weight"""]
    remove_ignore_keys_(__a )
    rename_keys(__a )
    UpperCamelCase__ = True
    UpperCamelCase__ = state_dict["""decoder.layers.0.fc1.weight"""].shape[0]
    UpperCamelCase__ = WhisperConfig(
        vocab_size=dimensions["""n_vocab"""] ,
        encoder_ffn_dim=__a ,
        decoder_ffn_dim=__a ,
        num_mel_bins=dimensions["""n_mels"""] ,
        d_model=dimensions["""n_audio_state"""] ,
        max_target_positions=dimensions["""n_text_ctx"""] ,
        encoder_layers=dimensions["""n_audio_layer"""] ,
        encoder_attention_heads=dimensions["""n_audio_head"""] ,
        decoder_layers=dimensions["""n_text_layer"""] ,
        decoder_attention_heads=dimensions["""n_text_state"""] ,
        max_source_positions=dimensions["""n_audio_ctx"""] ,
    )
    UpperCamelCase__ = WhisperForConditionalGeneration(__a )
    UpperCamelCase__ , UpperCamelCase__ = model.model.load_state_dict(__a , strict=__a )
    if len(__a ) > 0 and not set(__a ) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            """Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"""
            f" but all the following weights are missing {missing}"
        )
    if tie_embeds:
        UpperCamelCase__ = make_linear_from_emb(model.model.decoder.embed_tokens )
    else:
        UpperCamelCase__ = proj_out_weights
    model.save_pretrained(__a )


if __name__ == "__main__":
    lowerCamelCase_ = argparse.ArgumentParser()
    # # Required parameters
    parser.add_argument('''--checkpoint_path''', type=str, help='''Path to the downloaded checkpoints''')
    parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
    lowerCamelCase_ = parser.parse_args()
    convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
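The download helper's contract, restated without the obfuscation (the names are mine): stream the checkpoint to a cache path, then refuse any bytes whose SHA-256 digest does not match the one embedded in the URL path.

import hashlib
import urllib.request


def verified_download(url: str, path: str, expected_sha256: str) -> bytes:
    # Stream the file to disk in 8 KiB chunks.
    with urllib.request.urlopen(url) as source, open(path, "wb") as out:
        while True:
            buf = source.read(8192)
            if not buf:
                break
            out.write(buf)
    with open(path, "rb") as f:
        data = f.read()
    if hashlib.sha256(data).hexdigest() != expected_sha256:
        raise RuntimeError("SHA256 checksum mismatch; please retry the download")
    return data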
86
1
import copy
from collections import OrderedDict
from typing import Dict, Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING


lowerCamelCase_ = logging.get_logger(__name__)

lowerCamelCase_ = {
    '''facebook/detr-resnet-50''': '''https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json''',
    # See all DETR models at https://huggingface.co/models?filter=detr
}


class __A( __lowerCamelCase ):
    """simple docstring"""

    SCREAMING_SNAKE_CASE__ = """detr"""
    SCREAMING_SNAKE_CASE__ = ["""past_key_values"""]
    SCREAMING_SNAKE_CASE__ = {
        """hidden_size""": """d_model""",
        """num_attention_heads""": """encoder_attention_heads""",
    }

    def __init__(
        self ,
        SCREAMING_SNAKE_CASE_=True ,
        SCREAMING_SNAKE_CASE_=None ,
        SCREAMING_SNAKE_CASE_=3 ,
        SCREAMING_SNAKE_CASE_=1_00 ,
        SCREAMING_SNAKE_CASE_=6 ,
        SCREAMING_SNAKE_CASE_=20_48 ,
        SCREAMING_SNAKE_CASE_=8 ,
        SCREAMING_SNAKE_CASE_=6 ,
        SCREAMING_SNAKE_CASE_=20_48 ,
        SCREAMING_SNAKE_CASE_=8 ,
        SCREAMING_SNAKE_CASE_=0.0 ,
        SCREAMING_SNAKE_CASE_=0.0 ,
        SCREAMING_SNAKE_CASE_=True ,
        SCREAMING_SNAKE_CASE_="relu" ,
        SCREAMING_SNAKE_CASE_=2_56 ,
        SCREAMING_SNAKE_CASE_=0.1 ,
        SCREAMING_SNAKE_CASE_=0.0 ,
        SCREAMING_SNAKE_CASE_=0.0 ,
        SCREAMING_SNAKE_CASE_=0.02 ,
        SCREAMING_SNAKE_CASE_=1.0 ,
        SCREAMING_SNAKE_CASE_=False ,
        SCREAMING_SNAKE_CASE_="sine" ,
        SCREAMING_SNAKE_CASE_="resnet50" ,
        SCREAMING_SNAKE_CASE_=True ,
        SCREAMING_SNAKE_CASE_=False ,
        SCREAMING_SNAKE_CASE_=1 ,
        SCREAMING_SNAKE_CASE_=5 ,
        SCREAMING_SNAKE_CASE_=2 ,
        SCREAMING_SNAKE_CASE_=1 ,
        SCREAMING_SNAKE_CASE_=1 ,
        SCREAMING_SNAKE_CASE_=5 ,
        SCREAMING_SNAKE_CASE_=2 ,
        SCREAMING_SNAKE_CASE_=0.1 ,
        **SCREAMING_SNAKE_CASE_ ,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" )
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
                UpperCamelCase__ = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] )
            elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
                UpperCamelCase__ = backbone_config.get("""model_type""" )
                UpperCamelCase__ = CONFIG_MAPPING[backbone_model_type]
                UpperCamelCase__ = config_class.from_dict(SCREAMING_SNAKE_CASE_ )
            # set timm attributes to None
            UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = None, None, None
        UpperCamelCase__ = use_timm_backbone
        UpperCamelCase__ = backbone_config
        UpperCamelCase__ = num_channels
        UpperCamelCase__ = num_queries
        UpperCamelCase__ = d_model
        UpperCamelCase__ = encoder_ffn_dim
        UpperCamelCase__ = encoder_layers
        UpperCamelCase__ = encoder_attention_heads
        UpperCamelCase__ = decoder_ffn_dim
        UpperCamelCase__ = decoder_layers
        UpperCamelCase__ = decoder_attention_heads
        UpperCamelCase__ = dropout
        UpperCamelCase__ = attention_dropout
        UpperCamelCase__ = activation_dropout
        UpperCamelCase__ = activation_function
        UpperCamelCase__ = init_std
        UpperCamelCase__ = init_xavier_std
        UpperCamelCase__ = encoder_layerdrop
        UpperCamelCase__ = decoder_layerdrop
        UpperCamelCase__ = encoder_layers
        UpperCamelCase__ = auxiliary_loss
        UpperCamelCase__ = position_embedding_type
        UpperCamelCase__ = backbone
        UpperCamelCase__ = use_pretrained_backbone
        UpperCamelCase__ = dilation
        # Hungarian matcher
        UpperCamelCase__ = class_cost
        UpperCamelCase__ = bbox_cost
        UpperCamelCase__ = giou_cost
        # Loss coefficients
        UpperCamelCase__ = mask_loss_coefficient
        UpperCamelCase__ = dice_loss_coefficient
        UpperCamelCase__ = bbox_loss_coefficient
        UpperCamelCase__ = giou_loss_coefficient
        UpperCamelCase__ = eos_coefficient
        super().__init__(is_encoder_decoder=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )

    @property
    def UpperCAmelCase_ (self ):
        return self.encoder_attention_heads

    @property
    def UpperCAmelCase_ (self ):
        return self.d_model

    @classmethod
    def UpperCAmelCase_ (cls , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
        return cls(backbone_config=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )

    def UpperCAmelCase_ (self ):
        UpperCamelCase__ = copy.deepcopy(self.__dict__ )
        if output["backbone_config"] is not None:
            UpperCamelCase__ = self.backbone_config.to_dict()
        UpperCamelCase__ = self.__class__.model_type
        return output


class __A( __lowerCamelCase ):
    """simple docstring"""

    SCREAMING_SNAKE_CASE__ = version.parse("""1.11""" )

    @property
    def UpperCAmelCase_ (self ):
        return OrderedDict(
            [
                ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
                ("""pixel_mask""", {0: """batch"""}),
            ]
        )

    @property
    def UpperCAmelCase_ (self ):
        return 1E-5

    @property
    def UpperCAmelCase_ (self ):
        return 12
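If the class above corresponds to transformers' DetrConfig (an assumption on my part, based on model_type = "detr"), the attribute_map exposes d_model and encoder_attention_heads under generic names:

from transformers import DetrConfig

config = DetrConfig()  # defaults: 100 queries, timm "resnet50" backbone
print(config.hidden_size)          # alias of d_model -> 256
print(config.num_attention_heads)  # alias of encoder_attention_heads -> 8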
86
def __magic_name__ ( __a : int ):
    '''simple docstring'''
    UpperCamelCase__ = [[0 for _ in range(__a )] for _ in range(m + 1 )]
    for i in range(m + 1 ):
        UpperCamelCase__ = 1
    for n in range(m + 1 ):
        for k in range(1 , __a ):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]
    return memo[m][m - 1]


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        try:
            lowerCamelCase_ = int(input('''Enter a number: ''').strip())
            print(partition(n))
        except ValueError:
            print('''Please enter a number.''')
    else:
        try:
            lowerCamelCase_ = int(sys.argv[1])
            print(partition(n))
        except ValueError:
            print('''Please pass a number.''')
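For reference, the same dynamic program with readable names (mine): memo[n][k] counts partitions of n into parts no larger than k + 1, so the answer for m is memo[m][m - 1].

def partition(m: int) -> int:
    # memo[n][k]: number of partitions of n with every part <= k + 1.
    memo = [[0] * m for _ in range(m + 1)]
    for n in range(m + 1):
        memo[n][0] = 1  # one way using only parts of size 1
    for n in range(m + 1):
        for k in range(1, m):
            memo[n][k] += memo[n][k - 1]          # no part of size k + 1
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]  # use a part of size k + 1
    return memo[m][m - 1]


assert partition(5) == 7  # 5, 4+1, 3+2, 3+1+1, 2+2+1, 2+1+1+1, 1+1+1+1+1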
86
1