code
stringlengths
82
54.1k
code_codestyle
int64
0
699
style_context
stringlengths
111
35.6k
style_context_codestyle
int64
0
699
label
int64
0
1
import unittest from transformers import is_vision_available from transformers.pipelines import pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class lowercase_ : @staticmethod def __UpperCamelCase ( *lowercase_ , **lowercase_) -> str: pass @is_pipeline_test @require_vision class lowercase_ (unittest.TestCase ): @require_torch def __UpperCamelCase ( self) -> Union[str, Any]: a__ =pipeline( model='hf-internal-testing/tiny-random-clip-zero-shot-image-classification' , ) a__ =Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png') a__ =image_classifier(lowercase_ , candidate_labels=['a', 'b', 'c']) # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across # python and torch versions. self.assertIn( nested_simplify(lowercase_) , [ [{'score': 0.3_33, 'label': 'a'}, {'score': 0.3_33, 'label': 'b'}, {'score': 0.3_33, 'label': 'c'}], [{'score': 0.3_33, 'label': 'a'}, {'score': 0.3_33, 'label': 'c'}, {'score': 0.3_33, 'label': 'b'}], ] , ) a__ =image_classifier([image] * 5 , candidate_labels=['A', 'B', 'C'] , batch_size=2) self.assertEqual( nested_simplify(lowercase_) , [ [ {'score': 0.3_33, 'label': ANY(lowercase_)}, {'score': 0.3_33, 'label': ANY(lowercase_)}, {'score': 0.3_33, 'label': ANY(lowercase_)}, ], [ {'score': 0.3_33, 'label': ANY(lowercase_)}, {'score': 0.3_33, 'label': ANY(lowercase_)}, {'score': 0.3_33, 'label': ANY(lowercase_)}, ], [ {'score': 0.3_33, 'label': ANY(lowercase_)}, {'score': 0.3_33, 'label': ANY(lowercase_)}, {'score': 0.3_33, 'label': ANY(lowercase_)}, ], [ {'score': 0.3_33, 'label': ANY(lowercase_)}, {'score': 0.3_33, 'label': ANY(lowercase_)}, {'score': 0.3_33, 'label': ANY(lowercase_)}, ], [ {'score': 0.3_33, 'label': ANY(lowercase_)}, {'score': 0.3_33, 'label': ANY(lowercase_)}, {'score': 0.3_33, 
'label': ANY(lowercase_)}, ], ] , ) @require_tf def __UpperCamelCase ( self) -> Optional[int]: a__ =pipeline( model='hf-internal-testing/tiny-random-clip-zero-shot-image-classification' , framework='tf') a__ =Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png') a__ =image_classifier(lowercase_ , candidate_labels=['a', 'b', 'c']) self.assertEqual( nested_simplify(lowercase_) , [{'score': 0.3_33, 'label': 'a'}, {'score': 0.3_33, 'label': 'b'}, {'score': 0.3_33, 'label': 'c'}] , ) a__ =image_classifier([image] * 5 , candidate_labels=['A', 'B', 'C'] , batch_size=2) self.assertEqual( nested_simplify(lowercase_) , [ [ {'score': 0.3_33, 'label': ANY(lowercase_)}, {'score': 0.3_33, 'label': ANY(lowercase_)}, {'score': 0.3_33, 'label': ANY(lowercase_)}, ], [ {'score': 0.3_33, 'label': ANY(lowercase_)}, {'score': 0.3_33, 'label': ANY(lowercase_)}, {'score': 0.3_33, 'label': ANY(lowercase_)}, ], [ {'score': 0.3_33, 'label': ANY(lowercase_)}, {'score': 0.3_33, 'label': ANY(lowercase_)}, {'score': 0.3_33, 'label': ANY(lowercase_)}, ], [ {'score': 0.3_33, 'label': ANY(lowercase_)}, {'score': 0.3_33, 'label': ANY(lowercase_)}, {'score': 0.3_33, 'label': ANY(lowercase_)}, ], [ {'score': 0.3_33, 'label': ANY(lowercase_)}, {'score': 0.3_33, 'label': ANY(lowercase_)}, {'score': 0.3_33, 'label': ANY(lowercase_)}, ], ] , ) @slow @require_torch def __UpperCamelCase ( self) -> List[str]: a__ =pipeline( task='zero-shot-image-classification' , model='openai/clip-vit-base-patch32' , ) # This is an image of 2 cats with remotes and no planes a__ =Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png') a__ =image_classifier(lowercase_ , candidate_labels=['cat', 'plane', 'remote']) self.assertEqual( nested_simplify(lowercase_) , [ {'score': 0.5_11, 'label': 'remote'}, {'score': 0.4_85, 'label': 'cat'}, {'score': 0.0_04, 'label': 'plane'}, ] , ) a__ =image_classifier([image] * 5 , candidate_labels=['cat', 'plane', 'remote'] , batch_size=2) self.assertEqual( 
nested_simplify(lowercase_) , [ [ {'score': 0.5_11, 'label': 'remote'}, {'score': 0.4_85, 'label': 'cat'}, {'score': 0.0_04, 'label': 'plane'}, ], ] * 5 , ) @slow @require_tf def __UpperCamelCase ( self) -> List[Any]: a__ =pipeline( task='zero-shot-image-classification' , model='openai/clip-vit-base-patch32' , framework='tf') # This is an image of 2 cats with remotes and no planes a__ =Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png') a__ =image_classifier(lowercase_ , candidate_labels=['cat', 'plane', 'remote']) self.assertEqual( nested_simplify(lowercase_) , [ {'score': 0.5_11, 'label': 'remote'}, {'score': 0.4_85, 'label': 'cat'}, {'score': 0.0_04, 'label': 'plane'}, ] , ) a__ =image_classifier([image] * 5 , candidate_labels=['cat', 'plane', 'remote'] , batch_size=2) self.assertEqual( nested_simplify(lowercase_) , [ [ {'score': 0.5_11, 'label': 'remote'}, {'score': 0.4_85, 'label': 'cat'}, {'score': 0.0_04, 'label': 'plane'}, ], ] * 5 , )
20
from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline else: from .pipeline_unclip import UnCLIPPipeline from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline from .text_proj import UnCLIPTextProjModel
64
0
from copy import deepcopy import torch import torch.nn.functional as F from torch.optim import AdamW from torch.optim.lr_scheduler import LambdaLR from torch.utils.data import DataLoader from accelerate.accelerator import Accelerator from accelerate.state import GradientState from accelerate.test_utils import RegressionDataset, RegressionModel from accelerate.utils import DistributedType, is_torch_version, set_seed def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ): for param, grad_param in zip(model_a.parameters() , model_b.parameters() ): if not param.requires_grad: continue if not did_step: # Grads should not be in sync assert ( torch.allclose(param.grad , grad_param.grad ) is False ), F"Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})" else: # Grads should be in sync assert ( torch.allclose(param.grad , grad_param.grad ) is True ), F"Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})" def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase=True ): model.train() __magic_name__ : Dict =model(lowerCamelCase ) __magic_name__ : int =F.mse_loss(lowerCamelCase , target.to(output.device ) ) if not do_backward: loss /= accelerator.gradient_accumulation_steps loss.backward() else: accelerator.backward(lowerCamelCase ) def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase=False ): set_seed(42 ) __magic_name__ : Optional[int] =RegressionModel() __magic_name__ : Optional[Any] =deepcopy(lowerCamelCase ) __magic_name__ : List[str] =RegressionDataset(length=80 ) __magic_name__ : Dict =DataLoader(lowerCamelCase , batch_size=16 ) model.to(accelerator.device ) if sched: __magic_name__ : Optional[Any] =AdamW(params=model.parameters() , lr=1E-3 ) __magic_name__ : Optional[Any] =AdamW(params=ddp_model.parameters() , lr=1E-3 ) 
__magic_name__ : Optional[int] =LambdaLR(lowerCamelCase , lr_lambda=lambda lowerCamelCase : epoch**0.6_5 ) __magic_name__ : Dict =LambdaLR(lowerCamelCase , lr_lambda=lambda lowerCamelCase : epoch**0.6_5 ) # Make a copy of `model` if sched: __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ : Dict =accelerator.prepare(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) else: __magic_name__ , __magic_name__ : int =accelerator.prepare(lowerCamelCase , lowerCamelCase ) if sched: return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched) return model, ddp_model, dataloader def lowerCAmelCase_ ( lowerCamelCase ): # Test when on a single CPU or GPU that the context manager does nothing __magic_name__ , __magic_name__ , __magic_name__ : Dict =get_training_setup(lowerCamelCase ) # Use a single batch __magic_name__ , __magic_name__ : Optional[Any] =next(iter(lowerCamelCase ) ).values() for iteration in range(3 ): # Gather the distributed inputs and targs for the base model __magic_name__ , __magic_name__ : Any =accelerator.gather((ddp_input, ddp_target) ) __magic_name__ , __magic_name__ : str =input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) # Do "gradient accumulation" (noop) if iteration % 2 == 0: # Accumulate grads locally with accelerator.no_sync(lowerCamelCase ): step_model(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) else: # Sync grads step_model(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync check_model_parameters(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue assert torch.allclose( param.grad , ddp_param.grad ), F"Gradients not 
in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})" # Shuffle ddp_input on each iteration torch.manual_seed(1337 + iteration ) __magic_name__ : Dict =ddp_input[torch.randperm(len(lowerCamelCase ) )] def lowerCAmelCase_ ( lowerCamelCase ): # Test on distributed setup that context manager behaves properly __magic_name__ , __magic_name__ , __magic_name__ : Dict =get_training_setup(lowerCamelCase ) # Use a single batch __magic_name__ , __magic_name__ : Any =next(iter(lowerCamelCase ) ).values() for iteration in range(3 ): # Gather the distributed inputs and targs for the base model __magic_name__ , __magic_name__ : Tuple =accelerator.gather((ddp_input, ddp_target) ) __magic_name__ , __magic_name__ : Any =input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) # Do "gradient accumulation" (noop) if iteration % 2 == 0: # Accumulate grads locally with accelerator.no_sync(lowerCamelCase ): step_model(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) else: # Sync grads step_model(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) # DDP model and model should only be in sync when not (iteration % 2 == 0) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue if iteration % 2 == 0: # Grads should not be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is False ), F"Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})" else: # Grads should be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is True ), F"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})" # Shuffle ddp_input on each iteration torch.manual_seed(1337 + iteration ) __magic_name__ : Optional[Any] 
=ddp_input[torch.randperm(len(lowerCamelCase ) )] def lowerCAmelCase_ ( lowerCamelCase=False , lowerCamelCase=False ): __magic_name__ : List[Any] =Accelerator( split_batches=lowerCamelCase , dispatch_batches=lowerCamelCase , gradient_accumulation_steps=2 ) # Test that context manager behaves properly __magic_name__ , __magic_name__ , __magic_name__ : str =get_training_setup(lowerCamelCase ) for iteration, batch in enumerate(lowerCamelCase ): __magic_name__ , __magic_name__ : List[Any] =batch.values() # Gather the distributed inputs and targs for the base model __magic_name__ , __magic_name__ : Optional[Any] =accelerator.gather((ddp_input, ddp_target) ) __magic_name__ , __magic_name__ : str =input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) # Do "gradient accumulation" (noop) with accelerator.accumulate(lowerCamelCase ): step_model(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) # DDP model and model should only be in sync when not (iteration % 2 == 0) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue if ((iteration + 1) % 2 == 0) or (iteration == len(lowerCamelCase ) - 1): # Grads should be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is True ), F"Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})" else: # Grads should not be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is False ), F"Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})" # Shuffle ddp_input on each iteration torch.manual_seed(1337 + iteration ) __magic_name__ : List[Any] =ddp_input[torch.randperm(len(lowerCamelCase ) )] GradientState._reset_state() def lowerCAmelCase_ ( 
lowerCamelCase=False , lowerCamelCase=False ): __magic_name__ : Union[str, Any] =Accelerator( split_batches=lowerCamelCase , dispatch_batches=lowerCamelCase , gradient_accumulation_steps=2 ) # Test that context manager behaves properly __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ : Any =get_training_setup(lowerCamelCase , lowerCamelCase ) for iteration, batch in enumerate(lowerCamelCase ): __magic_name__ , __magic_name__ : str =batch.values() # Gather the distributed inputs and targs for the base model __magic_name__ , __magic_name__ : List[Any] =accelerator.gather((ddp_input, ddp_target) ) __magic_name__ , __magic_name__ : List[Any] =input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" model.train() ddp_model.train() step_model(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) opt.step() if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(lowerCamelCase )): if split_batches: sched.step() else: for _ in range(accelerator.num_processes ): sched.step() opt.zero_grad() # Perform gradient accumulation under wrapper with accelerator.accumulate(lowerCamelCase ): step_model(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) ddp_opt.step() ddp_sched.step() ddp_opt.zero_grad() # Learning rates should be the same assert ( opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"] ), F"Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]['lr']}\nDDP opt: {ddp_opt.param_groups[0]['lr']}\n" __magic_name__ : Union[str, Any] =(((iteration + 1) % 2) == 0) or ((iteration + 1) == len(lowerCamelCase )) if accelerator.num_processes > 1: check_model_parameters(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) # Shuffle ddp_input on each iteration torch.manual_seed(1337 + iteration ) GradientState._reset_state() def lowerCAmelCase_ ( ): __magic_name__ : 
Optional[int] =Accelerator() __magic_name__ : List[Any] =RegressionDataset(length=80 ) __magic_name__ : Union[str, Any] =DataLoader(lowerCamelCase , batch_size=16 ) __magic_name__ : Tuple =RegressionDataset(length=96 ) __magic_name__ : Dict =DataLoader(lowerCamelCase , batch_size=16 ) __magic_name__ , __magic_name__ : Union[str, Any] =accelerator.prepare(lowerCamelCase , lowerCamelCase ) assert accelerator.gradient_state.active_dataloader is None for iteration, _ in enumerate(lowerCamelCase ): assert id(accelerator.gradient_state.active_dataloader ) == id(lowerCamelCase ) if iteration < len(lowerCamelCase ) - 1: assert not accelerator.gradient_state.end_of_dataloader if iteration == 1: for batch_num, _ in enumerate(lowerCamelCase ): assert id(accelerator.gradient_state.active_dataloader ) == id(lowerCamelCase ) if batch_num < len(lowerCamelCase ) - 1: assert not accelerator.gradient_state.end_of_dataloader else: assert accelerator.gradient_state.end_of_dataloader else: assert accelerator.gradient_state.end_of_dataloader assert accelerator.gradient_state.active_dataloader is None def lowerCAmelCase_ ( ): __magic_name__ : str =Accelerator() __magic_name__ : str =accelerator.state if state.local_process_index == 0: print("""**Test `accumulate` gradient accumulation with dataloader break**""" ) test_dataloader_break() if state.distributed_type == DistributedType.NO: if state.local_process_index == 0: print("""**Test NOOP `no_sync` context manager**""" ) test_noop_sync(lowerCamelCase ) if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU): if state.local_process_index == 0: print("""**Test Distributed `no_sync` context manager**""" ) test_distributed_sync(lowerCamelCase ) if state.distributed_type == DistributedType.MULTI_GPU: for split_batch in [True, False]: for dispatch_batches in [True, False]: if state.local_process_index == 0: print( """**Test `accumulate` gradient accumulation, """ , F"`split_batches={split_batch}` and 
`dispatch_batches={dispatch_batches}`**" , ) test_gradient_accumulation(lowerCamelCase , lowerCamelCase ) # Currently will break on torch 2.0 +, need to investigate why if is_torch_version("""<""" , """2.0""" ) or state.distributed_type == DistributedType.NO: if state.local_process_index == 0: print( """**Test `accumulate` gradient accumulation with optimizer and scheduler, """ , """`split_batches=False`, `dispatch_batches=False`**""" , ) test_gradient_accumulation_with_opt_and_scheduler() if state.distributed_type == DistributedType.MULTI_GPU: for split_batch in [True, False]: for dispatch_batches in [True, False]: if not split_batch and not dispatch_batches: continue if state.local_process_index == 0: print( """**Test `accumulate` gradient accumulation with optimizer and scheduler, """ , F"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**" , ) test_gradient_accumulation_with_opt_and_scheduler(lowerCamelCase , lowerCamelCase ) def lowerCAmelCase_ ( lowerCamelCase ): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
21
import argparse from pathlib import Path import torch from transformers import OPTConfig, OPTModel from transformers.utils import logging logging.set_verbosity_info() lowercase_ : Optional[int] = logging.get_logger(__name__) def A__ ( snake_case_ : List[Any] ): SCREAMING_SNAKE_CASE__: str= torch.load(snake_case_ , map_location='''cpu''' ) if "model" in sd.keys(): SCREAMING_SNAKE_CASE__: Any= torch.load(snake_case_ , map_location='''cpu''' )['''model'''] # pop unnecessary weights SCREAMING_SNAKE_CASE__: List[str]= [ '''decoder.version''', '''decoder.output_projection.weight''', ] for key in keys_to_delete: if key in sd: sd.pop(snake_case_ ) SCREAMING_SNAKE_CASE__: str= { '''decoder.project_in_dim.weight''': '''decoder.project_in.weight''', '''decoder.project_out_dim.weight''': '''decoder.project_out.weight''', '''decoder.layer_norm.weight''': '''decoder.final_layer_norm.weight''', '''decoder.layer_norm.bias''': '''decoder.final_layer_norm.bias''', } for old_key, new_key in keys_to_rename.items(): if old_key in sd: SCREAMING_SNAKE_CASE__: Union[str, Any]= sd.pop(snake_case_ ) SCREAMING_SNAKE_CASE__: int= list(sd.keys() ) for key in keys: if ".qkv_proj." 
in key: SCREAMING_SNAKE_CASE__: int= sd[key] # We split QKV in separate Q,K,V SCREAMING_SNAKE_CASE__: Optional[Any]= key.replace('''.qkv_proj.''' , '''.q_proj.''' ) SCREAMING_SNAKE_CASE__: Optional[int]= key.replace('''.qkv_proj.''' , '''.k_proj.''' ) SCREAMING_SNAKE_CASE__: List[str]= key.replace('''.qkv_proj.''' , '''.v_proj.''' ) SCREAMING_SNAKE_CASE__: Optional[int]= value.shape[0] assert depth % 3 == 0 # `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming: # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97 SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: List[str]= torch.split(snake_case_ , depth // 3 , dim=0 ) SCREAMING_SNAKE_CASE__: List[Any]= q SCREAMING_SNAKE_CASE__: Any= k SCREAMING_SNAKE_CASE__: Optional[Any]= v del sd[key] return sd @torch.no_grad() def A__ ( snake_case_ : Optional[int] , snake_case_ : Optional[int] , snake_case_ : Tuple=None ): SCREAMING_SNAKE_CASE__: List[str]= load_checkpoint(snake_case_ ) if config is not None: SCREAMING_SNAKE_CASE__: Any= OPTConfig.from_pretrained(snake_case_ ) else: SCREAMING_SNAKE_CASE__: Optional[int]= OPTConfig() SCREAMING_SNAKE_CASE__: Union[str, Any]= OPTModel(snake_case_ ).half().eval() model.load_state_dict(snake_case_ ) # Check results Path(snake_case_ ).mkdir(exist_ok=snake_case_ ) model.save_pretrained(snake_case_ ) if __name__ == "__main__": lowercase_ : Optional[int] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--fairseq_path', type=str, help=( 'path to fairseq checkpoint in correct format. 
You can find all checkpoints in the correct format here:' ' https://huggingface.co/models?other=opt_metasq' ), ) parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--hf_config', default=None, type=str, help='Define HF config.') lowercase_ : int = parser.parse_args() convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
64
0
'''simple docstring''' from ..utils import DummyObject, requires_backends class A ( metaclass=_a ): lowercase_ = ['flax'] def __init__( self : str , *lowerCAmelCase_ : str , **lowerCAmelCase_ : List[Any] ) -> Optional[Any]: """simple docstring""" requires_backends(self , ['''flax'''] ) @classmethod def __lowerCAmelCase ( cls : Dict , *lowerCAmelCase_ : str , **lowerCAmelCase_ : Tuple ) -> Union[str, Any]: """simple docstring""" requires_backends(cls , ['''flax'''] ) @classmethod def __lowerCAmelCase ( cls : str , *lowerCAmelCase_ : Dict , **lowerCAmelCase_ : Dict ) -> str: """simple docstring""" requires_backends(cls , ['''flax'''] ) class A ( metaclass=_a ): lowercase_ = ['flax'] def __init__( self : List[Any] , *lowerCAmelCase_ : List[Any] , **lowerCAmelCase_ : Optional[int] ) -> Union[str, Any]: """simple docstring""" requires_backends(self , ['''flax'''] ) @classmethod def __lowerCAmelCase ( cls : Union[str, Any] , *lowerCAmelCase_ : List[str] , **lowerCAmelCase_ : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" requires_backends(cls , ['''flax'''] ) @classmethod def __lowerCAmelCase ( cls : Optional[int] , *lowerCAmelCase_ : int , **lowerCAmelCase_ : int ) -> Union[str, Any]: """simple docstring""" requires_backends(cls , ['''flax'''] ) class A ( metaclass=_a ): lowercase_ = ['flax'] def __init__( self : Any , *lowerCAmelCase_ : List[Any] , **lowerCAmelCase_ : Optional[Any] ) -> Optional[int]: """simple docstring""" requires_backends(self , ['''flax'''] ) @classmethod def __lowerCAmelCase ( cls : List[str] , *lowerCAmelCase_ : Tuple , **lowerCAmelCase_ : List[Any] ) -> Tuple: """simple docstring""" requires_backends(cls , ['''flax'''] ) @classmethod def __lowerCAmelCase ( cls : Tuple , *lowerCAmelCase_ : Tuple , **lowerCAmelCase_ : List[Any] ) -> List[str]: """simple docstring""" requires_backends(cls , ['''flax'''] ) class A ( metaclass=_a ): lowercase_ = ['flax'] def __init__( self : List[str] , *lowerCAmelCase_ : Optional[int] , 
**lowerCAmelCase_ : Union[str, Any] ) -> List[str]: """simple docstring""" requires_backends(self , ['''flax'''] ) @classmethod def __lowerCAmelCase ( cls : List[str] , *lowerCAmelCase_ : Union[str, Any] , **lowerCAmelCase_ : Dict ) -> Any: """simple docstring""" requires_backends(cls , ['''flax'''] ) @classmethod def __lowerCAmelCase ( cls : int , *lowerCAmelCase_ : Any , **lowerCAmelCase_ : int ) -> Any: """simple docstring""" requires_backends(cls , ['''flax'''] ) class A ( metaclass=_a ): lowercase_ = ['flax'] def __init__( self : Any , *lowerCAmelCase_ : Optional[int] , **lowerCAmelCase_ : List[Any] ) -> List[Any]: """simple docstring""" requires_backends(self , ['''flax'''] ) @classmethod def __lowerCAmelCase ( cls : Any , *lowerCAmelCase_ : str , **lowerCAmelCase_ : Any ) -> Union[str, Any]: """simple docstring""" requires_backends(cls , ['''flax'''] ) @classmethod def __lowerCAmelCase ( cls : int , *lowerCAmelCase_ : List[Any] , **lowerCAmelCase_ : str ) -> int: """simple docstring""" requires_backends(cls , ['''flax'''] ) class A ( metaclass=_a ): lowercase_ = ['flax'] def __init__( self : Tuple , *lowerCAmelCase_ : Optional[Any] , **lowerCAmelCase_ : List[str] ) -> Optional[Any]: """simple docstring""" requires_backends(self , ['''flax'''] ) @classmethod def __lowerCAmelCase ( cls : Optional[int] , *lowerCAmelCase_ : List[str] , **lowerCAmelCase_ : int ) -> Optional[Any]: """simple docstring""" requires_backends(cls , ['''flax'''] ) @classmethod def __lowerCAmelCase ( cls : str , *lowerCAmelCase_ : Dict , **lowerCAmelCase_ : str ) -> List[str]: """simple docstring""" requires_backends(cls , ['''flax'''] ) class A ( metaclass=_a ): lowercase_ = ['flax'] def __init__( self : Optional[Any] , *lowerCAmelCase_ : str , **lowerCAmelCase_ : Dict ) -> List[Any]: """simple docstring""" requires_backends(self , ['''flax'''] ) @classmethod def __lowerCAmelCase ( cls : List[Any] , *lowerCAmelCase_ : Union[str, Any] , **lowerCAmelCase_ : Optional[int] ) -> Tuple: 
"""simple docstring""" requires_backends(cls , ['''flax'''] ) @classmethod def __lowerCAmelCase ( cls : Any , *lowerCAmelCase_ : Any , **lowerCAmelCase_ : str ) -> List[str]: """simple docstring""" requires_backends(cls , ['''flax'''] ) class A ( metaclass=_a ): lowercase_ = ['flax'] def __init__( self : str , *lowerCAmelCase_ : int , **lowerCAmelCase_ : Optional[int] ) -> List[Any]: """simple docstring""" requires_backends(self , ['''flax'''] ) @classmethod def __lowerCAmelCase ( cls : Optional[Any] , *lowerCAmelCase_ : int , **lowerCAmelCase_ : List[str] ) -> Any: """simple docstring""" requires_backends(cls , ['''flax'''] ) @classmethod def __lowerCAmelCase ( cls : List[Any] , *lowerCAmelCase_ : Optional[Any] , **lowerCAmelCase_ : Optional[int] ) -> Union[str, Any]: """simple docstring""" requires_backends(cls , ['''flax'''] ) class A ( metaclass=_a ): lowercase_ = ['flax'] def __init__( self : Dict , *lowerCAmelCase_ : Union[str, Any] , **lowerCAmelCase_ : Optional[int] ) -> Optional[Any]: """simple docstring""" requires_backends(self , ['''flax'''] ) @classmethod def __lowerCAmelCase ( cls : int , *lowerCAmelCase_ : List[str] , **lowerCAmelCase_ : Optional[int] ) -> Tuple: """simple docstring""" requires_backends(cls , ['''flax'''] ) @classmethod def __lowerCAmelCase ( cls : Any , *lowerCAmelCase_ : Optional[int] , **lowerCAmelCase_ : str ) -> List[str]: """simple docstring""" requires_backends(cls , ['''flax'''] ) class A ( metaclass=_a ): lowercase_ = ['flax'] def __init__( self : Dict , *lowerCAmelCase_ : List[Any] , **lowerCAmelCase_ : List[Any] ) -> Optional[int]: """simple docstring""" requires_backends(self , ['''flax'''] ) @classmethod def __lowerCAmelCase ( cls : Tuple , *lowerCAmelCase_ : Union[str, Any] , **lowerCAmelCase_ : Union[str, Any] ) -> List[str]: """simple docstring""" requires_backends(cls , ['''flax'''] ) @classmethod def __lowerCAmelCase ( cls : Union[str, Any] , *lowerCAmelCase_ : Optional[Any] , **lowerCAmelCase_ : Union[str, Any] ) 
-> int: """simple docstring""" requires_backends(cls , ['''flax'''] ) class A ( metaclass=_a ): lowercase_ = ['flax'] def __init__( self : List[str] , *lowerCAmelCase_ : Dict , **lowerCAmelCase_ : List[Any] ) -> str: """simple docstring""" requires_backends(self , ['''flax'''] ) @classmethod def __lowerCAmelCase ( cls : Optional[int] , *lowerCAmelCase_ : List[Any] , **lowerCAmelCase_ : Optional[int] ) -> Tuple: """simple docstring""" requires_backends(cls , ['''flax'''] ) @classmethod def __lowerCAmelCase ( cls : Union[str, Any] , *lowerCAmelCase_ : int , **lowerCAmelCase_ : Union[str, Any] ) -> Optional[Any]: """simple docstring""" requires_backends(cls , ['''flax'''] ) class A ( metaclass=_a ): lowercase_ = ['flax'] def __init__( self : Dict , *lowerCAmelCase_ : List[Any] , **lowerCAmelCase_ : str ) -> Optional[Any]: """simple docstring""" requires_backends(self , ['''flax'''] ) @classmethod def __lowerCAmelCase ( cls : int , *lowerCAmelCase_ : List[str] , **lowerCAmelCase_ : int ) -> Any: """simple docstring""" requires_backends(cls , ['''flax'''] ) @classmethod def __lowerCAmelCase ( cls : Optional[Any] , *lowerCAmelCase_ : Any , **lowerCAmelCase_ : Optional[Any] ) -> Optional[int]: """simple docstring""" requires_backends(cls , ['''flax'''] ) class A ( metaclass=_a ): lowercase_ = ['flax'] def __init__( self : List[str] , *lowerCAmelCase_ : str , **lowerCAmelCase_ : Optional[int] ) -> Dict: """simple docstring""" requires_backends(self , ['''flax'''] ) @classmethod def __lowerCAmelCase ( cls : Union[str, Any] , *lowerCAmelCase_ : Optional[int] , **lowerCAmelCase_ : Any ) -> Any: """simple docstring""" requires_backends(cls , ['''flax'''] ) @classmethod def __lowerCAmelCase ( cls : Any , *lowerCAmelCase_ : str , **lowerCAmelCase_ : int ) -> Optional[int]: """simple docstring""" requires_backends(cls , ['''flax'''] )
22
def A__ ( snake_case_ : float , snake_case_ : float ): if density <= 0: raise ValueError('''Impossible fluid density''' ) if bulk_modulus <= 0: raise ValueError('''Impossible bulk modulus''' ) return (bulk_modulus / density) ** 0.5 if __name__ == "__main__": import doctest doctest.testmod()
64
0
from __future__ import annotations

# A 9x9 sudoku board; 0 marks an empty cell.
Matrix = list[list[int]]

# assigning initial values to the grid
initial_grid: Matrix = [
    [3, 0, 6, 5, 0, 8, 4, 0, 0],
    [5, 2, 0, 0, 0, 0, 0, 0, 0],
    [0, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]

# a grid with no solution
no_solution: Matrix = [
    [5, 0, 6, 5, 0, 8, 4, 0, 3],
    [5, 2, 0, 0, 0, 0, 0, 0, 2],
    [1, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]


def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
    """Return True if digit ``n`` may be placed at ``(row, column)``.

    A placement is safe when ``n`` does not already appear in the same
    row, the same column, or the enclosing 3x3 box.
    """
    for i in range(9):
        if grid[row][i] == n or grid[i][column] == n:
            return False
    # (row - row % 3, column - column % 3) is the top-left corner of the box.
    for i in range(3):
        for j in range(3):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False
    return True


def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
    """Return the coordinates of the first empty cell, or None if full."""
    for i in range(9):
        for j in range(9):
            if grid[i][j] == 0:
                return i, j
    return None


def sudoku(grid: Matrix) -> Matrix | None:
    """Solve ``grid`` in place by backtracking.

    Returns the (mutated) solved grid, or None when no solution exists.
    """
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid
    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            # Undo the tentative placement before trying the next digit.
            grid[row][column] = 0
    return None


def print_solution(grid: Matrix) -> None:
    """Pretty-print the grid, one row per line."""
    for row in grid:
        for cell in row:
            print(cell, end=' ')
        print()


if __name__ == "__main__":
    # make a copy of grid so that you can compare with the unmodified grid
    for example_grid in (initial_grid, no_solution):
        print("""\nExample grid:\n""" + """=""" * 2_0)
        print_solution(example_grid)
        print("""\nExample grid solution:""")
        solution = sudoku(example_grid)
        if solution is not None:
            print_solution(solution)
        else:
            print("""Cannot find a solution.""")
23
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Maps each submodule to the public names it exports; consumed by _LazyModule.
_import_structure = {
    'configuration_blip_2': [
        'BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'Blip2Config',
        'Blip2QFormerConfig',
        'Blip2VisionConfig',
    ],
    'processing_blip_2': ['Blip2Processor'],
}

# Modeling code is only exposed when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_blip_2'] = [
        'BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST',
        'Blip2Model',
        'Blip2QFormerModel',
        'Blip2PreTrainedModel',
        'Blip2ForConditionalGeneration',
        'Blip2VisionModel',
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports; names match _import_structure.
    from .configuration_blip_2 import (
        BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Blip2Config,
        Blip2QFormerConfig,
        Blip2VisionConfig,
    )
    from .processing_blip_2 import Blip2Processor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blip_2 import (
            BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Blip2ForConditionalGeneration,
            Blip2Model,
            Blip2PreTrainedModel,
            Blip2QFormerModel,
            Blip2VisionModel,
        )

else:
    import sys

    # At runtime, replace this module with a lazy proxy so submodules load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
64
0
"""Abstract base classes for dataset readers (file-based and in-memory)."""
from abc import ABC, abstractmethod
from typing import Optional, Union

from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike


class AbstractDatasetReader(ABC):
    """Base class for readers that build a dataset from path(s) on disk."""

    def __init__(
        self,
        path_or_paths: Optional[NestedDataStructureLike[PathLike]] = None,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: Optional[str] = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ) -> None:
        self.path_or_paths = path_or_paths
        # A mapping of paths means one entry per split; otherwise default to "train".
        self.split = split if split or isinstance(path_or_paths, dict) else '''train'''
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
        """Materialize and return the dataset(s); implemented by subclasses."""
        pass


class AbstractDatasetInputStream(ABC):
    """Base class for readers that build a dataset from an in-memory source."""

    def __init__(
        self,
        features: Optional[Features] = None,
        cache_dir: Optional[str] = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ) -> None:
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, IterableDataset]:
        """Materialize and return the dataset; implemented by subclasses."""
        pass
24
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import ClassLabel, Features, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class TextClassification(TaskTemplate):
    """Task template describing a text-classification dataset schema."""

    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="text-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    text_column: str = "text"
    label_column: str = "labels"

    def align_with_features(self, features: Features) -> "TextClassification":
        """Return a copy of this template whose label schema uses the dataset's ClassLabel.

        Raises:
            ValueError: if the label column is missing or is not a ClassLabel.
        """
        if self.label_column not in features:
            raise ValueError(f'Column {self.label_column} is not present in features.')
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f'Column {self.label_column} is not a ClassLabel.')
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        # The dataclass is frozen, so write through __dict__ on the copy.
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        """Map dataset column names onto the canonical (text, labels) pair."""
        return {
            self.text_column: "text",
            self.label_column: "labels",
        }
64
0
"""Convert a fairseq mBART checkpoint to a HuggingFace MBartForConditionalGeneration."""
import argparse

import torch
from torch import nn

from transformers import MBartConfig, MBartForConditionalGeneration


def remove_ignore_keys_(state_dict):
    """Drop fairseq bookkeeping entries from ``state_dict`` in place."""
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
        "decoder.output_projection.weight",
    ]
    for k in ignore_keys:
        # pop with a default so absent keys do not raise.
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    """Build a bias-free Linear layer sharing the embedding's weight data."""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_mbart_checkpoint_from_disk(
    checkpoint_path, hf_config_path="facebook/mbart-large-en-ro", finetuned=False, mbart_aa=False
):
    """Load a fairseq mBART checkpoint and return an equivalent HF model.

    Args:
        checkpoint_path: path to the fairseq ``model.pt`` file.
        hf_config_path: HF config to instantiate the architecture from.
        finetuned: whether the checkpoint is a fine-tuned model (ties lm_head).
        mbart_aa: whether the checkpoint is an mBART-50 model.
    """
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]

    mbart_config = MBartConfig.from_pretrained(hf_config_path, vocab_size=vocab_size)
    if mbart_aa and finetuned:
        # mBART-50 fine-tuned checkpoints use relu activations.
        mbart_config.activation_function = "relu"

    # fairseq stores no explicit shared embedding; reuse the decoder's.
    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = MBartForConditionalGeneration(mbart_config)
    model.model.load_state_dict(state_dict)

    if finetuned:
        model.lm_head = make_linear_from_emb(model.model.shared)

    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        'fairseq_path', type=str, help='bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'
    )
    parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument(
        '--hf_config',
        default='facebook/mbart-large-cc25',
        type=str,
        help='Which huggingface architecture to use: mbart-large',
    )
    parser.add_argument('--mbart_50', action='store_true', help='whether the model is mMART-50 checkpoint')
    parser.add_argument('--finetuned', action='store_true', help='whether the model is a fine-tuned checkpoint')
    args = parser.parse_args()
    # argparse derives the attribute name from the option string: --mbart_50 -> args.mbart_50.
    model = convert_fairseq_mbart_checkpoint_from_disk(
        args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_50
    )
    model.save_pretrained(args.pytorch_dump_folder_path)
25
import inspect
import unittest


class DependencyTester(unittest.TestCase):
    """Sanity checks that every optional backend appears in the deps table."""

    def test_diffusers_import(self):
        # The package must be importable at all.
        try:
            import diffusers  # noqa: F401
        except ImportError:
            assert False

    def test_backend_registration(self):
        import diffusers
        from diffusers.dependency_versions_table import deps

        all_classes = inspect.getmembers(diffusers, inspect.isclass)
        for cls_name, cls_module in all_classes:
            # Dummy classes record the optional backends they stand in for.
            if "dummy_" in cls_module.__module__:
                for backend in cls_module._backends:
                    # Pip package names differ from the import names for these two.
                    if backend == "k_diffusion":
                        backend = '''k-diffusion'''
                    elif backend == "invisible_watermark":
                        backend = '''invisible-watermark'''
                    assert backend in deps, f'{backend} is not in the deps table!'
64
0
'''simple docstring''' from __future__ import annotations def _a ( _lowerCamelCase ) -> None: """simple docstring""" create_state_space_tree(_lowerCamelCase , [] , 0 , [0 for i in range(len(_lowerCamelCase ) )] ) def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ) -> None: """simple docstring""" if index == len(_lowerCamelCase ): print(_lowerCamelCase ) return for i in range(len(_lowerCamelCase ) ): if not index_used[i]: current_sequence.append(sequence[i] ) __snake_case : Tuple = True create_state_space_tree(_lowerCamelCase , _lowerCamelCase , index + 1 , _lowerCamelCase ) current_sequence.pop() __snake_case : str = False __UpperCamelCase = [3, 1, 2, 4] generate_all_permutations(sequence) __UpperCamelCase = ["A", "B", "C"] generate_all_permutations(sequence_a)
26
import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized, parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , ) @pytest.mark.usefixtures("sm_env" ) @parameterized_class( [ { "framework": "pytorch", "script": "run_glue_model_parallelism.py", "model_name_or_path": "roberta-large", "instance_type": "ml.p3dn.24xlarge", "results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2}, }, { "framework": "pytorch", "script": "run_glue.py", "model_name_or_path": "roberta-large", "instance_type": "ml.p3dn.24xlarge", "results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2}, }, ] ) class _lowerCamelCase ( unittest.TestCase ): def UpperCamelCase_ ( self ) -> Any: if self.framework == "pytorch": subprocess.run( f'cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'.split() , encoding='''utf-8''' , check=lowerCAmelCase , ) assert hasattr(self , '''env''' ) def UpperCamelCase_ ( self , lowerCAmelCase ) -> Tuple: # configuration for running training on smdistributed Model Parallel SCREAMING_SNAKE_CASE__: Optional[Any]= { '''enabled''': True, '''processes_per_host''': 8, } SCREAMING_SNAKE_CASE__: Dict= { '''enabled''': True, '''parameters''': { '''microbatches''': 4, '''placement_strategy''': '''spread''', '''pipeline''': '''interleaved''', '''optimize''': '''speed''', '''partitions''': 4, '''ddp''': True, }, } SCREAMING_SNAKE_CASE__: Optional[Any]= {'''smdistributed''': {'''modelparallel''': smp_options}, '''mpi''': mpi_options} SCREAMING_SNAKE_CASE__: Dict= '''trainer''' if self.script == '''run_glue.py''' else '''smtrainer''' # 
creates estimator return HuggingFace( entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f'{self.env.base_job_name}-{instance_count}-smp-{name_extension}' , instance_count=lowerCAmelCase , instance_type=self.instance_type , debugger_hook_config=lowerCAmelCase , hyperparameters={ **self.env.hyperparameters, '''model_name_or_path''': self.model_name_or_path, '''max_steps''': 500, } , metric_definitions=self.env.metric_definitions , distribution=lowerCAmelCase , py_version='''py36''' , ) def UpperCamelCase_ ( self , lowerCAmelCase ) -> int: TrainingJobAnalytics(lowerCAmelCase ).export_csv(f'{self.env.test_path}/{job_name}_metrics.csv' ) @parameterized.expand([(1,)] ) def UpperCamelCase_ ( self , lowerCAmelCase ) -> int: # create estimator SCREAMING_SNAKE_CASE__: List[str]= self.create_estimator(lowerCAmelCase ) # run training estimator.fit() # result dataframe SCREAMING_SNAKE_CASE__: Any= TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe() # extract kpis SCREAMING_SNAKE_CASE__: Optional[int]= list(result_metrics_df[result_metrics_df.metric_name == '''eval_accuracy''']['''value'''] ) SCREAMING_SNAKE_CASE__: Optional[int]= list(result_metrics_df[result_metrics_df.metric_name == '''eval_loss''']['''value'''] ) # get train time from SageMaker job, this includes starting, preprocessing, stopping SCREAMING_SNAKE_CASE__: List[Any]= ( Session().describe_training_job(estimator.latest_training_job.name ).get('''TrainingTimeInSeconds''' , 999999 ) ) # assert kpis assert train_runtime <= self.results["train_runtime"] assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy ) assert all(t <= self.results['''eval_loss'''] for t in eval_loss ) # dump tests result into json file to share in PR with open(f'{estimator.latest_training_job.name}.json' , '''w''' ) as outfile: json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss} , 
lowerCAmelCase )
64
0
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available


# Maps each submodule to the public names it exports; consumed by _LazyModule.
_import_structure = {"tokenization_herbert": ["HerbertTokenizer"]}

# The fast tokenizer is only exposed when the `tokenizers` package is installed.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_herbert_fast"] = ["HerbertTokenizerFast"]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .tokenization_herbert import HerbertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_herbert_fast import HerbertTokenizerFast

else:
    import sys

    # At runtime, replace this module with a lazy proxy so submodules load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
27
# Nightly GPU integration test for the legacy ONNX Stable Diffusion in-painting
# pipeline: runs a real 15-step generation and compares against a stored image.
import unittest

import numpy as np

from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
    is_onnx_available,
    load_image,
    load_numpy,
    nightly,
    require_onnxruntime,
    require_torch_gpu,
)


if is_onnx_available():
    import onnxruntime as ort


@nightly
@require_onnxruntime
@require_torch_gpu
class _lowerCamelCase ( unittest.TestCase ):
    # NOTE(review): all three members below share the name ``UpperCamelCase_`` —
    # in Python the later definitions shadow the earlier ones, and the test body
    # reads ``self.gpu_provider`` / ``self.gpu_options`` which are never defined
    # here. This looks like a renaming artifact; confirm the intended names.

    @property
    def UpperCamelCase_ ( self ) -> List[str]:
        # ONNX Runtime execution-provider spec: CUDA with a capped memory arena.
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def UpperCamelCase_ ( self ) -> List[Any]:
        # NOTE(review): the SessionOptions object and the False flag are bound to
        # throwaway names, yet ``options`` (undefined) is returned — verify.
        SCREAMING_SNAKE_CASE__: Dict= ort.SessionOptions()
        SCREAMING_SNAKE_CASE__: List[str]= False
        return options

    def UpperCamelCase_ ( self ) -> int:
        # Fixture input image, its in-paint mask, and the expected output array.
        SCREAMING_SNAKE_CASE__: Dict= load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/in_paint/overture-creations-5sI6fQgYIuo.png''' )
        SCREAMING_SNAKE_CASE__: int= load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' )
        SCREAMING_SNAKE_CASE__: Tuple= load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy''' )

        # using the PNDM scheduler by default
        # NOTE(review): ``lowerCAmelCase`` is not defined in this file — presumably
        # these keyword arguments were ``None``/``False`` originally; confirm.
        SCREAMING_SNAKE_CASE__: Tuple= OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
            '''CompVis/stable-diffusion-v1-4''' , revision='''onnx''' , safety_checker=lowerCAmelCase , feature_extractor=lowerCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=lowerCAmelCase )

        SCREAMING_SNAKE_CASE__: Dict= '''A red cat sitting on a park bench'''

        # Fixed seed so the generated image is reproducible across runs.
        SCREAMING_SNAKE_CASE__: Optional[Any]= np.random.RandomState(0 )
        SCREAMING_SNAKE_CASE__: Any= pipe(
            prompt=lowerCAmelCase , image=lowerCAmelCase , mask_image=lowerCAmelCase , strength=0.75 , guidance_scale=7.5 , num_inference_steps=15 , generator=lowerCAmelCase , output_type='''np''' , )
        SCREAMING_SNAKE_CASE__: Any= output.images[0]

        # Shape check plus a loose per-pixel tolerance against the reference.
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image ).max() < 1e-2
64
0
'''simple docstring''' import itertools from dataclasses import dataclass from typing import Any, Callable, Dict, List, Optional, Union import pandas as pd import pyarrow as pa import datasets import datasets.config from datasets.features.features import require_storage_cast from datasets.table import table_cast from datasets.utils.py_utils import Literal UpperCamelCase_ = datasets.utils.logging.get_logger(__name__) UpperCamelCase_ = ["names", "prefix"] UpperCamelCase_ = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"] UpperCamelCase_ = ["encoding_errors", "on_bad_lines"] UpperCamelCase_ = ["date_format"] @dataclass class _a ( datasets.BuilderConfig ): '''simple docstring''' A : str = "," A : Optional[str] = None A : Optional[Union[int, List[int], str]] = "infer" A : Optional[List[str]] = None A : Optional[List[str]] = None A : Optional[Union[int, str, List[int], List[str]]] = None A : Optional[Union[List[int], List[str]]] = None A : Optional[str] = None A : bool = True A : Optional[Literal["c", "python", "pyarrow"]] = None A : Dict[Union[int, str], Callable[[Any], Any]] = None A : Optional[list] = None A : Optional[list] = None A : bool = False A : Optional[Union[int, List[int]]] = None A : Optional[int] = None A : Optional[Union[str, List[str]]] = None A : bool = True A : bool = True A : bool = False A : bool = True A : Optional[str] = None A : str = "." 
A : Optional[str] = None A : str = '"' A : int = 0 A : Optional[str] = None A : Optional[str] = None A : Optional[str] = None A : Optional[str] = None A : bool = True A : bool = True A : int = 0 A : bool = True A : bool = False A : Optional[str] = None A : int = 10_000 A : Optional[datasets.Features] = None A : Optional[str] = "strict" A : Literal["error", "warn", "skip"] = "error" A : Optional[str] = None def UpperCamelCase_ ( self ): '''simple docstring''' if self.delimiter is not None: SCREAMING_SNAKE_CASE : List[str] = self.delimiter if self.column_names is not None: SCREAMING_SNAKE_CASE : Tuple = self.column_names @property def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = { 'sep': self.sep, 'header': self.header, 'names': self.names, 'index_col': self.index_col, 'usecols': self.usecols, 'prefix': self.prefix, 'mangle_dupe_cols': self.mangle_dupe_cols, 'engine': self.engine, 'converters': self.converters, 'true_values': self.true_values, 'false_values': self.false_values, 'skipinitialspace': self.skipinitialspace, 'skiprows': self.skiprows, 'nrows': self.nrows, 'na_values': self.na_values, 'keep_default_na': self.keep_default_na, 'na_filter': self.na_filter, 'verbose': self.verbose, 'skip_blank_lines': self.skip_blank_lines, 'thousands': self.thousands, 'decimal': self.decimal, 'lineterminator': self.lineterminator, 'quotechar': self.quotechar, 'quoting': self.quoting, 'escapechar': self.escapechar, 'comment': self.comment, 'encoding': self.encoding, 'dialect': self.dialect, 'error_bad_lines': self.error_bad_lines, 'warn_bad_lines': self.warn_bad_lines, 'skipfooter': self.skipfooter, 'doublequote': self.doublequote, 'memory_map': self.memory_map, 'float_precision': self.float_precision, 'chunksize': self.chunksize, 'encoding_errors': self.encoding_errors, 'on_bad_lines': self.on_bad_lines, 'date_format': self.date_format, } # some kwargs must not be passed if they don't have a default value # some others are deprecated and we 
can also not pass them if they are the default value for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS: if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig(), A ): del pd_read_csv_kwargs[pd_read_csv_parameter] # Remove 2.0 new arguments if not (datasets.config.PANDAS_VERSION.major >= 2): for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS: del pd_read_csv_kwargs[pd_read_csv_parameter] # Remove 1.3 new arguments if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3): for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS: del pd_read_csv_kwargs[pd_read_csv_parameter] return pd_read_csv_kwargs class _a ( datasets.ArrowBasedBuilder ): '''simple docstring''' A : Tuple = CsvConfig def UpperCamelCase_ ( self ): '''simple docstring''' return datasets.DatasetInfo(features=self.config.features ) def UpperCamelCase_ ( self, A ): '''simple docstring''' if not self.config.data_files: raise ValueError(F"At least one data file must be specified, but got data_files={self.config.data_files}" ) SCREAMING_SNAKE_CASE : Union[str, Any] = dl_manager.download_and_extract(self.config.data_files ) if isinstance(A, (str, list, tuple) ): SCREAMING_SNAKE_CASE : List[Any] = data_files if isinstance(A, A ): SCREAMING_SNAKE_CASE : Dict = [files] SCREAMING_SNAKE_CASE : int = [dl_manager.iter_files(A ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={'files': files} )] SCREAMING_SNAKE_CASE : Optional[int] = [] for split_name, files in data_files.items(): if isinstance(A, A ): SCREAMING_SNAKE_CASE : List[Any] = [files] SCREAMING_SNAKE_CASE : List[str] = [dl_manager.iter_files(A ) for file in files] splits.append(datasets.SplitGenerator(name=A, gen_kwargs={'files': files} ) ) return splits def UpperCamelCase_ ( self, A ): '''simple docstring''' if self.config.features is not None: SCREAMING_SNAKE_CASE : Optional[Any] = 
self.config.features.arrow_schema if all(not require_storage_cast(A ) for feature in self.config.features.values() ): # cheaper cast SCREAMING_SNAKE_CASE : Any = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=A ) else: # more expensive cast; allows str <-> int/float or str to Audio for example SCREAMING_SNAKE_CASE : Union[str, Any] = table_cast(A, A ) return pa_table def UpperCamelCase_ ( self, A ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = self.config.features.arrow_schema if self.config.features else None # dtype allows reading an int column as str SCREAMING_SNAKE_CASE : List[Any] = ( { name: dtype.to_pandas_dtype() if not require_storage_cast(A ) else object for name, dtype, feature in zip(schema.names, schema.types, self.config.features.values() ) } if schema is not None else None ) for file_idx, file in enumerate(itertools.chain.from_iterable(A ) ): SCREAMING_SNAKE_CASE : Any = pd.read_csv(A, iterator=A, dtype=A, **self.config.pd_read_csv_kwargs ) try: for batch_idx, df in enumerate(A ): SCREAMING_SNAKE_CASE : Dict = pa.Table.from_pandas(A ) # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield (file_idx, batch_idx), self._cast_table(A ) except ValueError as e: logger.error(F"Failed to read file '{file}' with error {type(A )}: {e}" ) raise
28
from dataclasses import dataclass, field from typing import Tuple from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends from .benchmark_args_utils import BenchmarkArguments if is_torch_available(): import torch if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm lowercase_ : List[Any] = logging.get_logger(__name__) @dataclass class _lowerCamelCase ( UpperCamelCase_ ): __a = [ "no_inference", "no_cuda", "no_tpu", "no_speed", "no_memory", "no_env_print", "no_multi_process", ] def __init__( self , **lowerCAmelCase ) -> str: for deprecated_arg in self.deprecated_args: if deprecated_arg in kwargs: SCREAMING_SNAKE_CASE__: str= deprecated_arg[3:] setattr(self , lowerCAmelCase , not kwargs.pop(lowerCAmelCase ) ) logger.warning( f'{deprecated_arg} is depreciated. Please use --no_{positive_arg} or' f' {positive_arg}={kwargs[positive_arg]}' ) SCREAMING_SNAKE_CASE__: Tuple= kwargs.pop('''torchscript''' , self.torchscript ) SCREAMING_SNAKE_CASE__: Union[str, Any]= kwargs.pop('''torch_xla_tpu_print_metrics''' , self.torch_xla_tpu_print_metrics ) SCREAMING_SNAKE_CASE__: Any= kwargs.pop('''fp16_opt_level''' , self.fpaa_opt_level ) super().__init__(**lowerCAmelCase ) __a = field(default=UpperCamelCase_ , metadata={"help": "Trace the models using torchscript"} ) __a = field(default=UpperCamelCase_ , metadata={"help": "Print Xla/PyTorch tpu metrics"} ) __a = field( default="O1" , metadata={ "help": ( "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. 
" "See details at https://nvidia.github.io/apex/amp.html" ) } , ) @cached_property def UpperCamelCase_ ( self ) -> Tuple["torch.device", int]: requires_backends(self , ['''torch'''] ) logger.info('''PyTorch: setting up devices''' ) if not self.cuda: SCREAMING_SNAKE_CASE__: Any= torch.device('''cpu''' ) SCREAMING_SNAKE_CASE__: Union[str, Any]= 0 elif is_torch_tpu_available(): SCREAMING_SNAKE_CASE__: List[str]= xm.xla_device() SCREAMING_SNAKE_CASE__: Any= 0 else: SCREAMING_SNAKE_CASE__: List[Any]= torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' ) SCREAMING_SNAKE_CASE__: List[str]= torch.cuda.device_count() return device, n_gpu @property def UpperCamelCase_ ( self ) -> Optional[Any]: return is_torch_tpu_available() and self.tpu @property def UpperCamelCase_ ( self ) -> int: requires_backends(self , ['''torch'''] ) # TODO(PVP): currently only single GPU is supported return torch.cuda.current_device() @property def UpperCamelCase_ ( self ) -> "torch.device": requires_backends(self , ['''torch'''] ) return self._setup_devices[0] @property def UpperCamelCase_ ( self ) -> int: requires_backends(self , ['''torch'''] ) return self._setup_devices[1] @property def UpperCamelCase_ ( self ) -> str: return self.n_gpu > 0
64
0
"""simple docstring""" # Usage: # ./gen-card-allenai-wmt16.py import os from pathlib import Path def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ): lowerCamelCase_ = { '''en''': '''Machine learning is great, isn\'t it?''', '''ru''': '''Машинное обучение - это здорово, не так ли?''', '''de''': '''Maschinelles Lernen ist großartig, nicht wahr?''', } # BLUE scores as follows: # "pair": [fairseq, transformers] lowerCamelCase_ = { '''wmt16-en-de-dist-12-1''': [28.3, 27.52], '''wmt16-en-de-dist-6-1''': [27.4, 27.11], '''wmt16-en-de-12-1''': [26.9, 25.75], } lowerCamelCase_ = f"{src_lang}-{tgt_lang}" lowerCamelCase_ = f"\n---\nlanguage:\n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt16\n- allenai\nlicense: apache-2.0\ndatasets:\n- wmt16\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.\n\nFor more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).\n\nAll 3 models are available:\n\n* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)\n* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)\n* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)\n\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"allenai/{model_name}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n\n## Training 
data\n\nPretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).\n\n## Eval results\n\nHere are the BLEU scores:\n\nmodel | fairseq | transformers\n-------|---------|----------\n{model_name} | {scores[model_name][0]} | {scores[model_name][1]}\n\nThe score is slightly below the score reported in the paper, as the researchers don't use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=5\nmkdir -p $DATA_DIR\nsacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt16/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)\n\n\n### BibTeX entry and citation info\n\n```\n@misc{{kasai2020deep,\n title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},\n author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. 
Smith}},\n year={{2020}},\n eprint={{2006.10369}},\n archivePrefix={{arXiv}},\n primaryClass={{cs.CL}}\n}}\n```\n\n" model_card_dir.mkdir(parents=lowerCAmelCase__ ,exist_ok=lowerCAmelCase__ ) lowerCamelCase_ = os.path.join(lowerCAmelCase__ ,'''README.md''' ) print(f"Generating {path}" ) with open(lowerCAmelCase__ ,'''w''' ,encoding='''utf-8''' ) as f: f.write(lowerCAmelCase__ ) # make sure we are under the root of the project A_ = Path(__file__).resolve().parent.parent.parent A_ = repo_dir / """model_cards""" for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]: A_ = model_cards_dir / """allenai""" / model_name write_model_card(model_card_dir, src_lang="""en""", tgt_lang="""de""", model_name=model_name)
29
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import PoolFormerImageProcessor


class PoolFormerImageProcessingTester(unittest.TestCase):
    """Holds the hyper-parameters used to build PoolFormer image-processor configs for the tests below."""

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize_and_center_crop=True,
        size=None,
        crop_pct=0.9,
        crop_size=None,
        do_normalize=True,
        image_mean=None,
        image_std=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize_and_center_crop = do_resize_and_center_crop
        self.size = size if size is not None else {"shortest_edge": 30}
        self.crop_pct = crop_pct
        self.crop_size = crop_size if crop_size is not None else {"height": 30, "width": 30}
        self.do_normalize = do_normalize
        # None defaults avoid mutable default arguments; the fallbacks are the original values.
        self.image_mean = image_mean if image_mean is not None else [0.5, 0.5, 0.5]
        self.image_std = image_std if image_std is not None else [0.5, 0.5, 0.5]

    def prepare_image_processor_dict(self):
        """Kwargs used to instantiate a PoolFormerImageProcessor."""
        return {
            "size": self.size,
            "do_resize_and_center_crop": self.do_resize_and_center_crop,
            "crop_pct": self.crop_pct,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }


@require_torch
@require_vision
class PoolFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Standard image-processor test suite specialised for PoolFormer."""

    image_processing_class = PoolFormerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = PoolFormerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize_and_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "crop_pct"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 30})
        self.assertEqual(image_processor.crop_size, {"height": 30, "width": 30})

        # Integer kwargs must be normalized into the dict form.
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
64
0
import argparse

import torch

from transformers import GPT2LMHeadModel, RobertaForMaskedLM


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description=(
            "Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned"
            " Distillation"
        )
    )
    parser.add_argument("--model_type", default="roberta", choices=["roberta", "gpt2"])
    parser.add_argument("--model_name", default="roberta-large", type=str)
    parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_roberta_048131723.pth", type=str)
    parser.add_argument("--vocab_transform", action="store_true")
    args = parser.parse_args()

    if args.model_type == "roberta":
        model = RobertaForMaskedLM.from_pretrained(args.model_name)
        prefix = "roberta"
    elif args.model_type == "gpt2":
        model = GPT2LMHeadModel.from_pretrained(args.model_name)
        prefix = "transformer"

    state_dict = model.state_dict()
    compressed_sd = {}

    # Embeddings #
    if args.model_type == "gpt2":
        for param_name in ["wte.weight", "wpe.weight"]:
            compressed_sd[f"{prefix}.{param_name}"] = state_dict[f"{prefix}.{param_name}"]
    else:
        for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
            param_name = f"{prefix}.embeddings.{w}.weight"
            compressed_sd[param_name] = state_dict[param_name]
        for w in ["weight", "bias"]:
            param_name = f"{prefix}.embeddings.LayerNorm.{w}"
            compressed_sd[param_name] = state_dict[param_name]

    # Transformer Blocks #
    # Copy every other teacher layer into consecutive student slots.
    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        if args.model_type == "gpt2":
            for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
                for w in ["weight", "bias"]:
                    compressed_sd[f"{prefix}.h.{std_idx}.{layer}.{w}"] = state_dict[
                        f"{prefix}.h.{teacher_idx}.{layer}.{w}"
                    ]
            compressed_sd[f"{prefix}.h.{std_idx}.attn.bias"] = state_dict[f"{prefix}.h.{teacher_idx}.attn.bias"]
        else:
            for layer in [
                "attention.self.query",
                "attention.self.key",
                "attention.self.value",
                "attention.output.dense",
                "attention.output.LayerNorm",
                "intermediate.dense",
                "output.dense",
                "output.LayerNorm",
            ]:
                for w in ["weight", "bias"]:
                    compressed_sd[f"{prefix}.encoder.layer.{std_idx}.{layer}.{w}"] = state_dict[
                        f"{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}"
                    ]
        std_idx += 1

    # Language Modeling Head ###s
    if args.model_type == "roberta":
        for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
            compressed_sd[f"{layer}"] = state_dict[f"{layer}"]
        if args.vocab_transform:
            for w in ["weight", "bias"]:
                compressed_sd[f"lm_head.dense.{w}"] = state_dict[f"lm_head.dense.{w}"]
                compressed_sd[f"lm_head.layer_norm.{w}"] = state_dict[f"lm_head.layer_norm.{w}"]
    elif args.model_type == "gpt2":
        for w in ["weight", "bias"]:
            compressed_sd[f"{prefix}.ln_f.{w}"] = state_dict[f"{prefix}.ln_f.{w}"]
        compressed_sd["lm_head.weight"] = state_dict["lm_head.weight"]

    print(f"N layers selected for distillation: {std_idx}")
    print(f"Number of params transferred for distillation: {len(compressed_sd.keys())}")

    print(f"Save transferred checkpoint to {args.dump_checkpoint}.")
    torch.save(compressed_sd, args.dump_checkpoint)
30
import os import random import sys from . import cryptomath_module as cryptomath from . import rabin_miller lowercase_ : Tuple = 3 def A__ ( snake_case_ : int ): print('''Generating primitive root of p''' ) while True: SCREAMING_SNAKE_CASE__: List[Any]= random.randrange(3 , snake_case_ ) if pow(snake_case_ , 2 , snake_case_ ) == 1: continue if pow(snake_case_ , snake_case_ , snake_case_ ) == 1: continue return g def A__ ( snake_case_ : int ): print('''Generating prime p...''' ) SCREAMING_SNAKE_CASE__: List[Any]= rabin_miller.generate_large_prime(snake_case_ ) # select large prime number. SCREAMING_SNAKE_CASE__: int= primitive_root(snake_case_ ) # one primitive root on modulo p. SCREAMING_SNAKE_CASE__: int= random.randrange(3 , snake_case_ ) # private_key -> have to be greater than 2 for safety. SCREAMING_SNAKE_CASE__: str= cryptomath.find_mod_inverse(pow(snake_case_ , snake_case_ , snake_case_ ) , snake_case_ ) SCREAMING_SNAKE_CASE__: int= (key_size, e_a, e_a, p) SCREAMING_SNAKE_CASE__: Union[str, Any]= (key_size, d) return public_key, private_key def A__ ( snake_case_ : str , snake_case_ : int ): if os.path.exists(F'{name}_pubkey.txt' ) or os.path.exists(F'{name}_privkey.txt' ): print('''\nWARNING:''' ) print( F'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n' '''Use a different name or delete these files and re-run this program.''' ) sys.exit() SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: Optional[Any]= generate_key(snake_case_ ) print(F'\nWriting public key to file {name}_pubkey.txt...' ) with open(F'{name}_pubkey.txt' , '''w''' ) as fo: fo.write(F'{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}' ) print(F'Writing private key to file {name}_privkey.txt...' ) with open(F'{name}_privkey.txt' , '''w''' ) as fo: fo.write(F'{private_key[0]},{private_key[1]}' ) def A__ ( ): print('''Making key files...''' ) make_key_files('''elgamal''' , 2_048 ) print('''Key files generation successful''' ) if __name__ == "__main__": main()
64
0
class lowerCamelCase_ : '''simple docstring''' def __init__( self : Tuple , _lowerCAmelCase : Any , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Any ): SCREAMING_SNAKE_CASE_ = name SCREAMING_SNAKE_CASE_ = value SCREAMING_SNAKE_CASE_ = weight def __repr__( self : int ): return F"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})" def lowerCAmelCase_ ( self : Any ): return self.value def lowerCAmelCase_ ( self : Optional[int] ): return self.name def lowerCAmelCase_ ( self : Dict ): return self.weight def lowerCAmelCase_ ( self : List[str] ): return self.value / self.weight def UpperCAmelCase_ ( __UpperCAmelCase : Tuple , __UpperCAmelCase : Tuple , __UpperCAmelCase : Dict ) -> Optional[int]: SCREAMING_SNAKE_CASE_ = [] for i in range(len(__UpperCAmelCase ) ): menu.append(Things(name[i] , value[i] , weight[i] ) ) return menu def UpperCAmelCase_ ( __UpperCAmelCase : int , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Optional[Any] ) -> Optional[int]: SCREAMING_SNAKE_CASE_ = sorted(__UpperCAmelCase , key=__UpperCAmelCase , reverse=__UpperCAmelCase ) SCREAMING_SNAKE_CASE_ = [] SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 0.0, 0.0 for i in range(len(__UpperCAmelCase ) ): if (total_cost + items_copy[i].get_weight()) <= max_cost: result.append(items_copy[i] ) total_cost += items_copy[i].get_weight() total_value += items_copy[i].get_value() return (result, total_value) def UpperCAmelCase_ ( ) -> List[Any]: pass if __name__ == "__main__": import doctest doctest.testmod()
31
from math import factorial def A__ ( snake_case_ : int , snake_case_ : int ): # If either of the conditions are true, the function is being asked # to calculate a factorial of a negative number, which is not possible if n < k or k < 0: raise ValueError('''Please enter positive integers for n and k where n >= k''' ) return factorial(snake_case_ ) // (factorial(snake_case_ ) * factorial(n - k )) if __name__ == "__main__": print( 'The number of five-card hands possible from a standard', f'''fifty-two card deck is: {combinations(5_2, 5)}\n''', ) print( 'If a class of 40 students must be arranged into groups of', f'''4 for group projects, there are {combinations(4_0, 4)} ways''', 'to arrange them.\n', ) print( 'If 10 teams are competing in a Formula One race, there', f'''are {combinations(1_0, 3)} ways that first, second and''', 'third place can be awarded.', )
64
0
def A__ ( SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Tuple ) -> List[Any]: """simple docstring""" _UpperCAmelCase = [0 for i in range(r + 1 )] # nc0 = 1 _UpperCAmelCase = 1 for i in range(1 , n + 1 ): # to compute current row from previous row. _UpperCAmelCase = min(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) while j > 0: c[j] += c[j - 1] j -= 1 return c[r] print(binomial_coefficient(n=10, r=5))
32
import itertools
import random
import unittest

import numpy as np

from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available

from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin


global_rng = random.Random()

if is_torch_available():
    import torch


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Create a random float32-range nested list of the given 2-D shape."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values


class ASTFeatureExtractionTester(unittest.TestCase):
    """Holds the hyper-parameters used to build ASTFeatureExtractor configs and inputs."""

    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        # Step so that batch_size inputs span [min_seq_length, max_seq_length).
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs


@require_torch
@require_torchaudio
class ASTFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    """Standard sequence-feature-extraction test suite specialised for AST."""

    feature_extraction_class = ASTFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = ASTFeatureExtractionTester(self)

    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, padding=True, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, padding=True, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    @require_torch
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            # NOTE(review): the mangled source only shows "floataa"; padding a
            # float64 input is expected to come back as float32 — confirm
            # against the upstream test before relying on this.
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)

    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    @require_torch
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776,
             -1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133,
             -1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936,
             -0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = ASTFeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 1024, 128))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
64
0
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase = 100 ) -> int: snake_case__ = (n * (n + 1) // 2) ** 2 snake_case__ = n * (n + 1) * (2 * n + 1) // 6 return sum_cubes - sum_squares if __name__ == "__main__": print(F"""{solution() = }""")
33
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_speech_available,
    is_tf_available,
    is_torch_available,
)


# Modules that are always importable.
_import_structure = {
    "configuration_speech_to_text": ["SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Speech2TextConfig"],
    "processing_speech_to_text": ["Speech2TextProcessor"],
}

# Each optional piece is only registered when its backend is installed.
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_speech_to_text"] = ["Speech2TextTokenizer"]

try:
    if not is_speech_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_speech_to_text"] = ["Speech2TextFeatureExtractor"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_speech_to_text"] = [
        "TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFSpeech2TextForConditionalGeneration",
        "TFSpeech2TextModel",
        "TFSpeech2TextPreTrainedModel",
    ]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_speech_to_text"] = [
        "SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Speech2TextForConditionalGeneration",
        "Speech2TextModel",
        "Speech2TextPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig
    from .processing_speech_to_text import Speech2TextProcessor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speech_to_text import Speech2TextTokenizer

    try:
        if not is_speech_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_speech_to_text import Speech2TextFeatureExtractor

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_speech_to_text import (
            TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSpeech2TextForConditionalGeneration,
            TFSpeech2TextModel,
            TFSpeech2TextPreTrainedModel,
        )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_to_text import (
            SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Speech2TextForConditionalGeneration,
            Speech2TextModel,
            Speech2TextPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy imports are deferred
    # until an attribute is actually accessed.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
64
0
"""simple docstring""" from math import pow def __snake_case ( _lowercase ,_lowercase ,_lowercase ,_lowercase ,_lowercase ,): """simple docstring""" if current_sum == needed_sum: # If the sum of the powers is equal to needed_sum, then we have a solution. solutions_count += 1 return current_sum, solutions_count UpperCamelCase = int(pow(_lowercase ,_lowercase ) ) if current_sum + i_to_n <= needed_sum: # If the sum of the powers is less than needed_sum, then continue adding powers. current_sum += i_to_n UpperCamelCase , UpperCamelCase = backtrack( _lowercase ,_lowercase ,current_number + 1 ,_lowercase ,_lowercase ) current_sum -= i_to_n if i_to_n < needed_sum: # If the power of i is less than needed_sum, then try with the next power. UpperCamelCase , UpperCamelCase = backtrack( _lowercase ,_lowercase ,current_number + 1 ,_lowercase ,_lowercase ) return current_sum, solutions_count def __snake_case ( _lowercase ,_lowercase ): """simple docstring""" if not (1 <= needed_sum <= 1000 and 2 <= power <= 10): raise ValueError( '''Invalid input\n''' '''needed_sum must be between 1 and 1000, power between 2 and 10.''' ) return backtrack(_lowercase ,_lowercase ,1 ,0 ,0 )[1] # Return the solutions_count if __name__ == "__main__": import doctest doctest.testmod()
34
import argparse
import math
import os

import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer

from diffusers import AutoencoderKL, StableDiffusionPipeline, UNet2DConditionModel


def parse_args():
    """CLI options for the Neural-Compressor Stable Diffusion image-generation demo."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-m",
        "--pretrained_model_name_or_path",
        type=str,
        default=None,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
    )
    parser.add_argument(
        "-c",
        "--caption",
        type=str,
        default="robotic cat with wings",
        help="Text used to generate images.",
    )
    parser.add_argument(
        "-n",
        "--images_num",
        type=int,
        default=4,
        help="How much images to generate.",
    )
    parser.add_argument(
        "-s",
        "--seed",
        type=int,
        default=42,
        help="Seed for random process.",
    )
    parser.add_argument(
        "-ci",
        "--cuda_id",
        type=int,
        default=0,
        help="cuda_id.",
    )
    args = parser.parse_args()
    return args


def image_grid(imgs, rows, cols):
    """Paste ``rows * cols`` equally-sized PIL images into one grid image."""
    if not len(imgs) == rows * cols:
        raise ValueError("The specified number of rows and columns are not correct.")

    w, h = imgs[0].size
    grid = Image.new("RGB", size=(cols * w, rows * h))
    grid_w, grid_h = grid.size

    for i, img in enumerate(imgs):
        grid.paste(img, box=(i % cols * w, i // cols * h))
    return grid


def generate_images(
    pipeline,
    prompt="robotic cat with wings",
    guidance_scale=7.5,
    num_inference_steps=50,
    num_images_per_prompt=1,
    seed=42,
):
    """Run the pipeline with a seeded generator; return (grid_image, image_list)."""
    generator = torch.Generator(pipeline.device).manual_seed(seed)
    images = pipeline(
        prompt,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        generator=generator,
        num_images_per_prompt=num_images_per_prompt,
    ).images
    _rows = int(math.sqrt(num_images_per_prompt))
    grid = image_grid(images, rows=_rows, cols=num_images_per_prompt // _rows)
    return grid, images


args = parse_args()
# Load models and create wrapper for stable diffusion
tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer")
text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="text_encoder")
vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae")
unet = UNet2DConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="unet")
pipeline = StableDiffusionPipeline.from_pretrained(
    args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
# Disable the safety checker for this benchmark script.
pipeline.safety_checker = lambda images, clip_input: (images, False)

if os.path.exists(os.path.join(args.pretrained_model_name_or_path, "best_model.pt")):
    # Load the INT8 model quantized by Intel Neural Compressor.
    unet = load(args.pretrained_model_name_or_path, model=unet)
    unet.eval()
    setattr(pipeline, "unet", unet)
else:
    unet = unet.to(torch.device("cuda", args.cuda_id))
pipeline = pipeline.to(unet.device)

grid, images = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, "{}.png".format("_".join(args.caption.split()))))
dirname = os.path.join(args.pretrained_model_name_or_path, "_".join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
    image.save(os.path.join(dirname, "{}.png".format(idx + 1)))
64
0
import baseaa def a ( A__ ) -> bytes: '''simple docstring''' return baseaa.baaencode(string.encode('''utf-8''' ) ) def a ( A__ ) -> str: '''simple docstring''' return baseaa.baadecode(A__ ).decode('''utf-8''' ) if __name__ == "__main__": a_ :Tuple = 'Hello World!' a_ :List[str] = baseaa_encode(test) print(encoded) a_ :Optional[int] = baseaa_decode(encoded) print(decoded)
35
from __future__ import annotations from collections import deque class _lowerCamelCase : def __init__( self , lowerCAmelCase ) -> Optional[Any]: SCREAMING_SNAKE_CASE__: list[dict]= [] self.adlist.append( {'''value''': '''''', '''next_states''': [], '''fail_state''': 0, '''output''': []} ) for keyword in keywords: self.add_keyword(lowerCAmelCase ) self.set_fail_transitions() def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase ) -> int | None: for state in self.adlist[current_state]["next_states"]: if char == self.adlist[state]["value"]: return state return None def UpperCamelCase_ ( self , lowerCAmelCase ) -> None: SCREAMING_SNAKE_CASE__: str= 0 for character in keyword: SCREAMING_SNAKE_CASE__: Union[str, Any]= self.find_next_state(lowerCAmelCase , lowerCAmelCase ) if next_state is None: self.adlist.append( { '''value''': character, '''next_states''': [], '''fail_state''': 0, '''output''': [], } ) self.adlist[current_state]["next_states"].append(len(self.adlist ) - 1 ) SCREAMING_SNAKE_CASE__: Dict= len(self.adlist ) - 1 else: SCREAMING_SNAKE_CASE__: List[Any]= next_state self.adlist[current_state]["output"].append(lowerCAmelCase ) def UpperCamelCase_ ( self ) -> None: SCREAMING_SNAKE_CASE__: deque= deque() for node in self.adlist[0]["next_states"]: q.append(lowerCAmelCase ) SCREAMING_SNAKE_CASE__: Optional[int]= 0 while q: SCREAMING_SNAKE_CASE__: Union[str, Any]= q.popleft() for child in self.adlist[r]["next_states"]: q.append(lowerCAmelCase ) SCREAMING_SNAKE_CASE__: Optional[int]= self.adlist[r]['''fail_state'''] while ( self.find_next_state(lowerCAmelCase , self.adlist[child]['''value'''] ) is None and state != 0 ): SCREAMING_SNAKE_CASE__: Tuple= self.adlist[state]['''fail_state'''] SCREAMING_SNAKE_CASE__: Dict= self.find_next_state( lowerCAmelCase , self.adlist[child]['''value'''] ) if self.adlist[child]["fail_state"] is None: SCREAMING_SNAKE_CASE__: Union[str, Any]= 0 SCREAMING_SNAKE_CASE__: str= ( self.adlist[child]['''output'''] + 
self.adlist[self.adlist[child]['''fail_state''']]['''output'''] ) def UpperCamelCase_ ( self , lowerCAmelCase ) -> dict[str, list[int]]: SCREAMING_SNAKE_CASE__: dict= {} # returns a dict with keywords and list of its occurrences SCREAMING_SNAKE_CASE__: Optional[Any]= 0 for i in range(len(lowerCAmelCase ) ): while ( self.find_next_state(lowerCAmelCase , string[i] ) is None and current_state != 0 ): SCREAMING_SNAKE_CASE__: Optional[int]= self.adlist[current_state]['''fail_state'''] SCREAMING_SNAKE_CASE__: Optional[int]= self.find_next_state(lowerCAmelCase , string[i] ) if next_state is None: SCREAMING_SNAKE_CASE__: List[Any]= 0 else: SCREAMING_SNAKE_CASE__: Dict= next_state for key in self.adlist[current_state]["output"]: if key not in result: SCREAMING_SNAKE_CASE__: Optional[Any]= [] result[key].append(i - len(lowerCAmelCase ) + 1 ) return result if __name__ == "__main__": import doctest doctest.testmod()
64
0
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import numpy as np
import torch

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin


@dataclass
class KarrasVeOutput(BaseOutput):
    """Output of the scheduler's `step` methods.

    Attributes:
        prev_sample: denoised sample at the previous timestep, x_{t-1}.
        derivative: the derivative d used by the second-order correction.
        pred_original_sample: predicted denoised sample x_0 (for inspection).
    """

    prev_sample: torch.FloatTensor
    derivative: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


class KarrasVeScheduler(SchedulerMixin, ConfigMixin):
    """Stochastic sampling scheduler from Karras et al. (2022), Algorithm 2.

    NOTE(review): method/class names restored from the mangled source based on
    the calls the code itself makes and the upstream diffusers API — confirm
    against the original file.
    """

    # Second-order scheme (Heun-like predictor/corrector).
    order = 2

    @register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 100,
        s_noise: float = 1.007,
        s_churn: float = 80,
        s_min: float = 0.05,
        s_max: float = 50,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.num_inference_steps: int = None
        self.timesteps: np.IntTensor = None
        self.schedule: torch.FloatTensor = None  # sigma(t_i)

    def scale_model_input(
        self, sample: torch.FloatTensor, timestep: Optional[int] = None
    ) -> torch.FloatTensor:
        """No-op scaling; kept for scheduler API interchangeability."""
        return sample

    def set_timesteps(
        self, num_inference_steps: int, device: Union[str, torch.device] = None
    ) -> None:
        """Precompute the descending timestep and sigma schedules."""
        self.num_inference_steps = num_inference_steps
        timesteps = np.arange(0, self.num_inference_steps)[::-1].copy()
        self.timesteps = torch.from_numpy(timesteps).to(device)
        # Geometric interpolation between sigma_max and sigma_min.
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2)
                ** (i / (num_inference_steps - 1))
            )
            for i in self.timesteps
        ]
        self.schedule = torch.tensor(schedule, dtype=torch.float32, device=device)

    def add_noise_to_input(
        self,
        sample: torch.FloatTensor,
        sigma: float,
        generator: Optional[torch.Generator] = None,
    ) -> Tuple[torch.FloatTensor, float]:
        """Explicit Langevin-like "churn": raise the noise level from sigma to
        sigma_hat = sigma + gamma * sigma when sigma is inside [s_min, s_max]."""
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / self.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0

        # sample eps ~ N(0, S_noise^2 * I)
        eps = self.config.s_noise * randn_tensor(
            sample.shape, generator=generator
        ).to(sample.device)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)

        return sample_hat, sigma_hat

    def step(
        self,
        model_output: torch.FloatTensor,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: torch.FloatTensor,
        return_dict: bool = True,
    ) -> Union[KarrasVeOutput, Tuple]:
        """Euler predictor step from sigma_hat down to sigma_prev."""
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev,
            derivative=derivative,
            pred_original_sample=pred_original_sample,
        )

    def step_correct(
        self,
        model_output: torch.FloatTensor,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: torch.FloatTensor,
        sample_prev: torch.FloatTensor,
        derivative: torch.FloatTensor,
        return_dict: bool = True,
    ) -> Union[KarrasVeOutput, Tuple]:
        """Second-order corrector: average the predictor derivative with the
        derivative evaluated at the predicted sample."""
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (
            0.5 * derivative + 0.5 * derivative_corr
        )

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev,
            derivative=derivative,
            pred_original_sample=pred_original_sample,
        )

    def add_noise(self, original_samples, noise, timesteps):
        # Training-time noising is not supported by this scheduler.
        raise NotImplementedError()
36
import numpy as np


def runge_kutta(f, y0, x0, h, x_end):
    """Integrate y' = f(x, y) with the classic 4th-order Runge-Kutta method.

    Args:
        f: right-hand side, called as ``f(x, y)``.
        y0: initial value y(x0).
        x0: initial abscissa.
        h: step size (positive).
        x_end: final abscissa; integration covers [x0, x_end].

    Returns:
        numpy array of y values at x0, x0+h, ..., (n+1 points where
        n = ceil((x_end - x0) / h)).

    >>> runge_kutta(lambda x, y: y, 1.0, 0.0, 0.01, 0.0)[0]
    1.0
    """
    n = int(np.ceil((x_end - x0) / h))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0
    for k in range(n):
        # The four classical RK4 slopes (the mangled source collapsed
        # k1..k4 into a single name, which would have used stale values).
        k1 = f(x, y[k])
        k2 = f(x + 0.5 * h, y[k] + 0.5 * h * k1)
        k3 = f(x + 0.5 * h, y[k] + 0.5 * h * k2)
        k4 = f(x + h, y[k] + h * k3)
        y[k + 1] = y[k] + (1 / 6) * h * (k1 + 2 * k2 + 2 * k3 + k4)
        x += h
    return y


# Backward-compat alias for the previous (mangled) function name.
A__ = runge_kutta

if __name__ == "__main__":
    import doctest

    doctest.testmod()
64
0
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel


class MCLIPConfig(XLMRobertaConfig):
    """Configuration for the multilingual CLIP text encoder (M-CLIP)."""

    model_type = "M-CLIP"

    def __init__(self, transformerDimSize=1_024, imageDimSize=768, **kwargs):
        # Width of the XLM-R transformer and of the target CLIP image space.
        # (CamelCase attribute names kept for checkpoint compatibility.)
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs)


class MultilingualCLIP(PreTrainedModel):
    """XLM-RoBERTa text encoder projected into the CLIP embedding space."""

    config_class = MCLIPConfig

    def __init__(self, config, *args, **kwargs):
        super().__init__(config, *args, **kwargs)
        self.transformer = XLMRobertaModel(config)
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions, out_features=config.numDims
        )

    def forward(self, input_ids, attention_mask):
        """Return (projected sentence embeddings, raw token embeddings).

        Sentence embeddings are the attention-mask-weighted mean over tokens.
        """
        embs = self.transformer(input_ids=input_ids, attention_mask=attention_mask)[0]
        # Masked mean-pool over the sequence dimension.
        pooled = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(
            dim=1
        )[:, None]
        return self.LinearTransformation(pooled), embs
37
import unittest

import torch
from torch import nn

from diffusers.models.activations import get_activation


class ActivationsTests(unittest.TestCase):
    """Check that get_activation returns the right module and that each
    activation is (approximately) zero for large negative inputs, exactly
    zero at 0, nonzero at -1, and identity-like for large positive inputs."""

    def test_swish(self):
        act = get_activation("swish")

        self.assertIsInstance(act, nn.SiLU)

        # silu(-100) underflows to 0 in float32; silu(-1) does not.
        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_silu(self):
        act = get_activation("silu")

        self.assertIsInstance(act, nn.SiLU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_mish(self):
        act = get_activation("mish")

        self.assertIsInstance(act, nn.Mish)

        self.assertEqual(act(torch.tensor(-200, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_gelu(self):
        act = get_activation("gelu")

        self.assertIsInstance(act, nn.GELU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)
64
0
from dataclasses import dataclass
from typing import Optional

import numpy as np
import torch
import torch.nn as nn

from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_2d_blocks import UNetMidBlock2D, get_down_block, get_up_block

# NOTE(review): class/method/attribute names below were restored from the
# mangled source (all five classes were named `__snake_case`, shadowing each
# other) using the calls the code itself makes and the upstream diffusers
# vae module — confirm against the original file.


@dataclass
class DecoderOutput(BaseOutput):
    """Output of a decoding step: the decoded image batch."""

    sample: torch.FloatTensor


class Encoder(nn.Module):
    """Convolutional VAE encoder: conv_in -> down blocks -> mid block -> conv_out."""

    def __init__(
        self,
        in_channels=3,
        out_channels=3,
        down_block_types=("DownEncoderBlock2D",),
        block_out_channels=(64,),
        layers_per_block=2,
        norm_num_groups=32,
        act_fn="silu",
        double_z=True,
    ):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = torch.nn.Conv2d(
            in_channels, block_out_channels[0], kernel_size=3, stride=1, padding=1
        )

        self.mid_block = None
        self.down_blocks = nn.ModuleList([])

        # down
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(
                down_block_type,
                num_layers=self.layers_per_block,
                in_channels=input_channel,
                out_channels=output_channel,
                add_downsample=not is_final_block,
                resnet_eps=1e-6,
                downsample_padding=0,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                attention_head_dim=output_channel,
                temb_channels=None,
            )
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1],
            resnet_eps=1e-6,
            resnet_act_fn=act_fn,
            output_scale_factor=1,
            resnet_time_scale_shift="default",
            attention_head_dim=block_out_channels[-1],
            resnet_groups=norm_num_groups,
            temb_channels=None,
        )

        # out
        self.conv_norm_out = nn.GroupNorm(
            num_channels=block_out_channels[-1], num_groups=norm_num_groups, eps=1e-6
        )
        self.conv_act = nn.SiLU()

        # double_z: emit mean and logvar channels for a Gaussian posterior.
        conv_out_channels = 2 * out_channels if double_z else out_channels
        self.conv_out = nn.Conv2d(block_out_channels[-1], conv_out_channels, 3, padding=1)

        self.gradient_checkpointing = False

    def forward(self, x):
        sample = x
        sample = self.conv_in(sample)

        if self.training and self.gradient_checkpointing:
            # Trade compute for memory by re-running blocks in backward.
            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            # down
            if is_torch_version(">=", "1.11.0"):
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(down_block), sample, use_reentrant=False
                    )
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, use_reentrant=False
                )
            else:
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(down_block), sample
                    )
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample
                )
        else:
            # down
            for down_block in self.down_blocks:
                sample = down_block(sample)
            # middle
            sample = self.mid_block(sample)

        # post-process
        sample = self.conv_norm_out(sample)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)

        return sample


class Decoder(nn.Module):
    """Convolutional VAE decoder: conv_in -> mid block -> up blocks -> conv_out."""

    def __init__(
        self,
        in_channels=3,
        out_channels=3,
        up_block_types=("UpDecoderBlock2D",),
        block_out_channels=(64,),
        layers_per_block=2,
        norm_num_groups=32,
        act_fn="silu",
        norm_type="group",
    ):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = nn.Conv2d(
            in_channels, block_out_channels[-1], kernel_size=3, stride=1, padding=1
        )

        self.mid_block = None
        self.up_blocks = nn.ModuleList([])

        # Spatial norm conditions on the latent, so it needs temb channels.
        temb_channels = in_channels if norm_type == "spatial" else None

        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1],
            resnet_eps=1e-6,
            resnet_act_fn=act_fn,
            output_scale_factor=1,
            resnet_time_scale_shift="default" if norm_type == "group" else norm_type,
            attention_head_dim=block_out_channels[-1],
            resnet_groups=norm_num_groups,
            temb_channels=temb_channels,
        )

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            up_block = get_up_block(
                up_block_type,
                num_layers=self.layers_per_block + 1,
                in_channels=prev_output_channel,
                out_channels=output_channel,
                prev_output_channel=None,
                add_upsample=not is_final_block,
                resnet_eps=1e-6,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                attention_head_dim=output_channel,
                temb_channels=temb_channels,
                resnet_time_scale_shift=norm_type,
            )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        if norm_type == "spatial":
            self.conv_norm_out = SpatialNorm(block_out_channels[0], temb_channels)
        else:
            self.conv_norm_out = nn.GroupNorm(
                num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=1e-6
            )
        self.conv_act = nn.SiLU()
        self.conv_out = nn.Conv2d(block_out_channels[0], out_channels, 3, padding=1)

        self.gradient_checkpointing = False

    def forward(self, z, latent_embeds=None):
        sample = z
        sample = self.conv_in(sample)

        # Up blocks may run in a different dtype (e.g. fp32 upcast).
        upscale_dtype = next(iter(self.up_blocks.parameters())).dtype
        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            if is_torch_version(">=", "1.11.0"):
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block),
                    sample,
                    latent_embeds,
                    use_reentrant=False,
                )
                sample = sample.to(upscale_dtype)

                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(up_block),
                        sample,
                        latent_embeds,
                        use_reentrant=False,
                    )
            else:
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds
                )
                sample = sample.to(upscale_dtype)

                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(up_block), sample, latent_embeds
                    )
        else:
            # middle
            sample = self.mid_block(sample, latent_embeds)
            sample = sample.to(upscale_dtype)

            # up
            for up_block in self.up_blocks:
                sample = up_block(sample, latent_embeds)

        # post-process
        if latent_embeds is None:
            sample = self.conv_norm_out(sample)
        else:
            sample = self.conv_norm_out(sample, latent_embeds)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)

        return sample


class VectorQuantizer(nn.Module):
    """Nearest-neighbour codebook quantizer (VQ-VAE) with optional index remap."""

    def __init__(
        self,
        n_e,
        vq_embed_dim,
        beta,
        remap=None,
        unknown_index="random",
        sane_index_shape=False,
        legacy=True,
    ):
        super().__init__()
        self.n_e = n_e
        self.vq_embed_dim = vq_embed_dim
        self.beta = beta
        self.legacy = legacy

        self.embedding = nn.Embedding(self.n_e, self.vq_embed_dim)
        self.embedding.weight.data.uniform_(-1.0 / self.n_e, 1.0 / self.n_e)

        self.remap = remap
        if self.remap is not None:
            # Restrict the codebook to the "used" indices loaded from disk.
            self.register_buffer("used", torch.tensor(np.load(self.remap)))
            self.re_embed = self.used.shape[0]
            self.unknown_index = unknown_index  # "random" or "extra" or integer
            if self.unknown_index == "extra":
                self.unknown_index = self.re_embed
                self.re_embed = self.re_embed + 1
            print(
                f"Remapping {self.n_e} indices to {self.re_embed} indices. "
                f"Using {self.unknown_index} for unknown indices."
            )
        else:
            self.re_embed = n_e

        self.sane_index_shape = sane_index_shape

    def remap_to_used(self, inds):
        """Map raw codebook indices to positions in the `used` subset."""
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        match = (inds[:, :, None] == used[None, None, ...]).long()
        new = match.argmax(-1)
        unknown = match.sum(2) < 1
        if self.unknown_index == "random":
            new[unknown] = torch.randint(0, self.re_embed, size=new[unknown].shape).to(
                device=new.device
            )
        else:
            new[unknown] = self.unknown_index
        return new.reshape(ishape)

    def unmap_to_all(self, inds):
        """Inverse of remap_to_used: subset positions back to raw indices."""
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        if self.re_embed > self.used.shape[0]:  # extra token
            inds[inds >= self.used.shape[0]] = 0  # simply set to zero
        back = torch.gather(used[None, :][inds.shape[0] * [0], :], 1, inds)
        return back.reshape(ishape)

    def forward(self, z):
        # reshape z -> (batch, height, width, channel) and flatten
        z = z.permute(0, 2, 3, 1).contiguous()
        z_flattened = z.view(-1, self.vq_embed_dim)

        # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
        min_encoding_indices = torch.argmin(
            torch.cdist(z_flattened, self.embedding.weight), dim=1
        )

        z_q = self.embedding(min_encoding_indices).view(z.shape)
        perplexity = None
        min_encodings = None

        # compute loss for embedding (codebook vs commitment weighting).
        if not self.legacy:
            loss = self.beta * torch.mean((z_q.detach() - z) ** 2) + torch.mean(
                (z_q - z.detach()) ** 2
            )
        else:
            loss = torch.mean((z_q.detach() - z) ** 2) + self.beta * torch.mean(
                (z_q - z.detach()) ** 2
            )

        # preserve gradients (straight-through estimator)
        z_q = z + (z_q - z).detach()

        # reshape back to match original input shape
        z_q = z_q.permute(0, 3, 1, 2).contiguous()

        if self.remap is not None:
            min_encoding_indices = min_encoding_indices.reshape(
                z.shape[0], -1
            )  # add batch axis
            min_encoding_indices = self.remap_to_used(min_encoding_indices)
            min_encoding_indices = min_encoding_indices.reshape(-1, 1)  # flatten

        if self.sane_index_shape:
            min_encoding_indices = min_encoding_indices.reshape(
                z_q.shape[0], z_q.shape[2], z_q.shape[3]
            )

        return z_q, loss, (perplexity, min_encodings, min_encoding_indices)

    def get_codebook_entry(self, indices, shape):
        # shape specifying (batch, height, width, channel)
        if self.remap is not None:
            indices = indices.reshape(shape[0], -1)  # add batch axis
            indices = self.unmap_to_all(indices)
            indices = indices.reshape(-1)  # flatten again

        # get quantized latent vectors
        z_q = self.embedding(indices)

        if shape is not None:
            z_q = z_q.view(shape)
            # reshape back to match original input shape
            z_q = z_q.permute(0, 3, 1, 2).contiguous()

        return z_q


class DiagonalGaussianDistribution(object):
    """Diagonal Gaussian parameterized by concatenated [mean, logvar] channels."""

    def __init__(self, parameters, deterministic=False):
        self.parameters = parameters
        self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)
        # Clamp logvar for numerical stability of exp().
        self.logvar = torch.clamp(self.logvar, -30.0, 20.0)
        self.deterministic = deterministic
        self.std = torch.exp(0.5 * self.logvar)
        self.var = torch.exp(self.logvar)
        if self.deterministic:
            # Deterministic mode collapses the distribution onto its mean.
            self.var = self.std = torch.zeros_like(
                self.mean, device=self.parameters.device, dtype=self.parameters.dtype
            )

    def sample(self, generator=None):
        # make sure sample is on the same device as the parameters and has same dtype
        sample = randn_tensor(
            self.mean.shape,
            generator=generator,
            device=self.parameters.device,
            dtype=self.parameters.dtype,
        )
        x = self.mean + self.std * sample
        return x

    def kl(self, other=None):
        """KL divergence to `other` (or to the standard normal if None)."""
        if self.deterministic:
            return torch.Tensor([0.0])
        else:
            if other is None:
                return 0.5 * torch.sum(
                    torch.pow(self.mean, 2) + self.var - 1.0 - self.logvar,
                    dim=[1, 2, 3],
                )
            else:
                return 0.5 * torch.sum(
                    torch.pow(self.mean - other.mean, 2) / other.var
                    + self.var / other.var
                    - 1.0
                    - self.logvar
                    + other.logvar,
                    dim=[1, 2, 3],
                )

    def nll(self, sample, dims=[1, 2, 3]):
        """Negative log-likelihood of `sample` under this Gaussian."""
        if self.deterministic:
            return torch.Tensor([0.0])
        logtwopi = np.log(2.0 * np.pi)
        return 0.5 * torch.sum(
            logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var,
            dim=dims,
        )

    def mode(self):
        return self.mean
38
from __future__ import annotations from collections.abc import Callable from typing import Any, Generic, TypeVar lowercase_ : Tuple = TypeVar('T') class _lowerCamelCase ( Generic[T] ): def __init__( self , lowerCAmelCase , lowerCAmelCase ) -> None: SCREAMING_SNAKE_CASE__: Any | T= None SCREAMING_SNAKE_CASE__: int= len(lowerCAmelCase ) SCREAMING_SNAKE_CASE__: list[T]= [any_type for _ in range(self.N )] + arr SCREAMING_SNAKE_CASE__: List[Any]= fnc self.build() def UpperCamelCase_ ( self ) -> None: for p in range(self.N - 1 , 0 , -1 ): SCREAMING_SNAKE_CASE__: Optional[Any]= self.fn(self.st[p * 2] , self.st[p * 2 + 1] ) def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase ) -> None: p += self.N SCREAMING_SNAKE_CASE__: Union[str, Any]= v while p > 1: SCREAMING_SNAKE_CASE__: Any= p // 2 SCREAMING_SNAKE_CASE__: Optional[Any]= self.fn(self.st[p * 2] , self.st[p * 2 + 1] ) def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase ) -> T | None: # noqa: E741 SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: str= l + self.N, r + self.N SCREAMING_SNAKE_CASE__: T | None= None while l <= r: if l % 2 == 1: SCREAMING_SNAKE_CASE__: str= self.st[l] if res is None else self.fn(lowerCAmelCase , self.st[l] ) if r % 2 == 0: SCREAMING_SNAKE_CASE__: Optional[Any]= self.st[r] if res is None else self.fn(lowerCAmelCase , self.st[r] ) SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: Any= (l + 1) // 2, (r - 1) // 2 return res if __name__ == "__main__": from functools import reduce lowercase_ : str = [1, 1_0, -2, 9, -3, 8, 4, -7, 5, 6, 1_1, -1_2] lowercase_ : str = { 0: 7, 1: 2, 2: 6, 3: -1_4, 4: 5, 5: 4, 6: 7, 7: -1_0, 8: 9, 9: 1_0, 1_0: 1_2, 1_1: 1, } lowercase_ : int = SegmentTree(test_array, min) lowercase_ : Optional[int] = SegmentTree(test_array, max) lowercase_ : Optional[Any] = SegmentTree(test_array, lambda a, b: a + b) def A__ ( ): for i in range(len(snake_case_ ) ): for j in range(snake_case_ , len(snake_case_ ) ): SCREAMING_SNAKE_CASE__: Any= reduce(snake_case_ , 
test_array[i : j + 1] ) SCREAMING_SNAKE_CASE__: Optional[Any]= reduce(snake_case_ , test_array[i : j + 1] ) SCREAMING_SNAKE_CASE__: int= reduce(lambda snake_case_ , snake_case_ : a + b , test_array[i : j + 1] ) assert min_range == min_segment_tree.query(snake_case_ , snake_case_ ) assert max_range == max_segment_tree.query(snake_case_ , snake_case_ ) assert sum_range == sum_segment_tree.query(snake_case_ , snake_case_ ) test_all_segments() for index, value in test_updates.items(): lowercase_ : int = value min_segment_tree.update(index, value) max_segment_tree.update(index, value) sum_segment_tree.update(index, value) test_all_segments()
64
0
import torch
from torch import nn

from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin


class UnCLIPTextProjModel(ModelMixin, ConfigMixin):
    """
    Utility model that projects CLIP image/text embeddings into the inputs the
    UnCLIP decoder expects: an additive time-embedding term and extra context
    tokens prepended to the text-encoder hidden states.

    NOTE(review): the original block declared every `__init__`/`forward`
    parameter under the same mangled name (a duplicate-argument SyntaxError);
    parameter names below are restored from the reference diffusers
    implementation.
    """

    @register_to_config
    def __init__(
        self,
        *,
        clip_extra_context_tokens: int = 4,
        clip_embeddings_dim: int = 768,
        time_embed_dim: int,
        cross_attention_dim,
    ):
        super().__init__()

        # Learned "null" image embedding used for classifier-free guidance.
        self.learned_classifier_free_guidance_embeddings = nn.Parameter(torch.zeros(clip_embeddings_dim))

        # parameters for additional clip time embeddings
        self.embedding_proj = nn.Linear(clip_embeddings_dim, time_embed_dim)
        self.clip_image_embeddings_project_to_time_embeddings = nn.Linear(clip_embeddings_dim, time_embed_dim)

        # parameters for encoder hidden states
        self.clip_extra_context_tokens = clip_extra_context_tokens
        self.clip_extra_context_tokens_proj = nn.Linear(
            clip_embeddings_dim, self.clip_extra_context_tokens * cross_attention_dim
        )
        self.encoder_hidden_states_proj = nn.Linear(clip_embeddings_dim, cross_attention_dim)
        self.text_encoder_hidden_states_norm = nn.LayerNorm(cross_attention_dim)

    def forward(self, *, image_embeddings, prompt_embeds, text_encoder_hidden_states, do_classifier_free_guidance):
        """Return ``(text_encoder_hidden_states, additive_clip_time_embeddings)``."""
        if do_classifier_free_guidance:
            # Add the classifier free guidance embeddings to the image embeddings
            image_embeddings_batch_size = image_embeddings.shape[0]
            classifier_free_guidance_embeddings = self.learned_classifier_free_guidance_embeddings.unsqueeze(0)
            classifier_free_guidance_embeddings = classifier_free_guidance_embeddings.expand(
                image_embeddings_batch_size, -1
            )
            image_embeddings = torch.cat([classifier_free_guidance_embeddings, image_embeddings], dim=0)

        # The image embeddings batch size and the text embeddings batch size are equal
        assert image_embeddings.shape[0] == prompt_embeds.shape[0]

        batch_size = prompt_embeds.shape[0]

        # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
        # adding CLIP embeddings to the existing timestep embedding, ...
        time_projected_prompt_embeds = self.embedding_proj(prompt_embeds)
        time_projected_image_embeddings = self.clip_image_embeddings_project_to_time_embeddings(image_embeddings)
        additive_clip_time_embeddings = time_projected_image_embeddings + time_projected_prompt_embeds

        # ... and by projecting CLIP embeddings into four
        # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
        clip_extra_context_tokens = self.clip_extra_context_tokens_proj(image_embeddings)
        clip_extra_context_tokens = clip_extra_context_tokens.reshape(batch_size, -1, self.clip_extra_context_tokens)
        clip_extra_context_tokens = clip_extra_context_tokens.permute(0, 2, 1)

        text_encoder_hidden_states = self.encoder_hidden_states_proj(text_encoder_hidden_states)
        text_encoder_hidden_states = self.text_encoder_hidden_states_norm(text_encoder_hidden_states)
        text_encoder_hidden_states = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states], dim=1)

        return text_encoder_hidden_states, additive_clip_time_embeddings
39
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/ import gc import random import tempfile import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, ControlNetModel, DDIMScheduler, StableDiffusionControlNetImgaImgPipeline, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, ) enable_full_determinism() class _lowerCamelCase ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ): __a = StableDiffusionControlNetImgaImgPipeline __a = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"} __a = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS __a = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"control_image"} ) __a = IMAGE_TO_IMAGE_IMAGE_PARAMS def UpperCamelCase_ ( self ) -> str: torch.manual_seed(0 ) SCREAMING_SNAKE_CASE__: int= UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , ) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE__: str= ControlNetModel( block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , 
cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , ) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE__: str= DDIMScheduler( beta_start=0.00085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=lowerCAmelCase , set_alpha_to_one=lowerCAmelCase , ) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE__: List[str]= AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , ) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE__: List[Any]= CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) SCREAMING_SNAKE_CASE__: List[str]= CLIPTextModel(lowerCAmelCase ) SCREAMING_SNAKE_CASE__: int= CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) SCREAMING_SNAKE_CASE__: Union[str, Any]= { '''unet''': unet, '''controlnet''': controlnet, '''scheduler''': scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''safety_checker''': None, '''feature_extractor''': None, } return components def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase=0 ) -> Optional[Any]: if str(lowerCAmelCase ).startswith('''mps''' ): SCREAMING_SNAKE_CASE__: Optional[int]= torch.manual_seed(lowerCAmelCase ) else: SCREAMING_SNAKE_CASE__: Union[str, Any]= torch.Generator(device=lowerCAmelCase ).manual_seed(lowerCAmelCase ) SCREAMING_SNAKE_CASE__: int= 2 SCREAMING_SNAKE_CASE__: Tuple= randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=lowerCAmelCase , device=torch.device(lowerCAmelCase ) , ) SCREAMING_SNAKE_CASE__: int= floats_tensor(control_image.shape , rng=random.Random(lowerCAmelCase ) ).to(lowerCAmelCase ) SCREAMING_SNAKE_CASE__: Optional[int]= 
image.cpu().permute(0 , 2 , 3 , 1 )[0] SCREAMING_SNAKE_CASE__: str= Image.fromarray(np.uinta(lowerCAmelCase ) ).convert('''RGB''' ).resize((64, 64) ) SCREAMING_SNAKE_CASE__: Tuple= { '''prompt''': '''A painting of a squirrel eating a burger''', '''generator''': generator, '''num_inference_steps''': 2, '''guidance_scale''': 6.0, '''output_type''': '''numpy''', '''image''': image, '''control_image''': control_image, } return inputs def UpperCamelCase_ ( self ) -> Tuple: return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3 ) @unittest.skipIf( torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , ) def UpperCamelCase_ ( self ) -> Dict: self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3 ) def UpperCamelCase_ ( self ) -> str: self._test_inference_batch_single_identical(expected_max_diff=2e-3 ) class _lowerCamelCase ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ): __a = StableDiffusionControlNetImgaImgPipeline __a = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"} __a = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS __a = frozenset([] ) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess def UpperCamelCase_ ( self ) -> Dict: torch.manual_seed(0 ) SCREAMING_SNAKE_CASE__: int= UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , ) torch.manual_seed(0 ) def init_weights(lowerCAmelCase ): if isinstance(lowerCAmelCase , torch.nn.Convad ): torch.nn.init.normal(m.weight ) m.bias.data.fill_(1.0 ) SCREAMING_SNAKE_CASE__: Any= ControlNetModel( block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , cross_attention_dim=32 , 
conditioning_embedding_out_channels=(16, 32) , ) controlneta.controlnet_down_blocks.apply(lowerCAmelCase ) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE__: Tuple= ControlNetModel( block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , ) controlneta.controlnet_down_blocks.apply(lowerCAmelCase ) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE__: Tuple= DDIMScheduler( beta_start=0.00085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=lowerCAmelCase , set_alpha_to_one=lowerCAmelCase , ) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE__: Tuple= AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , ) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE__: Optional[int]= CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) SCREAMING_SNAKE_CASE__: Any= CLIPTextModel(lowerCAmelCase ) SCREAMING_SNAKE_CASE__: List[str]= CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) SCREAMING_SNAKE_CASE__: Dict= MultiControlNetModel([controlneta, controlneta] ) SCREAMING_SNAKE_CASE__: int= { '''unet''': unet, '''controlnet''': controlnet, '''scheduler''': scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''safety_checker''': None, '''feature_extractor''': None, } return components def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase=0 ) -> List[Any]: if str(lowerCAmelCase ).startswith('''mps''' ): SCREAMING_SNAKE_CASE__: str= torch.manual_seed(lowerCAmelCase ) else: SCREAMING_SNAKE_CASE__: Optional[int]= torch.Generator(device=lowerCAmelCase 
).manual_seed(lowerCAmelCase ) SCREAMING_SNAKE_CASE__: Any= 2 SCREAMING_SNAKE_CASE__: Tuple= [ randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=lowerCAmelCase , device=torch.device(lowerCAmelCase ) , ), randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=lowerCAmelCase , device=torch.device(lowerCAmelCase ) , ), ] SCREAMING_SNAKE_CASE__: Union[str, Any]= floats_tensor(control_image[0].shape , rng=random.Random(lowerCAmelCase ) ).to(lowerCAmelCase ) SCREAMING_SNAKE_CASE__: Dict= image.cpu().permute(0 , 2 , 3 , 1 )[0] SCREAMING_SNAKE_CASE__: Union[str, Any]= Image.fromarray(np.uinta(lowerCAmelCase ) ).convert('''RGB''' ).resize((64, 64) ) SCREAMING_SNAKE_CASE__: int= { '''prompt''': '''A painting of a squirrel eating a burger''', '''generator''': generator, '''num_inference_steps''': 2, '''guidance_scale''': 6.0, '''output_type''': '''numpy''', '''image''': image, '''control_image''': control_image, } return inputs def UpperCamelCase_ ( self ) -> List[Any]: SCREAMING_SNAKE_CASE__: List[Any]= self.get_dummy_components() SCREAMING_SNAKE_CASE__: str= self.pipeline_class(**lowerCAmelCase ) pipe.to(lowerCAmelCase ) SCREAMING_SNAKE_CASE__: List[Any]= 10.0 SCREAMING_SNAKE_CASE__: Any= 4 SCREAMING_SNAKE_CASE__: Optional[Any]= self.get_dummy_inputs(lowerCAmelCase ) SCREAMING_SNAKE_CASE__: int= steps SCREAMING_SNAKE_CASE__: int= scale SCREAMING_SNAKE_CASE__: List[Any]= pipe(**lowerCAmelCase )[0] SCREAMING_SNAKE_CASE__: Tuple= self.get_dummy_inputs(lowerCAmelCase ) SCREAMING_SNAKE_CASE__: Dict= steps SCREAMING_SNAKE_CASE__: List[Any]= scale SCREAMING_SNAKE_CASE__: int= pipe(**lowerCAmelCase , control_guidance_start=0.1 , control_guidance_end=0.2 )[0] SCREAMING_SNAKE_CASE__: Dict= self.get_dummy_inputs(lowerCAmelCase ) SCREAMING_SNAKE_CASE__: List[str]= steps SCREAMING_SNAKE_CASE__: List[Any]= scale SCREAMING_SNAKE_CASE__: str= pipe(**lowerCAmelCase , 
control_guidance_start=[0.1, 0.3] , control_guidance_end=[0.2, 0.7] )[0] SCREAMING_SNAKE_CASE__: Optional[int]= self.get_dummy_inputs(lowerCAmelCase ) SCREAMING_SNAKE_CASE__: int= steps SCREAMING_SNAKE_CASE__: int= scale SCREAMING_SNAKE_CASE__: Any= pipe(**lowerCAmelCase , control_guidance_start=0.4 , control_guidance_end=[0.5, 0.8] )[0] # make sure that all outputs are different assert np.sum(np.abs(output_a - output_a ) ) > 1e-3 assert np.sum(np.abs(output_a - output_a ) ) > 1e-3 assert np.sum(np.abs(output_a - output_a ) ) > 1e-3 def UpperCamelCase_ ( self ) -> int: return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3 ) @unittest.skipIf( torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , ) def UpperCamelCase_ ( self ) -> Dict: self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3 ) def UpperCamelCase_ ( self ) -> Union[str, Any]: self._test_inference_batch_single_identical(expected_max_diff=2e-3 ) def UpperCamelCase_ ( self ) -> Optional[Any]: SCREAMING_SNAKE_CASE__: Any= self.get_dummy_components() SCREAMING_SNAKE_CASE__: Union[str, Any]= self.pipeline_class(**lowerCAmelCase ) pipe.to(lowerCAmelCase ) pipe.set_progress_bar_config(disable=lowerCAmelCase ) with tempfile.TemporaryDirectory() as tmpdir: try: # save_pretrained is not implemented for Multi-ControlNet pipe.save_pretrained(lowerCAmelCase ) except NotImplementedError: pass @slow @require_torch_gpu class _lowerCamelCase ( unittest.TestCase ): def UpperCamelCase_ ( self ) -> Dict: super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCamelCase_ ( self ) -> Tuple: SCREAMING_SNAKE_CASE__: Optional[int]= ControlNetModel.from_pretrained('''lllyasviel/sd-controlnet-canny''' ) SCREAMING_SNAKE_CASE__: Tuple= StableDiffusionControlNetImgaImgPipeline.from_pretrained( '''runwayml/stable-diffusion-v1-5''' , safety_checker=lowerCAmelCase , controlnet=lowerCAmelCase ) 
pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=lowerCAmelCase ) SCREAMING_SNAKE_CASE__: Tuple= torch.Generator(device='''cpu''' ).manual_seed(0 ) SCREAMING_SNAKE_CASE__: List[Any]= '''evil space-punk bird''' SCREAMING_SNAKE_CASE__: List[str]= load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png''' ).resize((512, 512) ) SCREAMING_SNAKE_CASE__: List[Any]= load_image( '''https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png''' ).resize((512, 512) ) SCREAMING_SNAKE_CASE__: Optional[Any]= pipe( lowerCAmelCase , lowerCAmelCase , control_image=lowerCAmelCase , generator=lowerCAmelCase , output_type='''np''' , num_inference_steps=50 , strength=0.6 , ) SCREAMING_SNAKE_CASE__: Union[str, Any]= output.images[0] assert image.shape == (512, 512, 3) SCREAMING_SNAKE_CASE__: str= load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy''' ) assert np.abs(expected_image - image ).max() < 9e-2
64
0
import functools
import operator

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

# NOTE(review): in the mangled original, this dict was assigned to the SAME
# module-level name as the logger, clobbering it; restored to distinct names.
UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/unispeech-large-1500h-cv": (
        "https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json"
    ),
    # See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}


class UniSpeechConfig(PretrainedConfig):
    """
    Configuration class for a UniSpeech model. Holds the architecture
    hyper-parameters (feature extractor, transformer encoder, SpecAugment,
    quantizer and CTC settings).

    Parameter names are restored from the reference transformers
    implementation; the mangled original declared every parameter under one
    duplicated name (a SyntaxError). Defaults match the positional defaults
    of the original block.
    """

    model_type = "unispeech"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        num_ctc_classes=80,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        replace_prob=0.5,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        # The three conv specs must describe the same number of layers.
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # pretraining loss
        self.replace_prob = replace_prob

    @property
    def inputs_to_logits_ratio(self):
        """Total downsampling factor of the convolutional feature extractor."""
        return functools.reduce(operator.mul, self.conv_stride, 1)
40
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim from dataclasses import dataclass from typing import Optional, Tuple, Union import flax import jax import jax.numpy as jnp from ..configuration_utils import ConfigMixin, register_to_config from .scheduling_utils_flax import ( CommonSchedulerState, FlaxKarrasDiffusionSchedulers, FlaxSchedulerMixin, FlaxSchedulerOutput, add_noise_common, get_velocity_common, ) @flax.struct.dataclass class _lowerCamelCase : __a = 42 # setable values __a = 42 __a = 42 __a = None @classmethod def UpperCamelCase_ ( cls , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) -> Optional[Any]: return cls(common=lowerCAmelCase , init_noise_sigma=lowerCAmelCase , timesteps=lowerCAmelCase ) @dataclass class _lowerCamelCase ( UpperCamelCase_ ): __a = 42 class _lowerCamelCase ( UpperCamelCase_ , UpperCamelCase_ ): __a = [e.name for e in FlaxKarrasDiffusionSchedulers] __a = 42 @property def UpperCamelCase_ ( self ) -> List[Any]: return True @register_to_config def __init__( self , lowerCAmelCase = 1000 , lowerCAmelCase = 0.0001 , lowerCAmelCase = 0.02 , lowerCAmelCase = "linear" , lowerCAmelCase = None , lowerCAmelCase = "fixed_small" , lowerCAmelCase = True , lowerCAmelCase = "epsilon" , lowerCAmelCase = jnp.floataa , ) -> Optional[int]: SCREAMING_SNAKE_CASE__: Optional[int]= dtype def UpperCamelCase_ ( self , lowerCAmelCase = None ) -> DDPMSchedulerState: if common is None: SCREAMING_SNAKE_CASE__: Optional[Any]= CommonSchedulerState.create(self ) # standard deviation of the initial noise distribution SCREAMING_SNAKE_CASE__: Dict= jnp.array(1.0 , dtype=self.dtype ) SCREAMING_SNAKE_CASE__: int= jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1] return DDPMSchedulerState.create( common=lowerCAmelCase , init_noise_sigma=lowerCAmelCase , timesteps=lowerCAmelCase , ) def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = None ) -> jnp.ndarray: return sample def UpperCamelCase_ ( 
self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = () ) -> DDPMSchedulerState: SCREAMING_SNAKE_CASE__: str= self.config.num_train_timesteps // num_inference_steps # creates integer timesteps by multiplying by ratio # rounding to avoid issues when num_inference_step is power of 3 SCREAMING_SNAKE_CASE__: str= (jnp.arange(0 , lowerCAmelCase ) * step_ratio).round()[::-1] return state.replace( num_inference_steps=lowerCAmelCase , timesteps=lowerCAmelCase , ) def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=None , lowerCAmelCase=None ) -> List[str]: SCREAMING_SNAKE_CASE__: Tuple= state.common.alphas_cumprod[t] SCREAMING_SNAKE_CASE__: int= jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) ) # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf) # and sample from it to get previous sample # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample SCREAMING_SNAKE_CASE__: int= (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t] if variance_type is None: SCREAMING_SNAKE_CASE__: Union[str, Any]= self.config.variance_type # hacks - were probably added for training stability if variance_type == "fixed_small": SCREAMING_SNAKE_CASE__: Dict= jnp.clip(lowerCAmelCase , a_min=1e-20 ) # for rl-diffuser https://arxiv.org/abs/2205.09991 elif variance_type == "fixed_small_log": SCREAMING_SNAKE_CASE__: str= jnp.log(jnp.clip(lowerCAmelCase , a_min=1e-20 ) ) elif variance_type == "fixed_large": SCREAMING_SNAKE_CASE__: Union[str, Any]= state.common.betas[t] elif variance_type == "fixed_large_log": # Glide max_log SCREAMING_SNAKE_CASE__: Optional[Any]= jnp.log(state.common.betas[t] ) elif variance_type == "learned": return predicted_variance elif variance_type == "learned_range": SCREAMING_SNAKE_CASE__: List[Any]= variance SCREAMING_SNAKE_CASE__: Any= state.common.betas[t] SCREAMING_SNAKE_CASE__: List[Any]= (predicted_variance + 1) / 
2 SCREAMING_SNAKE_CASE__: Optional[Any]= frac * max_log + (1 - frac) * min_log return variance def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = True , ) -> Union[FlaxDDPMSchedulerOutput, Tuple]: SCREAMING_SNAKE_CASE__: Union[str, Any]= timestep if key is None: SCREAMING_SNAKE_CASE__: Optional[Any]= jax.random.PRNGKey(0 ) if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]: SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: Optional[int]= jnp.split(lowerCAmelCase , sample.shape[1] , axis=1 ) else: SCREAMING_SNAKE_CASE__: Any= None # 1. compute alphas, betas SCREAMING_SNAKE_CASE__: List[Any]= state.common.alphas_cumprod[t] SCREAMING_SNAKE_CASE__: Optional[int]= jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) ) SCREAMING_SNAKE_CASE__: Optional[int]= 1 - alpha_prod_t SCREAMING_SNAKE_CASE__: str= 1 - alpha_prod_t_prev # 2. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf if self.config.prediction_type == "epsilon": SCREAMING_SNAKE_CASE__: Dict= (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 elif self.config.prediction_type == "sample": SCREAMING_SNAKE_CASE__: str= model_output elif self.config.prediction_type == "v_prediction": SCREAMING_SNAKE_CASE__: Tuple= (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output else: raise ValueError( f'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` ' ''' for the FlaxDDPMScheduler.''' ) # 3. Clip "predicted x_0" if self.config.clip_sample: SCREAMING_SNAKE_CASE__: Any= jnp.clip(lowerCAmelCase , -1 , 1 ) # 4. 
Compute coefficients for pred_original_sample x_0 and current sample x_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf SCREAMING_SNAKE_CASE__: int= (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t SCREAMING_SNAKE_CASE__: Any= state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t # 5. Compute predicted previous sample µ_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf SCREAMING_SNAKE_CASE__: Dict= pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample # 6. Add noise def random_variance(): SCREAMING_SNAKE_CASE__: int= jax.random.split(lowerCAmelCase , num=1 ) SCREAMING_SNAKE_CASE__: str= jax.random.normal(lowerCAmelCase , shape=model_output.shape , dtype=self.dtype ) return (self._get_variance(lowerCAmelCase , lowerCAmelCase , predicted_variance=lowerCAmelCase ) ** 0.5) * noise SCREAMING_SNAKE_CASE__: Union[str, Any]= jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) ) SCREAMING_SNAKE_CASE__: Optional[int]= pred_prev_sample + variance if not return_dict: return (pred_prev_sample, state) return FlaxDDPMSchedulerOutput(prev_sample=lowerCAmelCase , state=lowerCAmelCase ) def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ) -> jnp.ndarray: return add_noise_common(state.common , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ) -> jnp.ndarray: return get_velocity_common(state.common , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) def __len__( self ) -> Tuple: return self.config.num_train_timesteps
64
0
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch

from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool


class TextClassificationTool(PipelineTool):
    """
    Zero-shot text classification tool built on an NLI model: each candidate
    label is phrased as a hypothesis ("This example is {label}") and the label
    whose entailment logit is highest wins.

    Fixes vs. the mangled original: `config.idalabel` -> `config.id2label`,
    and `encode` now actually stores the labels on `self._labels` so that
    `decode` can look the winner up (previously it assigned a throwaway local,
    making `decode` raise AttributeError).
    """

    default_checkpoint = "facebook/bart-large-mnli"
    description = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )
    name = "text_classifier"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification

    inputs = ["text", ["text"]]
    outputs = ["text"]

    def setup(self):
        """Load model/tokenizer, then locate the entailment class in the config."""
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("entail"):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError("Could not determine the entailment ID from the model config, please pass it at init.")

    def encode(self, text, labels):
        """Tokenize (premise, hypothesis) pairs, one per candidate label."""
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels),
            [f"This example is {label}" for label in labels],
            return_tensors="pt",
            padding="max_length",
        )

    def decode(self, outputs):
        """Pick the label whose pair got the highest entailment logit."""
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
41
def A__ ( snake_case_ : int ): if upper_limit < 0: raise ValueError('''Limit for the Catalan sequence must be ≥ 0''' ) SCREAMING_SNAKE_CASE__: List[Any]= [0] * (upper_limit + 1) # Base case: C(0) = C(1) = 1 SCREAMING_SNAKE_CASE__: List[str]= 1 if upper_limit > 0: SCREAMING_SNAKE_CASE__: List[str]= 1 # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i for i in range(2 , upper_limit + 1 ): for j in range(snake_case_ ): catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1] return catalan_list if __name__ == "__main__": print('\n********* Catalan Numbers Using Dynamic Programming ************\n') print('\n*** Enter -1 at any time to quit ***') print('\nEnter the upper limit (≥ 0) for the Catalan number sequence: ', end='') try: while True: lowercase_ : Any = int(input().strip()) if N < 0: print('\n********* Goodbye!! ************') break else: print(f'''The Catalan numbers from 0 through {N} are:''') print(catalan_numbers(N)) print('Try another upper limit for the sequence: ', end='') except (NameError, ValueError): print('\n********* Invalid input, goodbye! ************\n') import doctest doctest.testmod()
64
0
'''simple docstring''' def _UpperCamelCase ( __UpperCamelCase ) -> list: if len(__UpperCamelCase ) <= 1: return [tuple(__UpperCamelCase )] lowerCamelCase_ = [] def generate(__UpperCamelCase ,__UpperCamelCase ): lowerCamelCase_ = [0] * n res.append(tuple(__UpperCamelCase ) ) lowerCamelCase_ = 0 while i < n: if c[i] < i: if i % 2 == 0: lowerCamelCase_ ,lowerCamelCase_ = arr[i], arr[0] else: lowerCamelCase_ ,lowerCamelCase_ = arr[i], arr[c[i]] res.append(tuple(__UpperCamelCase ) ) c[i] += 1 lowerCamelCase_ = 0 else: lowerCamelCase_ = 0 i += 1 generate(len(__UpperCamelCase ) ,__UpperCamelCase ) return res if __name__ == "__main__": A_ = input("Enter numbers separated by a comma:\n").strip() A_ = [int(item) for item in user_input.split(",")] print(heaps(arr))
42
# Package initializer for the UnCLIP pipelines: exposes the real pipeline
# classes when both `transformers>=4.25.0` and `torch` are installed,
# otherwise falls back to the dummy placeholder objects.
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)

try:
    # All three conditions must hold for the real implementations to import.
    if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Dummy objects raise an informative error when instantiated.
    from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
    from .pipeline_unclip import UnCLIPPipeline
    from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
    from .text_proj import UnCLIPTextProjModel
64
0
from collections import defaultdict


class AssignmentUsingBitmask:
    """Count the number of ways to assign tasks to persons with a bitmask DP.

    Tasks are numbered 1..total; person i may only perform the tasks listed
    in task_performed[i].  A valid assignment gives every person exactly one
    task and never reuses a task.
    """

    def __init__(self, task_performed, total):
        # total number of tasks (N); tasks are numbered from 1 to N
        self.total_tasks = total
        # DP table of dimension (2^M) x (N+1); -1 marks "not yet computed"
        self.dp = [
            [-1 for _ in range(total + 1)] for _ in range(2 ** len(task_performed))
        ]
        # task[t] -> list of persons able to perform task t
        self.task = defaultdict(list)
        # final_mask has all M person-bits set: everyone has received a task
        self.final_mask = (1 << len(task_performed)) - 1

    def count_ways_until(self, mask, task_no):
        """Count completions given persons already assigned (mask) and the
        next task number to consider (task_no)."""
        # every person has a task: one complete valid assignment
        if mask == self.final_mask:
            return 1
        # tasks exhausted before everyone was assigned
        if task_no > self.total_tasks:
            return 0
        # memoized result
        if self.dp[mask][task_no] != -1:
            return self.dp[mask][task_no]

        # ways when this task is left unassigned
        total_ways_util = self.count_ways_until(mask, task_no + 1)

        # assign this task to each eligible, still-free person
        if task_no in self.task:
            for p in self.task[task_no]:
                if mask & (1 << p):
                    continue  # person p already has a task
                total_ways_util += self.count_ways_until(mask | (1 << p), task_no + 1)

        self.dp[mask][task_no] = total_ways_util
        return self.dp[mask][task_no]

    def count_no_of_ways(self, task_performed):
        """Build the task->persons index, then run the DP from an empty mask."""
        for i in range(len(task_performed)):
            for j in task_performed[i]:
                self.task[j].append(i)
        # final answer is stored in dp[0][1]
        return self.count_ways_until(0, 1)


# Backward-compatible alias for the original obfuscated class name.
_a = AssignmentUsingBitmask

if __name__ == "__main__":
    total_tasks = 5  # total number of tasks (the value of N)
    # the list of tasks that can be done by each of the M persons
    task_performed = [[1, 3, 4], [1, 2, 5], [3, 4]]
    print(
        AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
            task_performed
        )
    )
43
"""Convert a fairseq/metaseq OPT checkpoint into a HF Transformers OPTModel."""
import argparse
from pathlib import Path

import torch


def load_checkpoint(checkpoint_path):
    """Load a metaseq OPT state dict and normalize its keys for HF OPTModel.

    Drops fairseq-only weights, renames projection/layer-norm keys, and
    splits each fused ``qkv_proj`` weight into separate q/k/v projections.
    """
    sd = torch.load(checkpoint_path, map_location="cpu")
    if "model" in sd.keys():
        sd = torch.load(checkpoint_path, map_location="cpu")["model"]

    # pop unnecessary weights
    keys_to_delete = [
        "decoder.version",
        "decoder.output_projection.weight",
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(key)

    keys_to_rename = {
        "decoder.project_in_dim.weight": "decoder.project_in.weight",
        "decoder.project_out_dim.weight": "decoder.project_out.weight",
        "decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            sd[new_key] = sd.pop(old_key)

    keys = list(sd.keys())
    for key in keys:
        if ".qkv_proj." in key:
            value = sd[key]
            # We split fused QKV into separate Q, K, V tensors
            q_name = key.replace(".qkv_proj.", ".q_proj.")
            k_name = key.replace(".qkv_proj.", ".k_proj.")
            v_name = key.replace(".qkv_proj.", ".v_proj.")

            depth = value.shape[0]
            assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` stores QKV as K,V,Q despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            k, v, q = torch.split(value, depth // 3, dim=0)

            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v
            del sd[key]

    return sd


@torch.no_grad()
def convert_opt_checkpoint(checkpoint_path, pytorch_dump_folder_path, config=None):
    """Load, convert and save a checkpoint as a HF ``OPTModel`` directory.

    Args:
        checkpoint_path: path to the fairseq/metaseq checkpoint.
        pytorch_dump_folder_path: output directory (created if missing).
        config: optional path/name of an ``OPTConfig`` to instantiate with.
    """
    # transformers is imported lazily so that load_checkpoint() remains
    # usable without transformers installed.
    from transformers import OPTConfig, OPTModel

    state_dict = load_checkpoint(checkpoint_path)

    if config is not None:
        config = OPTConfig.from_pretrained(config)
    else:
        config = OPTConfig()

    model = OPTModel(config).half().eval()
    model.load_state_dict(state_dict)

    # Check results
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)


# NOTE: in the original obfuscated file both functions were bound to the same
# name; keep an alias so the last-bound public name still resolves.
A__ = convert_opt_checkpoint

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--fairseq_path",
        type=str,
        help=(
            "path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:"
            " https://huggingface.co/models?other=opt_metasq"
        ),
    )
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--hf_config", default=None, type=str, help="Define HF config.")
    args = parser.parse_args()
    convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
64
0
"""Registry of the packaged dataset builder modules and their file extensions."""
import inspect
import re
from hashlib import sha256
from typing import Dict, List

from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql  # noqa F401
from .text import text


def _hash_python_lines(lines: List[str]) -> str:
    """Hash a module's source lines, ignoring comments, for cache keys.

    Comments are stripped so that comment-only edits do not invalidate the
    cached datasets built from these modules.
    """
    filtered_lines = []
    for line in lines:
        line = re.sub(r"#.*", "", line)  # remove comments
        if line:
            filtered_lines.append(line)
    full_str = "\n".join(filtered_lines)

    # Make a hash from all this code
    full_bytes = full_str.encode("utf-8")
    return sha256(full_bytes).hexdigest()


# Backward-compatible alias for the original obfuscated function name.
A_ = _hash_python_lines

# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
    "csv": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
    "json": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
    "pandas": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
    "parquet": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
    "arrow": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
    "text": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
    "imagefolder": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
    "audiofolder": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}

# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE = {
    ".csv": ("csv", {}),
    ".tsv": ("csv", {"sep": "\t"}),
    ".json": ("json", {}),
    ".jsonl": ("json", {}),
    ".parquet": ("parquet", {}),
    ".arrow": ("arrow", {}),
    ".txt": ("text", {}),
}
# folder-based builders claim their media extensions (both cases)
_EXTENSION_TO_MODULE.update({ext: ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})

# modules that support reading metadata files alongside the data files
_MODULE_SUPPORTS_METADATA = {"imagefolder", "audiofolder"}

# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS: Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
    _MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)

_MODULE_TO_EXTENSIONS["imagefolder"].append(".zip")
_MODULE_TO_EXTENSIONS["audiofolder"].append(".zip")
44
def A__(density: float, bulk_modulus: float) -> float:
    """Return the speed of sound in a fluid: c = sqrt(K / rho).

    Newton–Laplace formula with bulk modulus K and density rho.

    Args:
        density: fluid density (must be > 0).
        bulk_modulus: fluid bulk modulus (must be > 0).

    Raises:
        ValueError: if either quantity is non-positive.
    """
    # NOTE: the original signature declared both parameters as `snake_case_`
    # (a SyntaxError) while the body read `density`/`bulk_modulus`; the
    # parameter names were restored to match the body.
    if density <= 0:
        raise ValueError("Impossible fluid density")
    if bulk_modulus <= 0:
        raise ValueError("Impossible bulk modulus")

    return (bulk_modulus / density) ** 0.5


if __name__ == "__main__":
    import doctest

    doctest.testmod()
64
0
# Self-training driver for text classification: alternates fine-tuning on
# labeled data with pseudo-labeling of unlabeled data across iterations.
#
# NOTE(review): identifier mangling has collapsed many distinct names in this
# module — both module constants are assigned to `UpperCamelCase`, all three
# argument dataclasses are named `lowerCAmelCase_` (each shadows the previous),
# every field is `_snake_case`, both functions are named `A` (the second
# shadows the first), `default=lowercase` is unbound, and function bodies
# reference names (`args`, `dataset`, `logger`, `data_files`, `STModelArguments`,
# `create_pseudo_labeled_data`, ...) that are never bound under these spellings.
# The code is preserved verbatim; restore the original identifiers before use.
# Indentation reconstructed from statement structure — verify against upstream.
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional

import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm

import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy

# NOTE(review): two different constants (a logger and the weights filename)
# are both assigned to `UpperCamelCase`; the second overwrites the first.
UpperCamelCase = logging.getLogger(__name__)
UpperCamelCase = "pytorch_model.bin"


# Model arguments (path / cache dir).
@dataclasses.dataclass
class lowerCAmelCase_:
    """Arguments pointing to the pretrained model to fine-tune."""

    _snake_case: str = dataclasses.field(
        metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models."""}
    )
    _snake_case: Optional[str] = dataclasses.field(
        default=lowercase,
        metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co."""},
    )


# Data arguments (train / infer / eval files, task, labels).
@dataclasses.dataclass
class lowerCAmelCase_:
    """Arguments describing the input data files and task."""

    _snake_case: str = dataclasses.field(metadata={"""help""": """A csv or a json file containing the training data."""})
    _snake_case: str = dataclasses.field(metadata={"""help""": """A csv or a json file containing the data to predict on."""})
    _snake_case: Optional[str] = dataclasses.field(
        default=lowercase, metadata={"""help""": """A csv or a json file containing the validation data."""}
    )
    _snake_case: Optional[str] = dataclasses.field(
        default=lowercase,
        metadata={"""help""": """The name of the task to train on."""},
    )
    _snake_case: Optional[List[str]] = dataclasses.field(
        default=lowercase, metadata={"""help""": """The list of labels for the task."""}
    )


# Training arguments (output dir, metric, early stopping, filtering knobs).
@dataclasses.dataclass
class lowerCAmelCase_:
    """Arguments controlling training, evaluation and pseudo-label filtering."""

    _snake_case: str = dataclasses.field(
        metadata={"""help""": """The output directory where the model predictions and checkpoints will be written."""}
    )
    _snake_case: Optional[str] = dataclasses.field(
        default="""accuracy""", metadata={"""help""": """The evaluation metric used for the task."""}
    )
    _snake_case: Optional[str] = dataclasses.field(
        default="""no""",
        metadata={
            """help""": """The evaluation strategy to adopt during training. Possible values are: [\"no\", \"step\", \"epoch]"""
        },
    )
    _snake_case: Optional[int] = dataclasses.field(
        default=10,
        metadata={"""help""": """Number of evaluation calls with no improvement after which training will be stopped."""},
    )
    _snake_case: Optional[float] = dataclasses.field(
        default=0.0,
        metadata={
            """help""": """How much the specified evaluation metric must improve to satisfy early stopping conditions."""
        },
    )
    _snake_case: Optional[bool] = dataclasses.field(
        default=lowercase,
        metadata={"""help""": """Whether to filter the pseudo-labeled data based on the confidence score."""},
    )
    _snake_case: Optional[bool] = dataclasses.field(
        default=lowercase,
        metadata={"""help""": """Whether to filter the pseudo-labeled data based on the validation performance."""},
    )
    _snake_case: Optional[bool] = dataclasses.field(
        default=lowercase,
        metadata={"""help""": """Whether to fine-tune on labeled data after pseudo training."""},
    )
    _snake_case: Optional[float] = dataclasses.field(
        default=0.0,
        metadata={"""help""": """Confidence threshold for pseudo-labeled data filtering."""},
    )
    _snake_case: Optional[int] = dataclasses.field(
        default=100,
        metadata={"""help""": """Number of evaluation calls with no improvement after which training will be stopped."""},
    )
    _snake_case: Optional[int] = dataclasses.field(
        default=lowercase,
        metadata={"""help""": """Random seed for initialization."""},
    )


# Builds the pseudo-labeled training file for the next self-training
# iteration from the inference outputs of the current one.
def A(lowercase__: List[Any], lowercase__: Tuple, lowercase__: int, lowercase__: Optional[int], lowercase__: Dict, lowercase__: Optional[Any]) -> List[str]:
    UpperCamelCase__: Union[str, Any] = datasets.concatenate_datasets([infer_input, infer_output], axis=1)

    if args.do_filter_by_confidence:
        # keep only predictions above the configured confidence threshold
        UpperCamelCase__: List[str] = dataset.filter(lambda lowercase__: example["probability"] > args.confidence_threshold)

    if args.do_filter_by_val_performance:
        assert eval_result >= 0.0 and eval_result <= 1.0
        # keep the top eval_result fraction of rows, most confident first
        UpperCamelCase__: Any = int(eval_result * len(lowercase__))
        print(lowercase__)
        UpperCamelCase__: Optional[Any] = dataset.sort("""probability""", reverse=lowercase__)
        UpperCamelCase__: Optional[Any] = dataset.select(range(lowercase__))

    UpperCamelCase__: int = dataset.remove_columns(["""label""", """probability"""])
    UpperCamelCase__: str = dataset.rename_column("""prediction""", """label""")
    UpperCamelCase__: Optional[Any] = dataset.map(lambda lowercase__: {"label": idalabel[example["label"]]})
    UpperCamelCase__: Dict = dataset.shuffle(seed=args.seed)

    UpperCamelCase__: int = os.path.join(lowercase__, f"""train_pseudo.{args.data_file_extension}""")
    if args.data_file_extension == "csv":
        dataset.to_csv(lowercase__, index=lowercase__)
    else:
        dataset.to_json(lowercase__)


# Main self-training loop: stage-1 (pseudo-)training, optional stage-2
# fine-tuning on labeled data, pseudo-label creation, early stopping.
def A(lowercase__: Tuple, lowercase__: Optional[Any], lowercase__: Optional[Any], lowercase__: Dict, **lowercase__: Dict) -> Tuple:
    UpperCamelCase__: int = Accelerator()
    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""",
        datefmt="""%m/%d/%Y %H:%M:%S""",
        level=logging.INFO,
    )
    logger.info(accelerator.state)

    # Setup logging, we only want one process per machine to log things on the
    # screen. accelerator.is_local_main_process is only True for one process per
    # machine.
    logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)

    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_info()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()

    UpperCamelCase__: List[str] = STModelArguments(model_name_or_path=lowercase__)
    UpperCamelCase__: str = STDataArguments(train_file=lowercase__, infer_file=lowercase__)
    UpperCamelCase__: List[Any] = STTrainingArguments(output_dir=lowercase__)
    UpperCamelCase__: List[Any] = argparse.Namespace()

    # merge the three dataclasses plus any kwargs overrides into one namespace
    for arg_class in (model_args, data_args, training_args):
        for key, value in vars(lowercase__).items():
            setattr(lowercase__, lowercase__, lowercase__)

    for key, value in kwargs.items():
        if hasattr(lowercase__, lowercase__):
            setattr(lowercase__, lowercase__, lowercase__)

    # Sanity checks
    UpperCamelCase__: Tuple = {}
    UpperCamelCase__: Tuple = None

    # You need to provide the training data and the data to predict on
    assert args.train_file is not None
    assert args.infer_file is not None
    UpperCamelCase__: Optional[Any] = args.train_file
    UpperCamelCase__: Optional[int] = args.infer_file

    if args.evaluation_strategy != IntervalStrategy.NO.value:
        assert args.eval_file is not None
        UpperCamelCase__: Optional[Any] = args.eval_file

    for key in data_files:
        UpperCamelCase__: Dict = data_files[key].split(""".""")[-1]
        assert extension in ["csv", "json"], f"""`{key}_file` should be a csv or a json file."""
        if args.data_file_extension is None:
            UpperCamelCase__: Any = extension
        else:
            assert extension == args.data_file_extension, f"""`{key}_file` should be a {args.data_file_extension} file`."""

    assert (
        args.eval_metric in datasets.list_metrics()
    ), f"""{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}."""

    # If passed along, set the training seed now.
    if args.seed is not None:
        set_seed(args.seed)

    logger.info("""Creating the initial data directory for self-training...""")
    UpperCamelCase__: Optional[int] = f"""{args.output_dir}/self-train_iter-{{}}""".format
    UpperCamelCase__: str = data_dir_format(0)

    if accelerator.is_main_process:
        if args.output_dir is not None:
            os.makedirs(args.output_dir, exist_ok=lowercase__)
        os.makedirs(lowercase__, exist_ok=lowercase__)
    accelerator.wait_for_everyone()

    UpperCamelCase__: Union[str, Any] = None
    UpperCamelCase__: str = None
    UpperCamelCase__: List[str] = 0
    UpperCamelCase__: Optional[int] = False

    # Show the progress bar
    UpperCamelCase__: Optional[int] = tqdm(range(args.max_selftrain_iterations), disable=not accelerator.is_local_main_process)

    # Self-train
    for iteration in range(0, int(args.max_selftrain_iterations)):
        UpperCamelCase__: int = data_dir_format(lowercase__)
        assert os.path.exists(lowercase__)

        # Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
        # iteration > 0
        UpperCamelCase__: Tuple = os.path.join(lowercase__, """stage-1""")
        UpperCamelCase__: Tuple = {
            """accelerator""": accelerator,
            """model_name_or_path""": args.model_name_or_path,
            """cache_dir""": args.cache_dir,
            """do_train""": True,
            """train_file""": data_files["""train"""] if iteration == 0 else data_files["""train_pseudo"""],
            """do_eval""": True if args.eval_file is not None else False,
            """eval_file""": data_files["""eval"""],
            """do_predict""": True,
            """infer_file""": data_files["""infer"""],
            """task_name""": args.task_name,
            """label_list""": args.label_list,
            """output_dir""": current_output_dir,
            """eval_metric""": args.eval_metric,
            """evaluation_strategy""": args.evaluation_strategy,
            """early_stopping_patience""": args.early_stopping_patience,
            """early_stopping_threshold""": args.early_stopping_threshold,
            """seed""": args.seed,
        }

        # Add additional training arguments
        for key, value in kwargs.items():
            if key not in arguments_dict and not hasattr(lowercase__, lowercase__):
                arguments_dict.update({key: value})

        UpperCamelCase__: Union[str, Any] = os.path.join(lowercase__, """best-checkpoint""", lowercase__)
        if os.path.exists(lowercase__):
            logger.info(
                """Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.""",
                lowercase__,
                lowercase__,
            )
        else:
            logger.info("""***** Running self-training: iteration: %d, stage: 1 *****""", lowercase__)
            finetune(**lowercase__)
            accelerator.wait_for_everyone()
            assert os.path.exists(lowercase__)
            logger.info("""Self-training job completed: iteration: %d, stage: 1.""", lowercase__)

        if iteration > 0 and args.finetune_on_labeled_data:
            # Stage 2 (optional): fine-tuning on the original labeled data
            UpperCamelCase__: List[Any] = os.path.join(lowercase__, """best-checkpoint""")
            UpperCamelCase__: List[Any] = os.path.join(lowercase__, """stage-2""")
            # Update arguments_dict
            UpperCamelCase__: List[Any] = model_path
            UpperCamelCase__: Any = data_files["""train"""]
            UpperCamelCase__: Optional[int] = current_output_dir

            UpperCamelCase__: str = os.path.join(lowercase__, """best-checkpoint""", lowercase__)
            if os.path.exists(lowercase__):
                logger.info(
                    """Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.""",
                    lowercase__,
                    lowercase__,
                )
            else:
                logger.info("""***** Running self-training: iteration: %d, stage: 2 *****""", lowercase__)
                finetune(**lowercase__)
                accelerator.wait_for_everyone()
                assert os.path.exists(lowercase__)
                logger.info("""Self-training job completed: iteration: %d, stage: 2.""", lowercase__)

        UpperCamelCase__: Tuple = iteration
        UpperCamelCase__: List[Any] = data_dir_format(iteration + 1)

        UpperCamelCase__: Optional[Any] = AutoConfig.from_pretrained(os.path.join(lowercase__, """best-checkpoint"""))
        UpperCamelCase__: List[str] = config.idalabel
        UpperCamelCase__: Union[str, Any] = os.path.join(lowercase__, """eval_results_best-checkpoint.json""")
        UpperCamelCase__: Dict = os.path.join(lowercase__, """test_results_best-checkpoint.json""")
        assert os.path.exists(lowercase__)

        with open(lowercase__, """r""") as f:
            UpperCamelCase__: List[Any] = float(json.load(lowercase__)[args.eval_metric])
        UpperCamelCase__: Any = os.path.join(lowercase__, """infer_output_best-checkpoint.csv""")
        assert os.path.exists(lowercase__)

        # Loading the dataset from local csv or json files.
        UpperCamelCase__: Any = load_dataset(args.data_file_extension, data_files={"""data""": data_files["""infer"""]})["""data"""]
        UpperCamelCase__: Optional[int] = load_dataset("""csv""", data_files={"""data""": infer_output_file})["""data"""]

        if accelerator.is_main_process:
            os.makedirs(lowercase__, exist_ok=lowercase__)
            shutil.copy(lowercase__, os.path.join(lowercase__, f"""eval_results_iter-{iteration}.json"""))
            if os.path.exists(lowercase__):
                shutil.copy(lowercase__, os.path.join(lowercase__, f"""test_results_iter-{iteration}.json"""))
            create_pseudo_labeled_data(lowercase__, lowercase__, lowercase__, lowercase__, lowercase__, lowercase__)
        accelerator.wait_for_everyone()

        UpperCamelCase__: List[str] = os.path.join(lowercase__, f"""train_pseudo.{args.data_file_extension}""")

        if args.evaluation_strategy != IntervalStrategy.NO.value:
            UpperCamelCase__: List[Any] = eval_result

            if best_iteration is None:
                UpperCamelCase__: Union[str, Any] = new_iteration
                UpperCamelCase__: List[str] = new_eval_result
            else:
                if new_eval_result - best_eval_result > args.early_stopping_threshold:
                    UpperCamelCase__: Optional[Any] = new_iteration
                    UpperCamelCase__: Optional[int] = new_eval_result
                    UpperCamelCase__: Optional[int] = 0
                else:
                    if new_eval_result == best_eval_result:
                        UpperCamelCase__: str = new_iteration
                        UpperCamelCase__: Dict = new_eval_result
                    early_stopping_patience_counter += 1

            if early_stopping_patience_counter >= args.early_stopping_patience:
                UpperCamelCase__: Optional[int] = True

        progress_bar.update(1)

        if should_training_stop:
            break

    if best_iteration is not None:
        # Save the best iteration
        logger.info("""Best iteration: %d""", lowercase__)
        logger.info("""Best evaluation result: %s = %f""", args.eval_metric, lowercase__)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            shutil.copy(
                os.path.join(lowercase__, f"""eval_results_iter-{iteration}.json"""),
                os.path.join(lowercase__, """eval_results_best-iteration.json"""),
            )
    else:
        # Assume that the last iteration is the best
        logger.info("""Best iteration: %d""", args.max_selftrain_iterations - 1)
        logger.info("""Best evaluation result: %s = %f""", args.eval_metric, lowercase__)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            shutil.copy(
                os.path.join(lowercase__, f"""eval_results_iter-{args.max_selftrain_iterations - 1}.json"""),
                os.path.join(lowercase__, """eval_results_best-iteration.json"""),
            )
45
# Package initializer for BLIP-2: declares the lazily-imported public API and
# guards the torch-only model classes behind an availability check.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# Import structure consumed by _LazyModule: module name -> exported symbols.
# NOTE(review): obfuscation renamed this to `lowercase_`, but the _LazyModule
# call below reads `_import_structure` — these must be the same object.
lowercase_ : Any = {
    'configuration_blip_2': [
        'BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'Blip2Config',
        'Blip2QFormerConfig',
        'Blip2VisionConfig',
    ],
    'processing_blip_2': ['Blip2Processor'],
}

try:
    # model classes require torch; skip registering them when unavailable
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # NOTE(review): presumably this list should extend the import structure
    # under a 'modeling_blip_2' key — confirm against upstream.
    lowercase_ : int = [
        'BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST',
        'Blip2Model',
        'Blip2QFormerModel',
        'Blip2PreTrainedModel',
        'Blip2ForConditionalGeneration',
        'Blip2VisionModel',
    ]

if TYPE_CHECKING:
    # direct imports only for static type checkers; runtime stays lazy
    from .configuration_blip_a import (
        BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BlipaConfig,
        BlipaQFormerConfig,
        BlipaVisionConfig,
    )
    from .processing_blip_a import BlipaProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blip_a import (
            BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            BlipaForConditionalGeneration,
            BlipaModel,
            BlipaPreTrainedModel,
            BlipaQFormerModel,
            BlipaVisionModel,
        )
else:
    import sys

    # NOTE(review): the transformers lazy-init pattern assigns this to
    # `sys.modules[__name__]`, not a module-level variable — verify.
    lowercase_ : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
64
0
"""simple docstring""" def lowerCamelCase_( _lowerCamelCase ) -> str: '''simple docstring''' if isinstance(_lowerCamelCase , _lowerCamelCase ): raise TypeError("'float' object cannot be interpreted as an integer" ) if isinstance(_lowerCamelCase , _lowerCamelCase ): raise TypeError("'str' object cannot be interpreted as an integer" ) if num == 0: return "0b0" _lowerCamelCase : Optional[Any] = False if num < 0: _lowerCamelCase : Tuple = True _lowerCamelCase : str = -num _lowerCamelCase : list[int] = [] while num > 0: binary.insert(0 , num % 2 ) num >>= 1 if negative: return "-0b" + "".join(str(_lowerCamelCase ) for e in binary ) return "0b" + "".join(str(_lowerCamelCase ) for e in binary ) if __name__ == "__main__": import doctest doctest.testmod()
46
# Task template describing a text-classification dataset: which column holds
# the text, which holds the labels, and how to align the label feature.
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import ClassLabel, Features, Value
from .base import TaskTemplate


# NOTE(review): obfuscation replaced the decorator argument (presumably
# `frozen=True`) and the base class (presumably `TaskTemplate`) with the
# unbound name `UpperCamelCase_`; all five fields also collapsed to `__a`
# (each shadows the previous) — restore distinct names before use.
@dataclass(frozen=UpperCamelCase_)
class _lowerCamelCase(UpperCamelCase_):
    """Text-classification task template (text column + ClassLabel column)."""

    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    __a = field(default="text-classification", metadata={"include_in_asdict_even_if_is_default": True})
    __a = Features({"text": Value("string")})
    __a = Features({"labels": ClassLabel})
    __a = "text"
    __a = "labels"

    def UpperCamelCase_(self, lowerCAmelCase) -> Tuple:
        # Returns a copy of this template whose label schema is taken from the
        # given dataset features.
        # NOTE(review): body references `features`/`label_schema`/`task_template`
        # that the mangled parameter and assignments never bind.
        if self.label_column not in features:
            raise ValueError(f'Column {self.label_column} is not present in features.')
        if not isinstance(features[self.label_column], lowerCAmelCase):
            raise ValueError(f'Column {self.label_column} is not a ClassLabel.')
        SCREAMING_SNAKE_CASE__: Union[str, Any] = copy.deepcopy(self)
        SCREAMING_SNAKE_CASE__: Tuple = self.label_schema.copy()
        SCREAMING_SNAKE_CASE__: Union[str, Any] = features[self.label_column]
        SCREAMING_SNAKE_CASE__: List[str] = label_schema
        return task_template

    @property
    def UpperCamelCase_(self) -> Dict[str, str]:
        # Maps dataset column names to the canonical task column names.
        return {
            self.text_column: "text",
            self.label_column: "labels",
        }
64
0
# Training utilities for the RAG example: parameter counting, checkpoint /
# early-stopping factories, and a pytorch_lightning logging callback.
import logging
from pathlib import Path

import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only

from utils_rag import save_json


# Counts the trainable parameters of a model.
# NOTE(review): body references `p`, `model`, `model_parameters`, `params` —
# none bound under these spellings (identifier mangling); restore before use.
def UpperCAmelCase__(lowerCamelCase_: Optional[int]):
    __a : Dict = filter(lambda lowerCamelCase_: p.requires_grad, model.parameters())
    __a : Union[str, Any] = sum([np.prod(p.size()) for p in model_parameters])
    return params


SCREAMING_SNAKE_CASE__ = logging.getLogger(__name__)


# Builds a ModelCheckpoint that tracks the validation metric.
# NOTE(review): the three module functions all share the mangled name
# `UpperCAmelCase__`, so only the last definition survives at import time.
def UpperCAmelCase__(lowerCamelCase_: Tuple, lowerCamelCase_: Optional[Any]):
    if metric == "rouge2":
        __a : int = '{val_avg_rouge2:.4f}-{step_count}'
    elif metric == "bleu":
        __a : Optional[Any] = '{val_avg_bleu:.4f}-{step_count}'
    elif metric == "em":
        __a : Any = '{val_avg_em:.4f}-{step_count}'
    elif metric == "loss":
        __a : int = '{val_avg_loss:.4f}-{step_count}'
    else:
        raise NotImplementedError(
            f'''seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this'''
            ' function.'
        )

    __a : Union[str, Any] = ModelCheckpoint(
        dirpath=lowerCamelCase_,
        filename=lowerCamelCase_,
        monitor=f'''val_{metric}''',
        mode='max',
        save_top_k=1,
        every_n_epochs=1,
    )
    return checkpoint_callback


# Builds an EarlyStopping callback on the validation metric.
def UpperCAmelCase__(lowerCamelCase_: int, lowerCamelCase_: Any):
    return EarlyStopping(
        monitor=f'''val_{metric}''',
        mode='min' if 'loss' in metric else 'max',
        patience=lowerCamelCase_,
        verbose=lowerCamelCase_,
    )


class _UpperCamelCase(pl.Callback):
    """Lightning callback that logs LRs, metrics and parameter counts.

    NOTE(review): all five methods share the mangled name `__lowerCAmelCase`
    (only the last survives on the class) and bodies reference names such as
    `pl_module`, `trainer`, `metrics`, `count_trainable_parameters` and
    `self._write_logs` that the mangled parameters never bind.
    """

    # Logs the learning rate of every optimizer parameter group.
    def __lowerCAmelCase(self: List[Any], SCREAMING_SNAKE_CASE__: Optional[int], SCREAMING_SNAKE_CASE__: Tuple):
        __a : List[str] = {f'''lr_group_{i}''': param['lr'] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(SCREAMING_SNAKE_CASE__)

    # Writes metrics (and optionally generations) for a split to disk.
    @rank_zero_only
    def __lowerCAmelCase(self: str, SCREAMING_SNAKE_CASE__: pl.Trainer, SCREAMING_SNAKE_CASE__: pl.LightningModule, SCREAMING_SNAKE_CASE__: str, SCREAMING_SNAKE_CASE__: Any = True):
        logger.info(f'''***** {type_path} results at step {trainer.global_step:05d} *****''')
        __a : Optional[int] = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['log', 'progress_bar', 'preds']})
        # Log results
        __a : Tuple = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            __a : int = od / 'test_results.txt'
            __a : str = od / 'test_generations.txt'
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            __a : Tuple = od / f'''{type_path}_results/{trainer.global_step:05d}.txt'''
            __a : List[str] = od / f'''{type_path}_generations/{trainer.global_step:05d}.txt'''
            results_file.parent.mkdir(exist_ok=SCREAMING_SNAKE_CASE__)
            generations_file.parent.mkdir(exist_ok=SCREAMING_SNAKE_CASE__)
        with open(SCREAMING_SNAKE_CASE__, 'a+') as writer:
            for key in sorted(SCREAMING_SNAKE_CASE__):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                __a : Tuple = metrics[key]
                if isinstance(SCREAMING_SNAKE_CASE__, torch.Tensor):
                    __a : Union[str, Any] = val.item()
                __a : List[str] = f'''{key}: {val:.6f}\n'''
                writer.write(SCREAMING_SNAKE_CASE__)

        if not save_generations:
            return

        if "preds" in metrics:
            __a : List[Any] = '\n'.join(metrics['preds'])
            generations_file.open('w+').write(SCREAMING_SNAKE_CASE__)

    # Logs total and trainable parameter counts (in millions).
    @rank_zero_only
    def __lowerCAmelCase(self: Optional[int], SCREAMING_SNAKE_CASE__: List[Any], SCREAMING_SNAKE_CASE__: str):
        try:
            __a : Optional[int] = pl_module.model.model.num_parameters()
        except AttributeError:
            __a : List[Any] = pl_module.model.num_parameters()
        __a : Dict = count_trainable_parameters(SCREAMING_SNAKE_CASE__)
        # mp stands for million parameters
        trainer.logger.log_metrics({'n_params': npars, 'mp': npars / 1e6, 'grad_mp': n_trainable_pars / 1e6})

    # On test end: persist metrics and write the test logs.
    @rank_zero_only
    def __lowerCAmelCase(self: int, SCREAMING_SNAKE_CASE__: pl.Trainer, SCREAMING_SNAKE_CASE__: pl.LightningModule):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, 'test')

    # On validation end: persist metrics only.
    @rank_zero_only
    def __lowerCAmelCase(self: Any, SCREAMING_SNAKE_CASE__: pl.Trainer, SCREAMING_SNAKE_CASE__: Dict):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
47
import inspect import unittest class _lowerCamelCase ( unittest.TestCase ): def UpperCamelCase_ ( self ) -> Any: try: import diffusers # noqa: F401 except ImportError: assert False def UpperCamelCase_ ( self ) -> List[str]: import diffusers from diffusers.dependency_versions_table import deps SCREAMING_SNAKE_CASE__: Tuple= inspect.getmembers(lowerCAmelCase , inspect.isclass ) for cls_name, cls_module in all_classes: if "dummy_" in cls_module.__module__: for backend in cls_module._backends: if backend == "k_diffusion": SCREAMING_SNAKE_CASE__: Optional[int]= '''k-diffusion''' elif backend == "invisible_watermark": SCREAMING_SNAKE_CASE__: int= '''invisible-watermark''' assert backend in deps, f'{backend} is not in the deps table!'
64
0
"""Viterbi algorithm: most likely hidden-state path of an HMM."""
from typing import Any


def A(
    observations_space: list,
    states_space: list,
    initial_probabilities: dict,
    transition_probabilities: dict,
    emission_probabilities: dict,
) -> list:
    """Return the most probable sequence of hidden states for the observations.

    Args:
        observations_space: ordered list of observed symbols (strings).
        states_space: list of hidden-state names (strings).
        initial_probabilities: state -> P(state at t=0).
        transition_probabilities: state -> {state -> P(next | current)}.
        emission_probabilities: state -> {observation -> P(obs | state)}.

    Raises:
        ValueError: if any argument is empty or has the wrong structure.
    """
    _validation(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )

    # Creates data structures and fills the initial step
    probabilities = {}
    pointers = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None

    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1, len(observations_space)):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for the probability function
            arg_max = ""
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state

            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max

    # The final observation
    final_observation = observations_space[len(observations_space) - 1]

    # argmax for the final observation
    arg_max = ""
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max

    # Process pointers backwards
    previous = last_state
    result = []
    for o in range(len(observations_space) - 1, -1, -1):
        result.append(previous)
        previous = pointers[previous, observations_space[o]]
    result.reverse()

    return result


def _validation(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    """Validate all inputs: non-empty, correct container and value types."""
    _validate_not_empty(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    _validate_lists(observations_space, states_space)
    _validate_dicts(initial_probabilities, transition_probabilities, emission_probabilities)


def _validate_not_empty(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    """Reject any falsy (empty) argument."""
    if not all(
        [
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        ]
    ):
        raise ValueError("There's an empty parameter")


def _validate_lists(observations_space: Any, states_space: Any) -> None:
    """Both sequence arguments must be lists of strings."""
    _validate_list(observations_space, "observations_space")
    _validate_list(states_space, "states_space")


def _validate_list(_object: Any, var_name: str) -> None:
    """Require a list whose elements are all strings."""
    if not isinstance(_object, list):
        raise ValueError(f"{var_name} must be a list")
    else:
        for x in _object:
            if not isinstance(x, str):
                raise ValueError(f"{var_name} must be a list of strings")


def _validate_dicts(
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    """Initial probs are str->float; the other two are nested dicts."""
    _validate_dict(initial_probabilities, "initial_probabilities", float)
    _validate_nested_dict(transition_probabilities, "transition_probabilities")
    _validate_nested_dict(emission_probabilities, "emission_probabilities")


def _validate_nested_dict(_object: Any, var_name: str) -> None:
    """Require a dict of dicts whose inner values are floats."""
    _validate_dict(_object, var_name, dict)
    for x in _object.values():
        _validate_dict(x, var_name, float, True)


def _validate_dict(_object: Any, var_name: str, value_type: type, nested: bool = False) -> None:
    """Require string keys and values of value_type."""
    if not isinstance(_object, dict):
        raise ValueError(f"{var_name} must be a dict")
    if not all(isinstance(x, str) for x in _object):
        raise ValueError(f"{var_name} all keys must be strings")
    if not all(isinstance(x, value_type) for x in _object.values()):
        nested_text = "nested dictionary " if nested else ""
        raise ValueError(f"{var_name} {nested_text}all values must be {value_type.__name__}")


# Descriptive alias; the mangled file bound every function to the name `A`.
viterbi = A

if __name__ == "__main__":
    from doctest import testmod

    testmod()
48
import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized, parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , ) @pytest.mark.usefixtures("sm_env" ) @parameterized_class( [ { "framework": "pytorch", "script": "run_glue_model_parallelism.py", "model_name_or_path": "roberta-large", "instance_type": "ml.p3dn.24xlarge", "results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2}, }, { "framework": "pytorch", "script": "run_glue.py", "model_name_or_path": "roberta-large", "instance_type": "ml.p3dn.24xlarge", "results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2}, }, ] ) class _lowerCamelCase ( unittest.TestCase ): def UpperCamelCase_ ( self ) -> Any: if self.framework == "pytorch": subprocess.run( f'cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'.split() , encoding='''utf-8''' , check=lowerCAmelCase , ) assert hasattr(self , '''env''' ) def UpperCamelCase_ ( self , lowerCAmelCase ) -> Tuple: # configuration for running training on smdistributed Model Parallel SCREAMING_SNAKE_CASE__: Optional[Any]= { '''enabled''': True, '''processes_per_host''': 8, } SCREAMING_SNAKE_CASE__: Dict= { '''enabled''': True, '''parameters''': { '''microbatches''': 4, '''placement_strategy''': '''spread''', '''pipeline''': '''interleaved''', '''optimize''': '''speed''', '''partitions''': 4, '''ddp''': True, }, } SCREAMING_SNAKE_CASE__: Optional[Any]= {'''smdistributed''': {'''modelparallel''': smp_options}, '''mpi''': mpi_options} SCREAMING_SNAKE_CASE__: Dict= '''trainer''' if self.script == '''run_glue.py''' else '''smtrainer''' # 
creates estimator return HuggingFace( entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f'{self.env.base_job_name}-{instance_count}-smp-{name_extension}' , instance_count=lowerCAmelCase , instance_type=self.instance_type , debugger_hook_config=lowerCAmelCase , hyperparameters={ **self.env.hyperparameters, '''model_name_or_path''': self.model_name_or_path, '''max_steps''': 500, } , metric_definitions=self.env.metric_definitions , distribution=lowerCAmelCase , py_version='''py36''' , ) def UpperCamelCase_ ( self , lowerCAmelCase ) -> int: TrainingJobAnalytics(lowerCAmelCase ).export_csv(f'{self.env.test_path}/{job_name}_metrics.csv' ) @parameterized.expand([(1,)] ) def UpperCamelCase_ ( self , lowerCAmelCase ) -> int: # create estimator SCREAMING_SNAKE_CASE__: List[str]= self.create_estimator(lowerCAmelCase ) # run training estimator.fit() # result dataframe SCREAMING_SNAKE_CASE__: Any= TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe() # extract kpis SCREAMING_SNAKE_CASE__: Optional[int]= list(result_metrics_df[result_metrics_df.metric_name == '''eval_accuracy''']['''value'''] ) SCREAMING_SNAKE_CASE__: Optional[int]= list(result_metrics_df[result_metrics_df.metric_name == '''eval_loss''']['''value'''] ) # get train time from SageMaker job, this includes starting, preprocessing, stopping SCREAMING_SNAKE_CASE__: List[Any]= ( Session().describe_training_job(estimator.latest_training_job.name ).get('''TrainingTimeInSeconds''' , 999999 ) ) # assert kpis assert train_runtime <= self.results["train_runtime"] assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy ) assert all(t <= self.results['''eval_loss'''] for t in eval_loss ) # dump tests result into json file to share in PR with open(f'{estimator.latest_training_job.name}.json' , '''w''' ) as outfile: json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss} , 
lowerCAmelCase )
64
0
"""simple docstring""" import math def lowercase__ ( snake_case_ :int ): if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(snake_case_ ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def lowercase__ ( snake_case_ :float = 0.1 ): __UpperCAmelCase = 3 __UpperCAmelCase = 3 while primes / (2 * j - 1) >= ratio: for i in range(j * j + j + 1 , (j + 2) * (j + 2) , j + 1 ): primes += is_prime(snake_case_ ) j += 2 return j if __name__ == "__main__": import doctest doctest.testmod()
49
import unittest import numpy as np from diffusers import OnnxStableDiffusionInpaintPipelineLegacy from diffusers.utils.testing_utils import ( is_onnx_available, load_image, load_numpy, nightly, require_onnxruntime, require_torch_gpu, ) if is_onnx_available(): import onnxruntime as ort @nightly @require_onnxruntime @require_torch_gpu class _lowerCamelCase ( unittest.TestCase ): @property def UpperCamelCase_ ( self ) -> List[str]: return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def UpperCamelCase_ ( self ) -> List[Any]: SCREAMING_SNAKE_CASE__: Dict= ort.SessionOptions() SCREAMING_SNAKE_CASE__: List[str]= False return options def UpperCamelCase_ ( self ) -> int: SCREAMING_SNAKE_CASE__: Dict= load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/in_paint/overture-creations-5sI6fQgYIuo.png''' ) SCREAMING_SNAKE_CASE__: int= load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' ) SCREAMING_SNAKE_CASE__: Tuple= load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy''' ) # using the PNDM scheduler by default SCREAMING_SNAKE_CASE__: Tuple= OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained( '''CompVis/stable-diffusion-v1-4''' , revision='''onnx''' , safety_checker=lowerCAmelCase , feature_extractor=lowerCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=lowerCAmelCase ) SCREAMING_SNAKE_CASE__: Dict= '''A red cat sitting on a park bench''' SCREAMING_SNAKE_CASE__: Optional[Any]= np.random.RandomState(0 ) SCREAMING_SNAKE_CASE__: Any= pipe( prompt=lowerCAmelCase , image=lowerCAmelCase , mask_image=lowerCAmelCase , strength=0.75 , guidance_scale=7.5 , num_inference_steps=15 , 
generator=lowerCAmelCase , output_type='''np''' , ) SCREAMING_SNAKE_CASE__: Any= output.images[0] assert image.shape == (512, 512, 3) assert np.abs(expected_image - image ).max() < 1e-2
64
0
'''simple docstring''' import os import tempfile import unittest from pathlib import Path from transformers import AutoConfig, is_torch_available from transformers.testing_utils import require_torch, torch_device if is_torch_available(): from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments @require_torch class UpperCamelCase__ (unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self ,_lowerCAmelCase ): for model_result in results.values(): for batch_size, sequence_length in zip(model_result["""bs"""] ,model_result["""ss"""] ): lowerCamelCase__ = model_result["""result"""][batch_size][sequence_length] self.assertIsNotNone(_lowerCAmelCase ) def UpperCamelCase_ ( self ): lowerCamelCase__ = """sshleifer/tiny-gpt2""" lowerCamelCase__ = PyTorchBenchmarkArguments( models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=_lowerCAmelCase ,) lowerCamelCase__ = PyTorchBenchmark(_lowerCAmelCase ) lowerCamelCase__ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def UpperCamelCase_ ( self ): lowerCamelCase__ = """sgugger/tiny-distilbert-classification""" lowerCamelCase__ = PyTorchBenchmarkArguments( models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=_lowerCAmelCase ,only_pretrain_model=_lowerCAmelCase ,) lowerCamelCase__ = PyTorchBenchmark(_lowerCAmelCase ) lowerCamelCase__ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def UpperCamelCase_ ( self ): lowerCamelCase__ = """sshleifer/tiny-gpt2""" lowerCamelCase__ = PyTorchBenchmarkArguments( models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,torchscript=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] 
,multi_process=_lowerCAmelCase ,) lowerCamelCase__ = PyTorchBenchmark(_lowerCAmelCase ) lowerCamelCase__ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) @unittest.skipIf(torch_device == """cpu""" ,"""Cant do half precision""" ) def UpperCamelCase_ ( self ): lowerCamelCase__ = """sshleifer/tiny-gpt2""" lowerCamelCase__ = PyTorchBenchmarkArguments( models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,fpaa=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=_lowerCAmelCase ,) lowerCamelCase__ = PyTorchBenchmark(_lowerCAmelCase ) lowerCamelCase__ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def UpperCamelCase_ ( self ): lowerCamelCase__ = """sshleifer/tiny-gpt2""" lowerCamelCase__ = AutoConfig.from_pretrained(_lowerCAmelCase ) # set architectures equal to `None` lowerCamelCase__ = None lowerCamelCase__ = PyTorchBenchmarkArguments( models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=_lowerCAmelCase ,) lowerCamelCase__ = PyTorchBenchmark(_lowerCAmelCase ,configs=[config] ) lowerCamelCase__ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def UpperCamelCase_ ( self ): lowerCamelCase__ = """sshleifer/tiny-gpt2""" lowerCamelCase__ = PyTorchBenchmarkArguments( models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=_lowerCAmelCase ,) lowerCamelCase__ = PyTorchBenchmark(_lowerCAmelCase ) lowerCamelCase__ = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) @unittest.skipIf(torch_device == """cpu""" 
,"""Can't do half precision""" ) def UpperCamelCase_ ( self ): lowerCamelCase__ = """sshleifer/tiny-gpt2""" lowerCamelCase__ = PyTorchBenchmarkArguments( models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,fpaa=_lowerCAmelCase ,multi_process=_lowerCAmelCase ,) lowerCamelCase__ = PyTorchBenchmark(_lowerCAmelCase ) lowerCamelCase__ = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def UpperCamelCase_ ( self ): lowerCamelCase__ = """sshleifer/tiny-gpt2""" lowerCamelCase__ = AutoConfig.from_pretrained(_lowerCAmelCase ) lowerCamelCase__ = PyTorchBenchmarkArguments( models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=_lowerCAmelCase ,) lowerCamelCase__ = PyTorchBenchmark(_lowerCAmelCase ,configs=[config] ) lowerCamelCase__ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def UpperCamelCase_ ( self ): lowerCamelCase__ = """sshleifer/tinier_bart""" lowerCamelCase__ = AutoConfig.from_pretrained(_lowerCAmelCase ) lowerCamelCase__ = PyTorchBenchmarkArguments( models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=_lowerCAmelCase ,) lowerCamelCase__ = PyTorchBenchmark(_lowerCAmelCase ,configs=[config] ) lowerCamelCase__ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def UpperCamelCase_ ( self ): lowerCamelCase__ = """sshleifer/tiny-gpt2""" lowerCamelCase__ = AutoConfig.from_pretrained(_lowerCAmelCase ) lowerCamelCase__ = PyTorchBenchmarkArguments( models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] 
,multi_process=_lowerCAmelCase ,) lowerCamelCase__ = PyTorchBenchmark(_lowerCAmelCase ,configs=[config] ) lowerCamelCase__ = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def UpperCamelCase_ ( self ): lowerCamelCase__ = """sshleifer/tinier_bart""" lowerCamelCase__ = AutoConfig.from_pretrained(_lowerCAmelCase ) lowerCamelCase__ = PyTorchBenchmarkArguments( models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=_lowerCAmelCase ,) lowerCamelCase__ = PyTorchBenchmark(_lowerCAmelCase ,configs=[config] ) lowerCamelCase__ = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def UpperCamelCase_ ( self ): lowerCamelCase__ = """sshleifer/tiny-gpt2""" with tempfile.TemporaryDirectory() as tmp_dir: lowerCamelCase__ = PyTorchBenchmarkArguments( models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,save_to_csv=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,inference_time_csv_file=os.path.join(_lowerCAmelCase ,"""inf_time.csv""" ) ,train_memory_csv_file=os.path.join(_lowerCAmelCase ,"""train_mem.csv""" ) ,inference_memory_csv_file=os.path.join(_lowerCAmelCase ,"""inf_mem.csv""" ) ,train_time_csv_file=os.path.join(_lowerCAmelCase ,"""train_time.csv""" ) ,env_info_csv_file=os.path.join(_lowerCAmelCase ,"""env.csv""" ) ,multi_process=_lowerCAmelCase ,) lowerCamelCase__ = PyTorchBenchmark(_lowerCAmelCase ) benchmark.run() self.assertTrue(Path(os.path.join(_lowerCAmelCase ,"""inf_time.csv""" ) ).exists() ) self.assertTrue(Path(os.path.join(_lowerCAmelCase ,"""train_time.csv""" ) ).exists() ) self.assertTrue(Path(os.path.join(_lowerCAmelCase ,"""inf_mem.csv""" ) ).exists() ) self.assertTrue(Path(os.path.join(_lowerCAmelCase ,"""train_mem.csv""" ) ).exists() ) 
self.assertTrue(Path(os.path.join(_lowerCAmelCase ,"""env.csv""" ) ).exists() ) def UpperCamelCase_ ( self ): lowerCamelCase__ = """sshleifer/tiny-gpt2""" def _check_summary_is_not_empty(_lowerCAmelCase ): self.assertTrue(hasattr(_lowerCAmelCase ,"""sequential""" ) ) self.assertTrue(hasattr(_lowerCAmelCase ,"""cumulative""" ) ) self.assertTrue(hasattr(_lowerCAmelCase ,"""current""" ) ) self.assertTrue(hasattr(_lowerCAmelCase ,"""total""" ) ) with tempfile.TemporaryDirectory() as tmp_dir: lowerCamelCase__ = PyTorchBenchmarkArguments( models=[MODEL_ID] ,training=_lowerCAmelCase ,inference=_lowerCAmelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,log_filename=os.path.join(_lowerCAmelCase ,"""log.txt""" ) ,log_print=_lowerCAmelCase ,trace_memory_line_by_line=_lowerCAmelCase ,multi_process=_lowerCAmelCase ,) lowerCamelCase__ = PyTorchBenchmark(_lowerCAmelCase ) lowerCamelCase__ = benchmark.run() _check_summary_is_not_empty(result.inference_summary ) _check_summary_is_not_empty(result.train_summary ) self.assertTrue(Path(os.path.join(_lowerCAmelCase ,"""log.txt""" ) ).exists() )
50
from dataclasses import dataclass, field from typing import Tuple from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends from .benchmark_args_utils import BenchmarkArguments if is_torch_available(): import torch if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm lowercase_ : List[Any] = logging.get_logger(__name__) @dataclass class _lowerCamelCase ( UpperCamelCase_ ): __a = [ "no_inference", "no_cuda", "no_tpu", "no_speed", "no_memory", "no_env_print", "no_multi_process", ] def __init__( self , **lowerCAmelCase ) -> str: for deprecated_arg in self.deprecated_args: if deprecated_arg in kwargs: SCREAMING_SNAKE_CASE__: str= deprecated_arg[3:] setattr(self , lowerCAmelCase , not kwargs.pop(lowerCAmelCase ) ) logger.warning( f'{deprecated_arg} is depreciated. Please use --no_{positive_arg} or' f' {positive_arg}={kwargs[positive_arg]}' ) SCREAMING_SNAKE_CASE__: Tuple= kwargs.pop('''torchscript''' , self.torchscript ) SCREAMING_SNAKE_CASE__: Union[str, Any]= kwargs.pop('''torch_xla_tpu_print_metrics''' , self.torch_xla_tpu_print_metrics ) SCREAMING_SNAKE_CASE__: Any= kwargs.pop('''fp16_opt_level''' , self.fpaa_opt_level ) super().__init__(**lowerCAmelCase ) __a = field(default=UpperCamelCase_ , metadata={"help": "Trace the models using torchscript"} ) __a = field(default=UpperCamelCase_ , metadata={"help": "Print Xla/PyTorch tpu metrics"} ) __a = field( default="O1" , metadata={ "help": ( "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. 
" "See details at https://nvidia.github.io/apex/amp.html" ) } , ) @cached_property def UpperCamelCase_ ( self ) -> Tuple["torch.device", int]: requires_backends(self , ['''torch'''] ) logger.info('''PyTorch: setting up devices''' ) if not self.cuda: SCREAMING_SNAKE_CASE__: Any= torch.device('''cpu''' ) SCREAMING_SNAKE_CASE__: Union[str, Any]= 0 elif is_torch_tpu_available(): SCREAMING_SNAKE_CASE__: List[str]= xm.xla_device() SCREAMING_SNAKE_CASE__: Any= 0 else: SCREAMING_SNAKE_CASE__: List[Any]= torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' ) SCREAMING_SNAKE_CASE__: List[str]= torch.cuda.device_count() return device, n_gpu @property def UpperCamelCase_ ( self ) -> Optional[Any]: return is_torch_tpu_available() and self.tpu @property def UpperCamelCase_ ( self ) -> int: requires_backends(self , ['''torch'''] ) # TODO(PVP): currently only single GPU is supported return torch.cuda.current_device() @property def UpperCamelCase_ ( self ) -> "torch.device": requires_backends(self , ['''torch'''] ) return self._setup_devices[0] @property def UpperCamelCase_ ( self ) -> int: requires_backends(self , ['''torch'''] ) return self._setup_devices[1] @property def UpperCamelCase_ ( self ) -> str: return self.n_gpu > 0
64
0
'''simple docstring''' import argparse import collections import numpy as np import torch from flax import traverse_util from tax import checkpoints from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration from transformers.utils import logging logging.set_verbosity_info() def __snake_case ( SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Any: """simple docstring""" return params[f"{prefix}/{prefix}/relpos_bias/rel_embedding"][:, i, :] def __snake_case ( SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : List[Any]="attention" ) -> Dict: """simple docstring""" UpperCAmelCase = UpperCAmelCase = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/key/kernel"][:, i, :, :] ) UpperCAmelCase = k_tmp.reshape(k_tmp.shape[0] , k_tmp.shape[1] * k_tmp.shape[2] ) UpperCAmelCase = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/out/kernel"][:, i, :, :] ) UpperCAmelCase = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] , o_tmp.shape[2] ) UpperCAmelCase = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/query/kernel"][:, i, :, :] ) UpperCAmelCase = q_tmp.reshape(q_tmp.shape[0] , q_tmp.shape[1] * q_tmp.shape[2] ) UpperCAmelCase = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/value/kernel"][:, i, :, :] ) UpperCAmelCase = v_tmp.reshape(v_tmp.shape[0] , v_tmp.shape[1] * v_tmp.shape[2] ) return k, o, q, v def __snake_case ( SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : List[str]=False ) -> List[str]: """simple docstring""" if split_mlp_wi: UpperCAmelCase = params[f"{prefix}/{prefix}/mlp/wi_0/kernel"][:, i, :] UpperCAmelCase = params[f"{prefix}/{prefix}/mlp/wi_1/kernel"][:, i, :] UpperCAmelCase = (wi_a, wi_a) else: UpperCAmelCase = params[f"{prefix}/{prefix}/mlp/wi/kernel"][:, i, :] UpperCAmelCase = 
params[f"{prefix}/{prefix}/mlp/wo/kernel"][:, i, :] return wi, wo def __snake_case ( SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Any ) -> Any: """simple docstring""" return params[f"{prefix}/{prefix}/{layer_name}/scale"][:, i] def __snake_case ( SCREAMING_SNAKE_CASE_ : dict , *, SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : bool , SCREAMING_SNAKE_CASE_ : bool = False ) -> List[str]: """simple docstring""" UpperCAmelCase = traverse_util.flatten_dict(variables['''target'''] ) UpperCAmelCase = {'''/'''.join(SCREAMING_SNAKE_CASE_ ): v for k, v in old.items()} # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi UpperCAmelCase = '''encoder/encoder/mlp/wi_0/kernel''' in old print('''Split MLP:''' , SCREAMING_SNAKE_CASE_ ) UpperCAmelCase = collections.OrderedDict() # Shared embeddings. UpperCAmelCase = old['''token_embedder/embedding'''] # Encoder. for i in range(SCREAMING_SNAKE_CASE_ ): # Block i, layer 0 (Self Attention). UpperCAmelCase = tax_layer_norm_lookup(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , '''encoder''' , '''pre_attention_layer_norm''' ) UpperCAmelCase, UpperCAmelCase, UpperCAmelCase, UpperCAmelCase = tax_attention_lookup(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , '''encoder''' , '''attention''' ) UpperCAmelCase = layer_norm UpperCAmelCase = k.T UpperCAmelCase = o.T UpperCAmelCase = q.T UpperCAmelCase = v.T # Block i, layer 1 (MLP). 
UpperCAmelCase = tax_layer_norm_lookup(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , '''encoder''' , '''pre_mlp_layer_norm''' ) UpperCAmelCase, UpperCAmelCase = tax_mlp_lookup(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , '''encoder''' , SCREAMING_SNAKE_CASE_ ) UpperCAmelCase = layer_norm if split_mlp_wi: UpperCAmelCase = wi[0].T UpperCAmelCase = wi[1].T else: UpperCAmelCase = wi.T UpperCAmelCase = wo.T if scalable_attention: # convert the rel_embedding of each layer UpperCAmelCase = tax_relpos_bias_lookup( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , '''encoder''' ).T UpperCAmelCase = old['''encoder/encoder_norm/scale'''] if not scalable_attention: UpperCAmelCase = tax_relpos_bias_lookup( SCREAMING_SNAKE_CASE_ , 0 , '''encoder''' ).T UpperCAmelCase = tax_relpos_bias_lookup( SCREAMING_SNAKE_CASE_ , 0 , '''decoder''' ).T if not is_encoder_only: # Decoder. for i in range(SCREAMING_SNAKE_CASE_ ): # Block i, layer 0 (Self Attention). UpperCAmelCase = tax_layer_norm_lookup(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , '''decoder''' , '''pre_self_attention_layer_norm''' ) UpperCAmelCase, UpperCAmelCase, UpperCAmelCase, UpperCAmelCase = tax_attention_lookup(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , '''decoder''' , '''self_attention''' ) UpperCAmelCase = layer_norm UpperCAmelCase = k.T UpperCAmelCase = o.T UpperCAmelCase = q.T UpperCAmelCase = v.T # Block i, layer 1 (Cross Attention). UpperCAmelCase = tax_layer_norm_lookup(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , '''decoder''' , '''pre_cross_attention_layer_norm''' ) UpperCAmelCase, UpperCAmelCase, UpperCAmelCase, UpperCAmelCase = tax_attention_lookup(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , '''decoder''' , '''encoder_decoder_attention''' ) UpperCAmelCase = layer_norm UpperCAmelCase = k.T UpperCAmelCase = o.T UpperCAmelCase = q.T UpperCAmelCase = v.T # Block i, layer 2 (MLP). 
UpperCAmelCase = tax_layer_norm_lookup(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , '''decoder''' , '''pre_mlp_layer_norm''' ) UpperCAmelCase, UpperCAmelCase = tax_mlp_lookup(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , '''decoder''' , SCREAMING_SNAKE_CASE_ ) UpperCAmelCase = layer_norm if split_mlp_wi: UpperCAmelCase = wi[0].T UpperCAmelCase = wi[1].T else: UpperCAmelCase = wi.T UpperCAmelCase = wo.T if scalable_attention: # convert the rel_embedding of each layer UpperCAmelCase = tax_relpos_bias_lookup(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , '''decoder''' ).T UpperCAmelCase = old['''decoder/decoder_norm/scale'''] # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead) if "decoder/logits_dense/kernel" in old: UpperCAmelCase = old['''decoder/logits_dense/kernel'''].T return new def __snake_case ( SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : bool ) -> Any: """simple docstring""" UpperCAmelCase = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] ) # Add what is missing. if "encoder.embed_tokens.weight" not in state_dict: UpperCAmelCase = state_dict['''shared.weight'''] if not is_encoder_only: if "decoder.embed_tokens.weight" not in state_dict: UpperCAmelCase = state_dict['''shared.weight'''] if "lm_head.weight" not in state_dict: # For old 1.0 models. 
print('''Using shared word embeddings as lm_head.''' ) UpperCAmelCase = state_dict['''shared.weight'''] return state_dict def __snake_case ( SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Any: """simple docstring""" UpperCAmelCase = checkpoints.load_tax_checkpoint(SCREAMING_SNAKE_CASE_ ) UpperCAmelCase = convert_tax_to_pytorch( SCREAMING_SNAKE_CASE_ , num_layers=config.num_layers , is_encoder_only=SCREAMING_SNAKE_CASE_ , scalable_attention=SCREAMING_SNAKE_CASE_ ) UpperCAmelCase = make_state_dict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) model.load_state_dict(SCREAMING_SNAKE_CASE_ , strict=SCREAMING_SNAKE_CASE_ ) def __snake_case ( SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : bool = False , SCREAMING_SNAKE_CASE_ : bool = False , ) -> Dict: """simple docstring""" UpperCAmelCase = MTaConfig.from_json_file(SCREAMING_SNAKE_CASE_ ) print(f"Building PyTorch model from configuration: {config}" ) # Non-v1.1 checkpoints could also use T5Model, but this works for all. # The v1.0 checkpoints will simply have an LM head that is the word embeddings. if is_encoder_only: UpperCAmelCase = UMTaEncoderModel(SCREAMING_SNAKE_CASE_ ) else: UpperCAmelCase = UMTaForConditionalGeneration(SCREAMING_SNAKE_CASE_ ) # Load weights from tf checkpoint load_tax_weights_in_ta(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # Save pytorch-model print(f"Save PyTorch model to {pytorch_dump_path}" ) model.save_pretrained(SCREAMING_SNAKE_CASE_ ) # Verify that we can load the checkpoint. 
model.from_pretrained(SCREAMING_SNAKE_CASE_ ) print('''Done''' ) if __name__ == "__main__": a__ : List[str] = argparse.ArgumentParser(description='Converts a native T5X checkpoint into a PyTorch checkpoint.') # Required parameters parser.add_argument( '--t5x_checkpoint_path', default=None, type=str, required=True, help='Path to the T5X checkpoint.' ) parser.add_argument( '--config_file', default=None, type=str, required=True, help='The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.', ) parser.add_argument( '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) parser.add_argument( '--is_encoder_only', action='store_true', help='Check if the model is encoder-decoder model', default=False ) parser.add_argument( '--scalable_attention', action='store_true', help='Whether the model uses scaled attention (umt5 model)', default=False, ) a__ : List[str] = parser.parse_args() convert_tax_checkpoint_to_pytorch( args.tax_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only, args.scalable_attention, )
51
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import PoolFormerImageProcessor


class PoolFormerImageProcessingTester(unittest.TestCase):
    """Holds the configuration used by the tests below and builds the
    corresponding keyword-argument dict for ``PoolFormerImageProcessor``."""

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize_and_center_crop=True,
        size=None,
        crop_pct=0.9,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        # Fall back to the defaults the processor itself would use.
        size = size if size is not None else {"shortest_edge": 30}
        crop_size = crop_size if crop_size is not None else {"height": 30, "width": 30}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize_and_center_crop = do_resize_and_center_crop
        self.size = size
        self.crop_pct = crop_pct
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        """Return the kwargs needed to construct a PoolFormerImageProcessor."""
        return {
            "size": self.size,
            "do_resize_and_center_crop": self.do_resize_and_center_crop,
            "crop_pct": self.crop_pct,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }


@require_torch
@require_vision
class PoolFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = PoolFormerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = PoolFormerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize_and_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "crop_pct"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 30})
        self.assertEqual(image_processor.crop_size, {"height": 30, "width": 30})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        # Intentionally a no-op: batching is exercised by the test_call_* methods below.
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
64
0
"""simple docstring""" import inspect import unittest from transformers import ConvNextConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class __lowercase : '''simple docstring''' def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=32 , _UpperCAmelCase=3 , _UpperCAmelCase=4 , _UpperCAmelCase=[10, 20, 30, 40] , _UpperCAmelCase=[2, 2, 3, 2] , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=37 , _UpperCAmelCase="gelu" , _UpperCAmelCase=10 , _UpperCAmelCase=0.0_2 , _UpperCAmelCase=["stage2", "stage3", "stage4"] , _UpperCAmelCase=[2, 3, 4] , _UpperCAmelCase=None , ): __a : Tuple = parent __a : Optional[Any] = batch_size __a : List[Any] = image_size __a : Optional[int] = num_channels __a : int = num_stages __a : List[str] = hidden_sizes __a : Tuple = depths __a : Union[str, Any] = is_training __a : Optional[int] = use_labels __a : Union[str, Any] = intermediate_size __a : str = hidden_act __a : List[str] = num_labels __a : Union[str, Any] = initializer_range __a : Dict = out_features __a : Union[str, Any] = out_indices __a : Optional[int] = scope def _lowerCamelCase ( self ): __a : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __a : Optional[int] = None if self.use_labels: __a : Dict = 
ids_tensor([self.batch_size] , self.num_labels ) __a : Optional[Any] = self.get_config() return config, pixel_values, labels def _lowerCamelCase ( self ): return ConvNextConfig( num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , ) def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): __a : Optional[int] = ConvNextModel(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() __a : Tuple = model(_UpperCAmelCase ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): __a : int = ConvNextForImageClassification(_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() __a : List[Any] = model(_UpperCAmelCase , labels=_UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): __a : Optional[Any] = ConvNextBackbone(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() __a : List[str] = model(_UpperCAmelCase ) # verify hidden states self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] ) # verify backbone works with out_features=None __a : List[str] = None __a : Union[str, Any] = ConvNextBackbone(config=_UpperCAmelCase ) 
model.to(_UpperCAmelCase ) model.eval() __a : List[str] = model(_UpperCAmelCase ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def _lowerCamelCase ( self ): __a : List[str] = self.prepare_config_and_inputs() __a , __a , __a : int = config_and_inputs __a : int = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class __lowercase ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ): '''simple docstring''' __lowerCAmelCase = ( ( ConvNextModel, ConvNextForImageClassification, ConvNextBackbone, ) if is_torch_available() else () ) __lowerCAmelCase = ( {'''feature-extraction''': ConvNextModel, '''image-classification''': ConvNextForImageClassification} if is_torch_available() else {} ) __lowerCAmelCase = True __lowerCAmelCase = False __lowerCAmelCase = False __lowerCAmelCase = False __lowerCAmelCase = False def _lowerCamelCase ( self ): __a : Optional[int] = ConvNextModelTester(self ) __a : List[str] = ConfigTester(self , config_class=_UpperCAmelCase , has_text_modality=_UpperCAmelCase , hidden_size=37 ) def _lowerCamelCase ( self ): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def _lowerCamelCase ( self ): return @unittest.skip(reason='''ConvNext does not use inputs_embeds''' ) def _lowerCamelCase ( self ): pass @unittest.skip(reason='''ConvNext does not support input and output embeddings''' ) def 
_lowerCamelCase ( self ): pass @unittest.skip(reason='''ConvNext does not use feedforward chunking''' ) def _lowerCamelCase ( self ): pass def _lowerCamelCase ( self ): __a , __a : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __a : Tuple = model_class(_UpperCAmelCase ) __a : Any = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __a : str = [*signature.parameters.keys()] __a : List[Any] = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , _UpperCAmelCase ) def _lowerCamelCase ( self ): __a : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_UpperCAmelCase ) def _lowerCamelCase ( self ): __a : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*_UpperCAmelCase ) def _lowerCamelCase ( self ): def check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): __a : Tuple = model_class(_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() with torch.no_grad(): __a : str = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) ) __a : List[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states __a : Optional[int] = self.model_tester.num_stages self.assertEqual(len(_UpperCAmelCase ) , expected_num_stages + 1 ) # ConvNext's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) __a , __a : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __a : List[Any] = True check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __a : int = True 
check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) def _lowerCamelCase ( self ): __a : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_UpperCAmelCase ) @slow def _lowerCamelCase ( self ): for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __a : Any = ConvNextModel.from_pretrained(_UpperCAmelCase ) self.assertIsNotNone(_UpperCAmelCase ) def __A ( ) -> Any: __a : Tuple = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''') return image @require_torch @require_vision class __lowercase ( unittest.TestCase ): '''simple docstring''' @cached_property def _lowerCamelCase ( self ): return AutoImageProcessor.from_pretrained('''facebook/convnext-tiny-224''' ) if is_vision_available() else None @slow def _lowerCamelCase ( self ): __a : Optional[Any] = ConvNextForImageClassification.from_pretrained('''facebook/convnext-tiny-224''' ).to(_UpperCAmelCase ) __a : Optional[Any] = self.default_image_processor __a : Optional[Any] = prepare_img() __a : List[str] = image_processor(images=_UpperCAmelCase , return_tensors='''pt''' ).to(_UpperCAmelCase ) # forward pass with torch.no_grad(): __a : int = model(**_UpperCAmelCase ) # verify the logits __a : Dict = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , _UpperCAmelCase ) __a : int = torch.tensor([-0.0_2_6_0, -0.4_7_3_9, 0.1_9_1_1] ).to(_UpperCAmelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , _UpperCAmelCase , atol=1e-4 ) ) @require_torch class __lowercase ( unittest.TestCase , _UpperCamelCase ): '''simple docstring''' __lowerCAmelCase = (ConvNextBackbone,) if is_torch_available() else () __lowerCAmelCase = ConvNextConfig __lowerCAmelCase = False def _lowerCamelCase ( self ): __a : Tuple = ConvNextModelTester(self )
52
import os import random import sys from . import cryptomath_module as cryptomath from . import rabin_miller lowercase_ : Tuple = 3 def A__ ( snake_case_ : int ): print('''Generating primitive root of p''' ) while True: SCREAMING_SNAKE_CASE__: List[Any]= random.randrange(3 , snake_case_ ) if pow(snake_case_ , 2 , snake_case_ ) == 1: continue if pow(snake_case_ , snake_case_ , snake_case_ ) == 1: continue return g def A__ ( snake_case_ : int ): print('''Generating prime p...''' ) SCREAMING_SNAKE_CASE__: List[Any]= rabin_miller.generate_large_prime(snake_case_ ) # select large prime number. SCREAMING_SNAKE_CASE__: int= primitive_root(snake_case_ ) # one primitive root on modulo p. SCREAMING_SNAKE_CASE__: int= random.randrange(3 , snake_case_ ) # private_key -> have to be greater than 2 for safety. SCREAMING_SNAKE_CASE__: str= cryptomath.find_mod_inverse(pow(snake_case_ , snake_case_ , snake_case_ ) , snake_case_ ) SCREAMING_SNAKE_CASE__: int= (key_size, e_a, e_a, p) SCREAMING_SNAKE_CASE__: Union[str, Any]= (key_size, d) return public_key, private_key def A__ ( snake_case_ : str , snake_case_ : int ): if os.path.exists(F'{name}_pubkey.txt' ) or os.path.exists(F'{name}_privkey.txt' ): print('''\nWARNING:''' ) print( F'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n' '''Use a different name or delete these files and re-run this program.''' ) sys.exit() SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: Optional[Any]= generate_key(snake_case_ ) print(F'\nWriting public key to file {name}_pubkey.txt...' ) with open(F'{name}_pubkey.txt' , '''w''' ) as fo: fo.write(F'{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}' ) print(F'Writing private key to file {name}_privkey.txt...' ) with open(F'{name}_privkey.txt' , '''w''' ) as fo: fo.write(F'{private_key[0]},{private_key[1]}' ) def A__ ( ): print('''Making key files...''' ) make_key_files('''elgamal''' , 2_048 ) print('''Key files generation successful''' ) if __name__ == "__main__": main()
64
0
import itertools import math def a_ ( lowerCAmelCase_ : int ): if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5, int(math.sqrt(lowerCAmelCase_ ) + 1 ), 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def a_ ( ): __lowerCAmelCase = 2 while True: if is_prime(lowerCAmelCase_ ): yield num num += 1 def a_ ( lowerCAmelCase_ : int = 1_0001 ): return next(itertools.islice(prime_generator(), nth - 1, lowerCAmelCase_ ) ) if __name__ == "__main__": print(F"""{solution() = }""")
53
from math import factorial def A__ ( snake_case_ : int , snake_case_ : int ): # If either of the conditions are true, the function is being asked # to calculate a factorial of a negative number, which is not possible if n < k or k < 0: raise ValueError('''Please enter positive integers for n and k where n >= k''' ) return factorial(snake_case_ ) // (factorial(snake_case_ ) * factorial(n - k )) if __name__ == "__main__": print( 'The number of five-card hands possible from a standard', f'''fifty-two card deck is: {combinations(5_2, 5)}\n''', ) print( 'If a class of 40 students must be arranged into groups of', f'''4 for group projects, there are {combinations(4_0, 4)} ways''', 'to arrange them.\n', ) print( 'If 10 teams are competing in a Formula One race, there', f'''are {combinations(1_0, 3)} ways that first, second and''', 'third place can be awarded.', )
64
0
class A : def __init__( self: Optional[Any] , _lowerCAmelCase: List[str] ) -> Optional[int]: '''simple docstring''' UpperCAmelCase_ =val UpperCAmelCase_ =None UpperCAmelCase_ =None def lowerCAmelCase__ ( self: List[str] , _lowerCAmelCase: Tuple ) -> List[str]: '''simple docstring''' if self.val: if val < self.val: if self.left is None: UpperCAmelCase_ =Node(_lowerCAmelCase ) else: self.left.insert(_lowerCAmelCase ) elif val > self.val: if self.right is None: UpperCAmelCase_ =Node(_lowerCAmelCase ) else: self.right.insert(_lowerCAmelCase ) else: UpperCAmelCase_ =val def a__ ( lowercase__ , lowercase__ ): '''simple docstring''' if root: inorder(root.left , lowercase__ ) res.append(root.val ) inorder(root.right , lowercase__ ) def a__ ( lowercase__ ): '''simple docstring''' if len(lowercase__ ) == 0: return arr UpperCAmelCase_ =Node(arr[0] ) for i in range(1 , len(lowercase__ ) ): root.insert(arr[i] ) # Traverse BST in order. UpperCAmelCase_ =[] inorder(lowercase__ , lowercase__ ) return res if __name__ == "__main__": print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
54
import itertools
import random
import unittest

import numpy as np

from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available

from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin


global_rng = random.Random()

if is_torch_available():
    import torch


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Create a random float32 tensor of ``shape`` as nested Python lists."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values


class ASTFeatureExtractionTester(unittest.TestCase):
    """Holds feature-extractor configuration and builds speech inputs for the tests below."""

    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs


@require_torch
@require_torchaudio
class ASTFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = ASTFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = ASTFeatureExtractionTester(self)

    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, padding=True, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, padding=True, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    @require_torch
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)

    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    @require_torch
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776,
             -1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133,
             -1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936,
             -0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = ASTFeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 1024, 128))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
64
0
import pandas as pd from matplotlib import pyplot as plt from sklearn.linear_model import LinearRegression # Splitting the dataset into the Training set and Test set from sklearn.model_selection import train_test_split # Fitting Polynomial Regression to the dataset from sklearn.preprocessing import PolynomialFeatures # Importing the dataset SCREAMING_SNAKE_CASE :List[str] = pd.read_csv( 'https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/' 'position_salaries.csv' ) SCREAMING_SNAKE_CASE :List[str] = dataset.iloc[:, 1:2].values SCREAMING_SNAKE_CASE :int = dataset.iloc[:, 2].values SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE :Optional[Any] = train_test_split(X, y, test_size=0.2, random_state=0) SCREAMING_SNAKE_CASE :Union[str, Any] = PolynomialFeatures(degree=4) SCREAMING_SNAKE_CASE :Tuple = poly_reg.fit_transform(X) SCREAMING_SNAKE_CASE :List[Any] = LinearRegression() pol_reg.fit(X_poly, y) def UpperCAmelCase ( ) -> Optional[int]: """simple docstring""" plt.scatter(a_ , a_ , color="red" ) plt.plot(a_ , pol_reg.predict(poly_reg.fit_transform(a_ ) ) , color="blue" ) plt.title("Truth or Bluff (Linear Regression)" ) plt.xlabel("Position level" ) plt.ylabel("Salary" ) plt.show() if __name__ == "__main__": viz_polymonial() # Predicting a new result with Polymonial Regression pol_reg.predict(poly_reg.fit_transform([[5.5]])) # output should be 132148.43750003
55
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_speech_available,
    is_tf_available,
    is_torch_available,
)


# Base import structure; optional-dependency submodules are added below only
# when their backend is available.
_import_structure = {
    "configuration_speech_to_text": ["SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Speech2TextConfig"],
    "processing_speech_to_text": ["Speech2TextProcessor"],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_speech_to_text"] = ["Speech2TextTokenizer"]

try:
    if not is_speech_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_speech_to_text"] = ["Speech2TextFeatureExtractor"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_speech_to_text"] = [
        "TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFSpeech2TextForConditionalGeneration",
        "TFSpeech2TextModel",
        "TFSpeech2TextPreTrainedModel",
    ]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_speech_to_text"] = [
        "SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Speech2TextForConditionalGeneration",
        "Speech2TextModel",
        "Speech2TextPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig
    from .processing_speech_to_text import Speech2TextProcessor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speech_to_text import Speech2TextTokenizer

    try:
        if not is_speech_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_speech_to_text import Speech2TextFeatureExtractor

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_speech_to_text import (
            TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSpeech2TextForConditionalGeneration,
            TFSpeech2TextModel,
            TFSpeech2TextPreTrainedModel,
        )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_to_text import (
            SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Speech2TextForConditionalGeneration,
            Speech2TextModel,
            Speech2TextPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
64
0
import gc
import inspect
import unittest

import torch
from parameterized import parameterized

from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism

from .test_modeling_common import ModelTesterMixin


enable_full_determinism()


class PriorTransformerTests(ModelTesterMixin, unittest.TestCase):
    model_class = PriorTransformer
    main_input_name = "hidden_states"

    @property
    def dummy_input(self):
        batch_size = 4
        embedding_dim = 8
        num_embeddings = 7

        hidden_states = floats_tensor((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = floats_tensor((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = floats_tensor((batch_size, num_embeddings, embedding_dim)).to(torch_device)

        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }

    def get_dummy_seed_input(self, seed=0):
        """Deterministic variant of ``dummy_input`` for output-comparison tests."""
        torch.manual_seed(seed)
        batch_size = 4
        embedding_dim = 8
        num_embeddings = 7

        hidden_states = torch.randn((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = torch.randn((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = torch.randn((batch_size, num_embeddings, embedding_dim)).to(torch_device)

        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }

    @property
    def input_shape(self):
        return (4, 8)

    @property
    def output_shape(self):
        return (4, 8)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "num_attention_heads": 2,
            "attention_head_dim": 4,
            "num_layers": 2,
            "embedding_dim": 8,
            "num_embeddings": 7,
            "additional_embeddings": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_from_pretrained_hub(self):
        model, loading_info = PriorTransformer.from_pretrained(
            "hf-internal-testing/prior-dummy", output_loading_info=True
        )
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        hidden_states = model(**self.dummy_input)[0]

        assert hidden_states is not None, "Make sure output is not None"

    def test_forward_signature(self):
        init_dict, _ = self.prepare_init_args_and_inputs_for_common()

        model = self.model_class(**init_dict)
        signature = inspect.signature(model.forward)
        # signature.parameters is an OrderedDict => so arg_names order is deterministic
        arg_names = [*signature.parameters.keys()]

        expected_arg_names = ["hidden_states", "timestep"]
        self.assertListEqual(arg_names[:2], expected_arg_names)

    def test_output_pretrained(self):
        model = PriorTransformer.from_pretrained("hf-internal-testing/prior-dummy")
        model = model.to(torch_device)

        if hasattr(model, "set_default_attn_processor"):
            model.set_default_attn_processor()

        input = self.get_dummy_seed_input()

        with torch.no_grad():
            output = model(**input)[0]

        output_slice = output[0, :5].flatten().cpu()
        print(output_slice)

        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        expected_output_slice = torch.tensor([-1.3436, -0.2870, 0.7538, 0.4368, -0.0239])
        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))


@slow
class PriorTransformerIntegrationTests(unittest.TestCase):
    def get_dummy_seed_input(self, batch_size=1, embedding_dim=768, num_embeddings=77, seed=0):
        torch.manual_seed(seed)

        hidden_states = torch.randn((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = torch.randn((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = torch.randn((batch_size, num_embeddings, embedding_dim)).to(torch_device)

        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @parameterized.expand(
        [
            # fmt: off
            [13, [-0.5861, 0.1283, -0.0931, 0.0882, 0.4476, 0.1329, -0.0498, 0.0640]],
            [37, [-0.4913, 0.0110, -0.0483, 0.0541, 0.4954, -0.0170, 0.0354, 0.1651]],
            # fmt: on
        ]
    )
    def test_kandinsky_prior(self, seed, expected_slice):
        model = PriorTransformer.from_pretrained("kandinsky-community/kandinsky-2-1-prior", subfolder="prior")
        model.to(torch_device)
        input = self.get_dummy_seed_input(seed=seed)

        with torch.no_grad():
            sample = model(**input)[0]

        assert list(sample.shape) == [1, 768]

        output_slice = sample[0, :8].flatten().cpu()
        print(output_slice)
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=1e-3)
56
import argparse
import math
import os

import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer

from diffusers import AutoencoderKL, StableDiffusionPipeline, UNet2DConditionModel


def parse_args():
    """Parse command-line options for the INT8 Stable Diffusion demo."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-m",
        "--pretrained_model_name_or_path",
        type=str,
        default=None,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
    )
    parser.add_argument(
        "-c",
        "--caption",
        type=str,
        default="robotic cat with wings",
        help="Text used to generate images.",
    )
    parser.add_argument(
        "-n",
        "--images_num",
        type=int,
        default=4,
        help="How much images to generate.",
    )
    parser.add_argument(
        "-s",
        "--seed",
        type=int,
        default=42,
        help="Seed for random process.",
    )
    parser.add_argument(
        "-ci",
        "--cuda_id",
        type=int,
        default=0,
        help="cuda_id.",
    )
    args = parser.parse_args()
    return args


def image_grid(imgs, rows, cols):
    """Paste ``rows * cols`` equally sized PIL images into a single grid image."""
    if not len(imgs) == rows * cols:
        raise ValueError("The specified number of rows and columns are not correct.")

    w, h = imgs[0].size
    grid = Image.new("RGB", size=(cols * w, rows * h))
    # keep the per-tile (w, h) intact; store the grid size separately
    grid_w, grid_h = grid.size

    for i, img in enumerate(imgs):
        grid.paste(img, box=(i % cols * w, i // cols * h))
    return grid


def generate_images(
    pipeline,
    prompt="robotic cat with wings",
    guidance_scale=7.5,
    num_inference_steps=50,
    num_images_per_prompt=1,
    seed=42,
):
    """Run the pipeline and return (grid image, list of individual images)."""
    generator = torch.Generator(pipeline.device).manual_seed(seed)
    images = pipeline(
        prompt,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        generator=generator,
        num_images_per_prompt=num_images_per_prompt,
    ).images
    _rows = int(math.sqrt(num_images_per_prompt))
    grid = image_grid(images, rows=_rows, cols=num_images_per_prompt // _rows)
    return grid, images


args = parse_args()

# Load models and create wrapper for stable diffusion
tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer")
text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="text_encoder")
vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae")
unet = UNet2DConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="unet")

pipeline = StableDiffusionPipeline.from_pretrained(
    args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
# Disable the safety checker: return images unfiltered.
pipeline.safety_checker = lambda images, clip_input: (images, False)

# Prefer the INT8 (neural-compressor) UNet checkpoint when one has been exported.
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, "best_model.pt")):
    unet = load(args.pretrained_model_name_or_path, model=unet)
    unet.eval()
    setattr(pipeline, "unet", unet)
else:
    unet = unet.to(torch.device("cuda", args.cuda_id))
pipeline = pipeline.to(unet.device)

grid, images = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, "{}.png".format("_".join(args.caption.split()))))
dirname = os.path.join(args.pretrained_model_name_or_path, "_".join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
    image.save(os.path.join(dirname, "{}.png".format(idx + 1)))
64
0
from __future__ import annotations import math from collections import Counter from string import ascii_lowercase def snake_case (UpperCAmelCase__ ) -> None: UpperCamelCase_ ,UpperCamelCase_: Optional[Any] = analyze_text(UpperCAmelCase__ ) UpperCamelCase_: Any = list(' ' + ascii_lowercase ) # what is our total sum of probabilities. UpperCamelCase_: Any = sum(single_char_strings.values() ) # one length string UpperCamelCase_: List[Any] = 0 # for each alpha we go in our dict and if it is in it we calculate entropy for ch in my_alphas: if ch in single_char_strings: UpperCamelCase_: Any = single_char_strings[ch] UpperCamelCase_: Dict = my_str / all_sum my_fir_sum += prob * math.loga(UpperCAmelCase__ ) # entropy formula. # print entropy print(F'''{round(-1 * my_fir_sum ):.1f}''' ) # two len string UpperCamelCase_: Optional[int] = sum(two_char_strings.values() ) UpperCamelCase_: List[str] = 0 # for each alpha (two in size) calculate entropy. for cha in my_alphas: for cha in my_alphas: UpperCamelCase_: Optional[Any] = cha + cha if sequence in two_char_strings: UpperCamelCase_: int = two_char_strings[sequence] UpperCamelCase_: Any = int(UpperCAmelCase__ ) / all_sum my_sec_sum += prob * math.loga(UpperCAmelCase__ ) # print second entropy print(F'''{round(-1 * my_sec_sum ):.1f}''' ) # print the difference between them print(F'''{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}''' ) def snake_case (UpperCAmelCase__ ) -> tuple[dict, dict]: UpperCamelCase_: List[str] = Counter() # type: ignore UpperCamelCase_: List[str] = Counter() # type: ignore single_char_strings[text[-1]] += 1 # first case when we have space at start. two_char_strings[" " + text[0]] += 1 for i in range(0 , len(UpperCAmelCase__ ) - 1 ): single_char_strings[text[i]] += 1 two_char_strings[text[i : i + 2]] += 1 return single_char_strings, two_char_strings def snake_case () -> Union[str, Any]: import doctest doctest.testmod() # text = ( # "Had repulsive dashwoods suspicion sincerity but advantage now him. 
Remark " # "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest " # "jointure saw horrible. He private he on be imagine suppose. Fertile " # "beloved evident through no service elderly is. Blind there if every no so " # "at. Own neglected you preferred way sincerity delivered his attempted. To " # "of message cottage windows do besides against uncivil. Delightful " # "unreserved impossible few estimating men favourable see entreaties. She " # "propriety immediate was improving. He or entrance humoured likewise " # "moderate. Much nor game son say feel. Fat make met can must form into " # "gate. Me we offending prevailed discovery. " # ) # calculate_prob(text) if __name__ == "__main__": main()
57
from __future__ import annotations from collections import deque class _lowerCamelCase : def __init__( self , lowerCAmelCase ) -> Optional[Any]: SCREAMING_SNAKE_CASE__: list[dict]= [] self.adlist.append( {'''value''': '''''', '''next_states''': [], '''fail_state''': 0, '''output''': []} ) for keyword in keywords: self.add_keyword(lowerCAmelCase ) self.set_fail_transitions() def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase ) -> int | None: for state in self.adlist[current_state]["next_states"]: if char == self.adlist[state]["value"]: return state return None def UpperCamelCase_ ( self , lowerCAmelCase ) -> None: SCREAMING_SNAKE_CASE__: str= 0 for character in keyword: SCREAMING_SNAKE_CASE__: Union[str, Any]= self.find_next_state(lowerCAmelCase , lowerCAmelCase ) if next_state is None: self.adlist.append( { '''value''': character, '''next_states''': [], '''fail_state''': 0, '''output''': [], } ) self.adlist[current_state]["next_states"].append(len(self.adlist ) - 1 ) SCREAMING_SNAKE_CASE__: Dict= len(self.adlist ) - 1 else: SCREAMING_SNAKE_CASE__: List[Any]= next_state self.adlist[current_state]["output"].append(lowerCAmelCase ) def UpperCamelCase_ ( self ) -> None: SCREAMING_SNAKE_CASE__: deque= deque() for node in self.adlist[0]["next_states"]: q.append(lowerCAmelCase ) SCREAMING_SNAKE_CASE__: Optional[int]= 0 while q: SCREAMING_SNAKE_CASE__: Union[str, Any]= q.popleft() for child in self.adlist[r]["next_states"]: q.append(lowerCAmelCase ) SCREAMING_SNAKE_CASE__: Optional[int]= self.adlist[r]['''fail_state'''] while ( self.find_next_state(lowerCAmelCase , self.adlist[child]['''value'''] ) is None and state != 0 ): SCREAMING_SNAKE_CASE__: Tuple= self.adlist[state]['''fail_state'''] SCREAMING_SNAKE_CASE__: Dict= self.find_next_state( lowerCAmelCase , self.adlist[child]['''value'''] ) if self.adlist[child]["fail_state"] is None: SCREAMING_SNAKE_CASE__: Union[str, Any]= 0 SCREAMING_SNAKE_CASE__: str= ( self.adlist[child]['''output'''] + 
self.adlist[self.adlist[child]['''fail_state''']]['''output'''] ) def UpperCamelCase_ ( self , lowerCAmelCase ) -> dict[str, list[int]]: SCREAMING_SNAKE_CASE__: dict= {} # returns a dict with keywords and list of its occurrences SCREAMING_SNAKE_CASE__: Optional[Any]= 0 for i in range(len(lowerCAmelCase ) ): while ( self.find_next_state(lowerCAmelCase , string[i] ) is None and current_state != 0 ): SCREAMING_SNAKE_CASE__: Optional[int]= self.adlist[current_state]['''fail_state'''] SCREAMING_SNAKE_CASE__: Optional[int]= self.find_next_state(lowerCAmelCase , string[i] ) if next_state is None: SCREAMING_SNAKE_CASE__: List[Any]= 0 else: SCREAMING_SNAKE_CASE__: Dict= next_state for key in self.adlist[current_state]["output"]: if key not in result: SCREAMING_SNAKE_CASE__: Optional[Any]= [] result[key].append(i - len(lowerCAmelCase ) + 1 ) return result if __name__ == "__main__": import doctest doctest.testmod()
64
0
"""simple docstring""" from decimal import Decimal, getcontext from math import ceil, factorial def __lowerCAmelCase ( __UpperCamelCase : int ): '''simple docstring''' if not isinstance(__UpperCamelCase , __UpperCamelCase ): raise TypeError("""Undefined for non-integers""" ) elif precision < 1: raise ValueError("""Undefined for non-natural numbers""" ) snake_case_ : str = precision snake_case_ : Any = ceil(precision / 1_4 ) snake_case_ : Dict = 4_2_6_8_8_0 * Decimal(1_0_0_0_5 ).sqrt() snake_case_ : Optional[Any] = 1 snake_case_ : List[str] = 1_3_5_9_1_4_0_9 snake_case_ : Optional[int] = Decimal(__UpperCamelCase ) for k in range(1 , __UpperCamelCase ): snake_case_ : Any = factorial(6 * k ) // (factorial(3 * k ) * factorial(__UpperCamelCase ) ** 3) linear_term += 5_4_5_1_4_0_1_3_4 exponential_term *= -2_6_2_5_3_7_4_1_2_6_4_0_7_6_8_0_0_0 partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term return str(constant_term / partial_sum )[:-1] if __name__ == "__main__": __lowerCAmelCase : int = 50 print(F'''The first {n} digits of pi is: {pi(n)}''')
58
import numpy as np def A__ ( snake_case_ : str , snake_case_ : List[str] , snake_case_ : Dict , snake_case_ : Optional[int] , snake_case_ : Optional[int] ): SCREAMING_SNAKE_CASE__: List[Any]= int(np.ceil((x_end - xa) / h ) ) SCREAMING_SNAKE_CASE__: Any= np.zeros((n + 1,) ) SCREAMING_SNAKE_CASE__: int= ya SCREAMING_SNAKE_CASE__: Tuple= xa for k in range(snake_case_ ): SCREAMING_SNAKE_CASE__: Any= f(snake_case_ , y[k] ) SCREAMING_SNAKE_CASE__: Optional[int]= f(x + 0.5 * h , y[k] + 0.5 * h * ka ) SCREAMING_SNAKE_CASE__: Tuple= f(x + 0.5 * h , y[k] + 0.5 * h * ka ) SCREAMING_SNAKE_CASE__: List[str]= f(x + h , y[k] + h * ka ) SCREAMING_SNAKE_CASE__: Tuple= y[k] + (1 / 6) * h * (ka + 2 * ka + 2 * ka + ka) x += h return y if __name__ == "__main__": import doctest doctest.testmod()
64
0
from queue import Queue
from typing import TYPE_CHECKING, Optional


if TYPE_CHECKING:
    from ..models.auto import AutoTokenizer


class BaseStreamer:
    """Base class from which `.generate()` streamers should inherit."""

    def put(self, value):
        """Called by `.generate()` to push new tokens."""
        raise NotImplementedError()

    def end(self):
        """Called by `.generate()` to signal the end of generation."""
        raise NotImplementedError()


class TextStreamer(BaseStreamer):
    """Streamer that decodes tokens and prints complete words to stdout as they arrive."""

    def __init__(self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, **decode_kwargs):
        self.tokenizer = tokenizer
        self.skip_prompt = skip_prompt
        self.decode_kwargs = decode_kwargs

        # variables used in the streaming process
        self.token_cache = []
        self.print_len = 0
        self.next_tokens_are_prompt = True

    def put(self, value):
        """Receive a batch of token ids, decode them, and emit finished text."""
        if len(value.shape) > 1 and value.shape[0] > 1:
            raise ValueError("TextStreamer only supports batch size 1")
        elif len(value.shape) > 1:
            value = value[0]

        if self.skip_prompt and self.next_tokens_are_prompt:
            self.next_tokens_are_prompt = False
            return

        # Add the new token to the cache and decodes the entire thing.
        self.token_cache.extend(value.tolist())
        text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)

        # After the symbol for a new line, we flush the cache.
        if text.endswith("\n"):
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        # If the last token is a CJK character, we print the characters.
        elif len(text) > 0 and self._is_chinese_char(ord(text[-1])):
            printable_text = text[self.print_len :]
            self.print_len += len(printable_text)
        # Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
        # which may change with the subsequent token -- there are probably smarter ways to do this!)
        else:
            printable_text = text[self.print_len : text.rfind(" ") + 1]
            self.print_len += len(printable_text)

        self.on_finalized_text(printable_text)

    def end(self):
        """Flush any remaining tokens and signal the end of the stream."""
        if len(self.token_cache) > 0:
            text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        else:
            printable_text = ""

        self.next_tokens_are_prompt = True
        self.on_finalized_text(printable_text, stream_end=True)

    def on_finalized_text(self, text: str, stream_end: bool = False):
        """Print the new text to stdout; add a trailing newline when the stream ends."""
        print(text, flush=True, end="" if not stream_end else None)

    def _is_chinese_char(self, cp):
        """Return True when `cp` is the codepoint of a CJK character."""
        # This defines a "chinese character" as anything in the CJK Unicode blocks,
        # which is the heuristic used to decide characters can be printed immediately.
        if (
            (cp >= 0x4E00 and cp <= 0x9FFF)
            or (cp >= 0x3400 and cp <= 0x4DBF)
            or (cp >= 0x20000 and cp <= 0x2A6DF)
            or (cp >= 0x2A700 and cp <= 0x2B73F)
            or (cp >= 0x2B740 and cp <= 0x2B81F)
            or (cp >= 0x2B820 and cp <= 0x2CEAF)
            or (cp >= 0xF900 and cp <= 0xFAFF)
            or (cp >= 0x2F800 and cp <= 0x2FA1F)
        ):
            return True

        return False


class TextIteratorStreamer(TextStreamer):
    """Streamer that stores decoded text in a queue, usable as an iterator from another thread."""

    def __init__(
        self,
        tokenizer: "AutoTokenizer",
        skip_prompt: bool = False,
        timeout: Optional[float] = None,
        **decode_kwargs,
    ):
        super().__init__(tokenizer, skip_prompt, **decode_kwargs)
        self.text_queue = Queue()
        self.stop_signal = None
        self.timeout = timeout

    def on_finalized_text(self, text: str, stream_end: bool = False):
        """Put the new text in the queue; on stream end, also enqueue the stop signal."""
        self.text_queue.put(text, timeout=self.timeout)
        if stream_end:
            self.text_queue.put(self.stop_signal, timeout=self.timeout)

    def __iter__(self):
        return self

    def __next__(self):
        value = self.text_queue.get(timeout=self.timeout)
        if value == self.stop_signal:
            raise StopIteration()
        else:
            return value
59
import unittest import torch from torch import nn from diffusers.models.activations import get_activation class _lowerCamelCase ( unittest.TestCase ): def UpperCamelCase_ ( self ) -> List[Any]: SCREAMING_SNAKE_CASE__: Tuple= get_activation('''swish''' ) self.assertIsInstance(lowerCAmelCase , nn.SiLU ) self.assertEqual(act(torch.tensor(-100 , dtype=torch.floataa ) ).item() , 0 ) self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 ) def UpperCamelCase_ ( self ) -> int: SCREAMING_SNAKE_CASE__: Optional[Any]= get_activation('''silu''' ) self.assertIsInstance(lowerCAmelCase , nn.SiLU ) self.assertEqual(act(torch.tensor(-100 , dtype=torch.floataa ) ).item() , 0 ) self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 ) def UpperCamelCase_ ( self ) -> Union[str, Any]: SCREAMING_SNAKE_CASE__: Optional[int]= get_activation('''mish''' ) self.assertIsInstance(lowerCAmelCase , nn.Mish ) self.assertEqual(act(torch.tensor(-200 , dtype=torch.floataa ) ).item() , 0 ) self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 ) def UpperCamelCase_ ( self ) -> int: SCREAMING_SNAKE_CASE__: Dict= get_activation('''gelu''' ) self.assertIsInstance(lowerCAmelCase , nn.GELU ) self.assertEqual(act(torch.tensor(-100 , dtype=torch.floataa ) ).item() , 0 ) self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
64
0
"""Testing suite for the PyTorch MobileViT model."""
import inspect
import unittest

from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
    from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import MobileViTImageProcessor


class MobileViTConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "neck_hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))


class MobileViTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        last_hidden_size=640,
        num_attention_heads=4,
        hidden_act="silu",
        conv_kernel_size=3,
        output_stride=32,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = last_hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            num_attention_heads=self.num_attention_heads,
            hidden_act=self.hidden_act,
            conv_kernel_size=self.conv_kernel_size,
            output_stride=self.output_stride,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileViTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class MobileViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as MobileViT does not use input_ids,
    inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (
        (MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileViTModel,
            "image-classification": MobileViTForImageClassification,
            "image-segmentation": MobileViTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileViTModelTester(self)
        self.config_tester = MobileViTConfigTester(self, config_class=MobileViTConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileViT does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileViT does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)

            # MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]),
                    [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor],
                )
                divisor *= 2

            self.assertEqual(self.model_tester.output_stride, divisor // 2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class MobileViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return MobileViTImageProcessor.from_pretrained("apple/mobilevit-xx-small") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = MobileViTForImageClassification.from_pretrained("apple/mobilevit-xx-small").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.9364, -1.2327, -0.4653]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[6.9713, 6.9786, 7.2422], [7.2893, 7.2825, 7.4446], [7.6580, 7.8797, 7.9420]],
                [[-10.6869, -10.3250, -10.3471], [-10.4228, -9.9868, -9.7132], [-11.0405, -11.0221, -10.7318]],
                [[-3.3089, -2.8539, -2.6740], [-3.2706, -2.5621, -2.5108], [-3.2534, -2.6615, -2.6651]],
            ],
            device=torch_device,
        )

        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_post_processing_semantic_segmentation(self):
        model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)])
        expected_shape = torch.Size((50, 60))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((32, 32))
        self.assertEqual(segmentation[0].shape, expected_shape)
60
from __future__ import annotations

from collections.abc import Callable
from typing import Any, Generic, TypeVar

T = TypeVar("T")


class SegmentTree(Generic[T]):
    """Iterative (bottom-up) segment tree over an arbitrary associative combiner.

    Construction is O(n); point updates and inclusive range queries are O(log n).
    """

    def __init__(self, arr: list[T], fnc: Callable[[T, T], T]) -> None:
        """Build the tree from ``arr`` using combiner ``fnc`` (e.g. ``min``, ``max``, add).

        Note: the class was previously unusable — it was defined under a mangled
        name while being instantiated as ``SegmentTree``, and ``update`` had lost
        its store into the tree array; both are fixed here.
        """
        any_type: Any | T = None
        self.N: int = len(arr)
        # st[N:] holds the leaves (a copy-by-reference of arr); st[1:N] are the
        # internal nodes; st[0] is unused padding.
        self.st: list[T] = [any_type for _ in range(self.N)] + arr
        self.fn = fnc
        self.build()

    def build(self) -> None:
        """Fill every internal node with the combination of its two children."""
        for p in range(self.N - 1, 0, -1):
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def update(self, p: int, v: T) -> None:
        """Set element ``p`` (0-based) to ``v`` and recompute all its ancestors."""
        p += self.N
        self.st[p] = v
        while p > 1:
            p = p // 2
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def query(self, l: int, r: int) -> T | None:  # noqa: E741
        """Combine the elements in the inclusive range ``[l, r]``.

        Returns None only if the walk never picks up a node (empty range).
        """
        l, r = l + self.N, r + self.N
        res: T | None = None
        while l <= r:
            if l % 2 == 1:  # l is a right child: take it and step past it
                res = self.st[l] if res is None else self.fn(res, self.st[l])
            if r % 2 == 0:  # r is a left child: take it and step past it
                res = self.st[r] if res is None else self.fn(res, self.st[r])
            l, r = (l + 1) // 2, (r - 1) // 2
        return res


if __name__ == "__main__":
    from functools import reduce

    test_array = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]
    test_updates = {
        0: 7,
        1: 2,
        2: 6,
        3: -14,
        4: 5,
        5: 4,
        6: 7,
        7: -10,
        8: 9,
        9: 10,
        10: 12,
        11: 1,
    }

    min_segment_tree = SegmentTree(test_array, min)
    max_segment_tree = SegmentTree(test_array, max)
    sum_segment_tree = SegmentTree(test_array, lambda a, b: a + b)

    def test_all_segments() -> None:
        """Compare every inclusive [i, j] tree query against a brute-force reduce."""
        for i in range(len(test_array)):
            for j in range(i, len(test_array)):
                min_range = reduce(min, test_array[i : j + 1])
                max_range = reduce(max, test_array[i : j + 1])
                sum_range = reduce(lambda a, b: a + b, test_array[i : j + 1])
                assert min_range == min_segment_tree.query(i, j)
                assert max_range == max_segment_tree.query(i, j)
                assert sum_range == sum_segment_tree.query(i, j)

    test_all_segments()

    # Apply point updates (mirroring them into test_array) and re-verify all ranges.
    for index, value in test_updates.items():
        test_array[index] = value
        min_segment_tree.update(index, value)
        max_segment_tree.update(index, value)
        sum_segment_tree.update(index, value)
    test_all_segments()
64
0
import unittest

from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin


# Path to the SentencePiece fixture used to build slow tokenizers in these tests.
UpperCamelCase = get_tests_dir("fixtures/spiece.model")


@require_sentencepiece
@require_tokenizers
class __lowerCamelCase(TokenizerTesterMixin, unittest.TestCase):
    """Tokenization tests for ALBERT (slow SentencePiece and fast tokenizers).

    Fixes over the previous revision: the mixin base was an undefined name
    (now the imported TokenizerTesterMixin), every test method was named ``a``
    (so they shadowed each other and unittest collected none of them), and
    several locals (``text``, ``text_2``, the vocab path) were unbound.
    """

    # Attributes consumed by TokenizerTesterMixin.
    tokenizer_class = AlbertTokenizer
    rust_tokenizer_class = AlbertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    test_sentencepiece_ignore_case = True

    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = AlbertTokenizer(UpperCamelCase)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        """Return a (raw, expected-decoded) text pair for the common round-trip tests."""
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text

    def test_convert_token_and_id(self):
        """`<pad>` must map to id 0 and back."""
        token = "<pad>"
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "▁eloquent")
        self.assertEqual(len(vocab_keys), 30_000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 30_000)

    def test_rust_and_python_full_tokenizers(self):
        """Slow and fast tokenizers must agree on tokens and ids."""
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        # without special tokens
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        # with special tokens (default)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    def test_full_tokenizer(self):
        """Pin exact SentencePiece segmentation and ids for the fixture model."""
        tokenizer = AlbertTokenizer(UpperCamelCase, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁this", "▁is", "▁a", "▁test"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [48, 25, 21, 1_289])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", "."],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [31, 23, 386, 19, 561, 3_050, 15, 17, 48, 25, 8_256, 18, 1, 9])

        # the accented "é" is out of vocabulary, so decoding ids gives "<unk>" back
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "."],
        )

    def test_sequence_builders(self):
        """build_inputs_with_special_tokens must add [CLS]/[SEP] framing."""
        tokenizer = AlbertTokenizer(UpperCamelCase)

        text = tokenizer.encode("sequence builders")
        text_a = tokenizer.encode("multi-sequence build")

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
            tokenizer.sep_token_id
        ]

    @slow
    def test_tokenizer_integration(self):
        """Pin the full padded encoding of three sentences under albert-base-v2.

        The padded length is 101. The all-1/all-0 runs are written as list
        arithmetic — value-identical to the original inline literals.
        """
        expected_encoding = {
            "attention_mask": [
                [1] * 101,
                [1] * 31 + [0] * 70,
                [1] * 12 + [0] * 89,
            ],
            "input_ids": [
                [2, 21_970, 13, 5, 6_092, 167, 28, 7_103, 2_153, 673, 8, 7_028, 12_051, 18, 17, 7_103, 2_153, 673, 8,
                 3_515, 18_684, 8, 4_461, 6, 1_927, 297, 8, 12_060, 2_607, 18, 13, 5, 4_461, 15, 10_538, 38, 8, 135,
                 15, 822, 58, 15, 993, 10_363, 15, 1_460, 8_005, 4_461, 15, 993, 255, 2_328, 9, 9, 9, 6, 26, 1_112,
                 816, 3_260, 13, 5, 103, 2_377, 6, 17, 1_112, 816, 2_782, 13, 5, 103, 10_641, 6, 29, 84, 2_512,
                 2_430, 782, 18_684, 2_761, 19, 808, 2_430, 2_556, 17, 855, 1_480, 9_477, 4_091, 128, 11_712, 15,
                 7_103, 2_153, 673, 17, 24_883, 9_990, 9, 3],
                [2, 11_502, 25, 1_006, 20, 782, 8, 11_809, 855, 1_732, 19_393, 18_667, 37, 367, 21_018, 69, 1_854,
                 34, 11_860, 19_124, 27, 156, 225, 17, 193, 4_141, 19, 65, 9_124, 9, 3] + [0] * 70,
                [2, 14, 2_231, 886, 2_385, 17_659, 84, 14, 16_792, 1_952, 9, 3] + [0] * 89,
            ],
            "token_type_ids": [[0] * 101, [0] * 101, [0] * 101],
        }

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="albert-base-v2",
            revision="6b6560eaf5ff2e250b00c50f380c5389a9c2d82e",
        )
61
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/

import gc
import random
import tempfile
import unittest

import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    ControlNetModel,
    DDIMScheduler,
    StableDiffusionControlNetImg2ImgPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..pipeline_params import (
    IMAGE_TO_IMAGE_IMAGE_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
    PipelineKarrasSchedulerTesterMixin,
    PipelineLatentTesterMixin,
    PipelineTesterMixin,
)


enable_full_determinism()


class ControlNetImg2ImgPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    """Fast CPU tests for StableDiffusionControlNetImg2ImgPipeline with a single ControlNet.

    Note: the previous revision used nonexistent API names (``UNetaDConditionModel``,
    ``StableDiffusionControlNetImgaImgPipeline``, ``np.uinta``) and shadowed the
    MultiControlNet test class below by giving both classes the same name; both
    problems are fixed here.
    """

    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"control_image"})
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        """Build a tiny, seed-deterministic component set for the pipeline under test."""
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        torch.manual_seed(0)
        controlnet = ControlNetModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            in_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            cross_attention_dim=32,
            conditioning_embedding_out_channels=(16, 32),
        )
        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "controlnet": controlnet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Build deterministic call kwargs: a 64x64 PIL image plus a control tensor."""
        if str(device).startswith("mps"):
            # MPS generators must live on the default device
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        controlnet_embedder_scale_factor = 2
        control_image = randn_tensor(
            (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
            generator=generator,
            device=torch.device(device),
        )
        image = floats_tensor(control_image.shape, rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "image": image,
            "control_image": control_image,
        }
        return inputs

    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)


class StableDiffusionMultiControlNetPipelineFastTests(
    PipelineTesterMixin, PipelineKarrasSchedulerTesterMixin, unittest.TestCase
):
    """Fast CPU tests for the img2img pipeline driven by a MultiControlNetModel."""

    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset([])  # TO_DO: add image_params once refactored VaeImageProcessor.preprocess

    def get_dummy_components(self):
        """Like the single-ControlNet set, but with two ControlNets wrapped together."""
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        torch.manual_seed(0)

        def init_weights(m):
            # Give the zero-initialized controlnet output blocks nonzero weights so
            # the two controlnets actually influence (and differ in) the output.
            if isinstance(m, torch.nn.Conv2d):
                torch.nn.init.normal_(m.weight)
                m.bias.data.fill_(1.0)

        controlnet1 = ControlNetModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            in_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            cross_attention_dim=32,
            conditioning_embedding_out_channels=(16, 32),
        )
        controlnet1.controlnet_down_blocks.apply(init_weights)

        torch.manual_seed(0)
        controlnet2 = ControlNetModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            in_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            cross_attention_dim=32,
            conditioning_embedding_out_channels=(16, 32),
        )
        controlnet2.controlnet_down_blocks.apply(init_weights)

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        controlnet = MultiControlNetModel([controlnet1, controlnet2])

        components = {
            "unet": unet,
            "controlnet": controlnet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic call kwargs with a list of two control tensors."""
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        controlnet_embedder_scale_factor = 2
        control_image = [
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
                generator=generator,
                device=torch.device(device),
            ),
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
                generator=generator,
                device=torch.device(device),
            ),
        ]
        image = floats_tensor(control_image[0].shape, rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "image": image,
            "control_image": control_image,
        }
        return inputs

    def test_control_guidance_switch(self):
        """Different control_guidance_start/end settings must change the output."""
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)

        scale = 10.0
        steps = 4

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["guidance_scale"] = scale
        output_1 = pipe(**inputs)[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["guidance_scale"] = scale
        output_2 = pipe(**inputs, control_guidance_start=0.1, control_guidance_end=0.2)[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["guidance_scale"] = scale
        output_3 = pipe(**inputs, control_guidance_start=[0.1, 0.3], control_guidance_end=[0.2, 0.7])[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["guidance_scale"] = scale
        output_4 = pipe(**inputs, control_guidance_start=0.4, control_guidance_end=[0.5, 0.8])[0]

        # make sure that all outputs are different
        assert np.sum(np.abs(output_1 - output_2)) > 1e-3
        assert np.sum(np.abs(output_1 - output_3)) > 1e-3
        assert np.sum(np.abs(output_1 - output_4)) > 1e-3

    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)

    def test_save_pretrained_raise_not_implemented_exception(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        with tempfile.TemporaryDirectory() as tmpdir:
            try:
                # save_pretrained is not implemented for Multi-ControlNet
                pipe.save_pretrained(tmpdir)
            except NotImplementedError:
                pass


@slow
@require_torch_gpu
class ControlNetImg2ImgPipelineSlowTests(unittest.TestCase):
    """End-to-end GPU test against reference outputs hosted on the Hub."""

    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_canny(self):
        controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")

        pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet
        )
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        prompt = "evil space-punk bird"
        control_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
        ).resize((512, 512))
        image = load_image(
            "https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png"
        ).resize((512, 512))

        output = pipe(
            prompt,
            image,
            control_image=control_image,
            generator=generator,
            output_type="np",
            num_inference_steps=50,
            strength=0.6,
        )
        image = output.images[0]
        assert image.shape == (512, 512, 3)

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy"
        )
        assert np.abs(expected_image - image).max() < 9e-2
64
0
import inspect import unittest from transformers import MobileNetVaConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import MobileNetVaImageProcessor class SCREAMING_SNAKE_CASE ( lowerCAmelCase ): '''simple docstring''' def _A ( self : List[str] ): SCREAMING_SNAKE_CASE : List[str] = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(UpperCAmelCase_ , "tf_padding" ) ) self.parent.assertTrue(hasattr(UpperCAmelCase_ , "depth_multiplier" ) ) class SCREAMING_SNAKE_CASE : '''simple docstring''' def __init__( self : str , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[str]=13 , UpperCAmelCase_ : Any=3 , UpperCAmelCase_ : Union[str, Any]=32 , UpperCAmelCase_ : int=0.25 , UpperCAmelCase_ : Union[str, Any]=8 , UpperCAmelCase_ : Dict=8 , UpperCAmelCase_ : Optional[int]=6 , UpperCAmelCase_ : Optional[Any]=32 , UpperCAmelCase_ : Optional[int]=True , UpperCAmelCase_ : str=True , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : str="relu6" , UpperCAmelCase_ : List[str]=1280 , UpperCAmelCase_ : Optional[int]=0.1 , UpperCAmelCase_ : Any=0.02 , UpperCAmelCase_ : Union[str, Any]=True , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : Any=10 , UpperCAmelCase_ : Optional[Any]=None , ): SCREAMING_SNAKE_CASE : Union[str, Any] = parent SCREAMING_SNAKE_CASE : int = batch_size SCREAMING_SNAKE_CASE : Dict = num_channels 
SCREAMING_SNAKE_CASE : Dict = image_size SCREAMING_SNAKE_CASE : int = depth_multiplier SCREAMING_SNAKE_CASE : str = depth_divisible_by SCREAMING_SNAKE_CASE : Union[str, Any] = min_depth SCREAMING_SNAKE_CASE : int = expand_ratio SCREAMING_SNAKE_CASE : Tuple = tf_padding SCREAMING_SNAKE_CASE : List[str] = output_stride SCREAMING_SNAKE_CASE : Optional[int] = first_layer_is_expansion SCREAMING_SNAKE_CASE : Any = finegrained_output SCREAMING_SNAKE_CASE : Optional[Any] = hidden_act SCREAMING_SNAKE_CASE : Optional[int] = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier ) SCREAMING_SNAKE_CASE : Any = classifier_dropout_prob SCREAMING_SNAKE_CASE : Dict = use_labels SCREAMING_SNAKE_CASE : int = is_training SCREAMING_SNAKE_CASE : Dict = num_labels SCREAMING_SNAKE_CASE : Optional[Any] = initializer_range SCREAMING_SNAKE_CASE : Union[str, Any] = scope def _A ( self : Optional[Any] ): SCREAMING_SNAKE_CASE : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) SCREAMING_SNAKE_CASE : Optional[int] = None SCREAMING_SNAKE_CASE : List[Any] = None if self.use_labels: SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size] , self.num_labels ) SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) SCREAMING_SNAKE_CASE : Tuple = self.get_config() return config, pixel_values, labels, pixel_labels def _A ( self : Optional[int] ): return MobileNetVaConfig( num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , depth_divisible_by=self.depth_divisible_by , min_depth=self.min_depth , expand_ratio=self.expand_ratio , output_stride=self.output_stride , first_layer_is_expansion=self.first_layer_is_expansion , finegrained_output=self.finegrained_output , hidden_act=self.hidden_act , tf_padding=self.tf_padding , classifier_dropout_prob=self.classifier_dropout_prob , 
initializer_range=self.initializer_range , ) def _A ( self : int , UpperCAmelCase_ : Any , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Tuple ): SCREAMING_SNAKE_CASE : int = MobileNetVaModel(config=UpperCAmelCase_ ) model.to(UpperCAmelCase_ ) model.eval() SCREAMING_SNAKE_CASE : Any = model(UpperCAmelCase_ ) self.parent.assertEqual( result.last_hidden_state.shape , ( self.batch_size, self.last_hidden_size, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) self.parent.assertEqual( result.pooler_output.shape , (self.batch_size, self.last_hidden_size) , ) def _A ( self : List[str] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Any ): SCREAMING_SNAKE_CASE : str = self.num_labels SCREAMING_SNAKE_CASE : List[Any] = MobileNetVaForImageClassification(UpperCAmelCase_ ) model.to(UpperCAmelCase_ ) model.eval() SCREAMING_SNAKE_CASE : Any = model(UpperCAmelCase_ , labels=UpperCAmelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _A ( self : Union[str, Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Optional[Any] ): SCREAMING_SNAKE_CASE : int = self.num_labels SCREAMING_SNAKE_CASE : Dict = MobileNetVaForSemanticSegmentation(UpperCAmelCase_ ) model.to(UpperCAmelCase_ ) model.eval() SCREAMING_SNAKE_CASE : Optional[int] = model(UpperCAmelCase_ ) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) SCREAMING_SNAKE_CASE : Any = model(UpperCAmelCase_ , labels=UpperCAmelCase_ ) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) def _A ( self : Any ): SCREAMING_SNAKE_CASE : List[Any] = 
self.prepare_config_and_inputs() SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = config_and_inputs SCREAMING_SNAKE_CASE : Optional[Any] = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE ( lowerCAmelCase , lowerCAmelCase , unittest.TestCase ): '''simple docstring''' UpperCamelCase_ : Any = ( (MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation) if is_torch_available() else () ) UpperCamelCase_ : List[Any] = ( { '''feature-extraction''': MobileNetVaModel, '''image-classification''': MobileNetVaForImageClassification, '''image-segmentation''': MobileNetVaForSemanticSegmentation, } if is_torch_available() else {} ) UpperCamelCase_ : Any = False UpperCamelCase_ : List[str] = False UpperCamelCase_ : int = False UpperCamelCase_ : str = False def _A ( self : Optional[Any] ): SCREAMING_SNAKE_CASE : List[str] = MobileNetVaModelTester(self ) SCREAMING_SNAKE_CASE : Optional[Any] = MobileNetVaConfigTester(self , config_class=UpperCAmelCase_ , has_text_modality=UpperCAmelCase_ ) def _A ( self : Optional[Any] ): self.config_tester.run_common_tests() @unittest.skip(reason="MobileNetV2 does not use inputs_embeds" ) def _A ( self : List[Any] ): pass @unittest.skip(reason="MobileNetV2 does not support input and output embeddings" ) def _A ( self : Dict ): pass @unittest.skip(reason="MobileNetV2 does not output attentions" ) def _A ( self : Union[str, Any] ): pass def _A ( self : List[Any] ): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE : Tuple = model_class(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Tuple = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic SCREAMING_SNAKE_CASE : Dict = [*signature.parameters.keys()] SCREAMING_SNAKE_CASE : 
Union[str, Any] = ["pixel_values"] self.assertListEqual(arg_names[:1] , UpperCAmelCase_ ) def _A ( self : Optional[int] ): SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCAmelCase_ ) def _A ( self : List[Any] ): def check_hidden_states_output(UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Union[str, Any] ): SCREAMING_SNAKE_CASE : Any = model_class(UpperCAmelCase_ ) model.to(UpperCAmelCase_ ) model.eval() with torch.no_grad(): SCREAMING_SNAKE_CASE : str = model(**self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ ) ) SCREAMING_SNAKE_CASE : List[Any] = outputs.hidden_states SCREAMING_SNAKE_CASE : Any = 16 self.assertEqual(len(UpperCAmelCase_ ) , UpperCAmelCase_ ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE : str = True check_hidden_states_output(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] SCREAMING_SNAKE_CASE : List[Any] = True check_hidden_states_output(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) def _A ( self : Any ): SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase_ ) def _A ( self : Optional[Any] ): SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*UpperCAmelCase_ ) @slow def _A ( self : Optional[Any] ): for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE : int = MobileNetVaModel.from_pretrained(UpperCAmelCase_ ) self.assertIsNotNone(UpperCAmelCase_ ) def lowerCamelCase__ ( ): """simple docstring""" SCREAMING_SNAKE_CASE : int = 
Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' @cached_property def _A ( self : Optional[int] ): return ( MobileNetVaImageProcessor.from_pretrained("google/mobilenet_v2_1.0_224" ) if is_vision_available() else None ) @slow def _A ( self : Tuple ): SCREAMING_SNAKE_CASE : int = MobileNetVaForImageClassification.from_pretrained("google/mobilenet_v2_1.0_224" ).to(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Tuple = self.default_image_processor SCREAMING_SNAKE_CASE : Optional[int] = prepare_img() SCREAMING_SNAKE_CASE : Optional[int] = image_processor(images=UpperCAmelCase_ , return_tensors="pt" ).to(UpperCAmelCase_ ) # forward pass with torch.no_grad(): SCREAMING_SNAKE_CASE : str = model(**UpperCAmelCase_ ) # verify the logits SCREAMING_SNAKE_CASE : Union[str, Any] = torch.Size((1, 1001) ) self.assertEqual(outputs.logits.shape , UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : List[str] = torch.tensor([0.2_445, -1.1_993, 0.1_905] ).to(UpperCAmelCase_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCAmelCase_ , atol=1E-4 ) ) @slow def _A ( self : str ): SCREAMING_SNAKE_CASE : Optional[int] = MobileNetVaForSemanticSegmentation.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513" ) SCREAMING_SNAKE_CASE : int = model.to(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Optional[int] = MobileNetVaImageProcessor.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513" ) SCREAMING_SNAKE_CASE : Optional[Any] = prepare_img() SCREAMING_SNAKE_CASE : Union[str, Any] = image_processor(images=UpperCAmelCase_ , return_tensors="pt" ).to(UpperCAmelCase_ ) # forward pass with torch.no_grad(): SCREAMING_SNAKE_CASE : Optional[Any] = model(**UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : List[str] = outputs.logits # verify the logits SCREAMING_SNAKE_CASE : Dict = torch.Size((1, 21, 65, 65) ) self.assertEqual(logits.shape , UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : 
str = torch.tensor( [ [[17.5_790, 17.7_581, 18.3_355], [18.3_257, 18.4_230, 18.8_973], [18.6_169, 18.8_650, 19.2_187]], [[-2.1_595, -2.0_977, -2.3_741], [-2.4_226, -2.3_028, -2.6_835], [-2.7_819, -2.5_991, -2.7_706]], [[4.2_058, 4.8_317, 4.7_638], [4.4_136, 5.0_361, 4.9_383], [4.5_028, 4.9_644, 4.8_734]], ] , device=UpperCAmelCase_ , ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , UpperCAmelCase_ , atol=1E-4 ) )
62
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim from dataclasses import dataclass from typing import Optional, Tuple, Union import flax import jax import jax.numpy as jnp from ..configuration_utils import ConfigMixin, register_to_config from .scheduling_utils_flax import ( CommonSchedulerState, FlaxKarrasDiffusionSchedulers, FlaxSchedulerMixin, FlaxSchedulerOutput, add_noise_common, get_velocity_common, ) @flax.struct.dataclass class _lowerCamelCase : __a = 42 # setable values __a = 42 __a = 42 __a = None @classmethod def UpperCamelCase_ ( cls , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) -> Optional[Any]: return cls(common=lowerCAmelCase , init_noise_sigma=lowerCAmelCase , timesteps=lowerCAmelCase ) @dataclass class _lowerCamelCase ( UpperCamelCase_ ): __a = 42 class _lowerCamelCase ( UpperCamelCase_ , UpperCamelCase_ ): __a = [e.name for e in FlaxKarrasDiffusionSchedulers] __a = 42 @property def UpperCamelCase_ ( self ) -> List[Any]: return True @register_to_config def __init__( self , lowerCAmelCase = 1000 , lowerCAmelCase = 0.0001 , lowerCAmelCase = 0.02 , lowerCAmelCase = "linear" , lowerCAmelCase = None , lowerCAmelCase = "fixed_small" , lowerCAmelCase = True , lowerCAmelCase = "epsilon" , lowerCAmelCase = jnp.floataa , ) -> Optional[int]: SCREAMING_SNAKE_CASE__: Optional[int]= dtype def UpperCamelCase_ ( self , lowerCAmelCase = None ) -> DDPMSchedulerState: if common is None: SCREAMING_SNAKE_CASE__: Optional[Any]= CommonSchedulerState.create(self ) # standard deviation of the initial noise distribution SCREAMING_SNAKE_CASE__: Dict= jnp.array(1.0 , dtype=self.dtype ) SCREAMING_SNAKE_CASE__: int= jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1] return DDPMSchedulerState.create( common=lowerCAmelCase , init_noise_sigma=lowerCAmelCase , timesteps=lowerCAmelCase , ) def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = None ) -> jnp.ndarray: return sample def UpperCamelCase_ ( 
self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = () ) -> DDPMSchedulerState: SCREAMING_SNAKE_CASE__: str= self.config.num_train_timesteps // num_inference_steps # creates integer timesteps by multiplying by ratio # rounding to avoid issues when num_inference_step is power of 3 SCREAMING_SNAKE_CASE__: str= (jnp.arange(0 , lowerCAmelCase ) * step_ratio).round()[::-1] return state.replace( num_inference_steps=lowerCAmelCase , timesteps=lowerCAmelCase , ) def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=None , lowerCAmelCase=None ) -> List[str]: SCREAMING_SNAKE_CASE__: Tuple= state.common.alphas_cumprod[t] SCREAMING_SNAKE_CASE__: int= jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) ) # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf) # and sample from it to get previous sample # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample SCREAMING_SNAKE_CASE__: int= (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t] if variance_type is None: SCREAMING_SNAKE_CASE__: Union[str, Any]= self.config.variance_type # hacks - were probably added for training stability if variance_type == "fixed_small": SCREAMING_SNAKE_CASE__: Dict= jnp.clip(lowerCAmelCase , a_min=1e-20 ) # for rl-diffuser https://arxiv.org/abs/2205.09991 elif variance_type == "fixed_small_log": SCREAMING_SNAKE_CASE__: str= jnp.log(jnp.clip(lowerCAmelCase , a_min=1e-20 ) ) elif variance_type == "fixed_large": SCREAMING_SNAKE_CASE__: Union[str, Any]= state.common.betas[t] elif variance_type == "fixed_large_log": # Glide max_log SCREAMING_SNAKE_CASE__: Optional[Any]= jnp.log(state.common.betas[t] ) elif variance_type == "learned": return predicted_variance elif variance_type == "learned_range": SCREAMING_SNAKE_CASE__: List[Any]= variance SCREAMING_SNAKE_CASE__: Any= state.common.betas[t] SCREAMING_SNAKE_CASE__: List[Any]= (predicted_variance + 1) / 
2 SCREAMING_SNAKE_CASE__: Optional[Any]= frac * max_log + (1 - frac) * min_log return variance def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = True , ) -> Union[FlaxDDPMSchedulerOutput, Tuple]: SCREAMING_SNAKE_CASE__: Union[str, Any]= timestep if key is None: SCREAMING_SNAKE_CASE__: Optional[Any]= jax.random.PRNGKey(0 ) if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]: SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: Optional[int]= jnp.split(lowerCAmelCase , sample.shape[1] , axis=1 ) else: SCREAMING_SNAKE_CASE__: Any= None # 1. compute alphas, betas SCREAMING_SNAKE_CASE__: List[Any]= state.common.alphas_cumprod[t] SCREAMING_SNAKE_CASE__: Optional[int]= jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) ) SCREAMING_SNAKE_CASE__: Optional[int]= 1 - alpha_prod_t SCREAMING_SNAKE_CASE__: str= 1 - alpha_prod_t_prev # 2. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf if self.config.prediction_type == "epsilon": SCREAMING_SNAKE_CASE__: Dict= (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 elif self.config.prediction_type == "sample": SCREAMING_SNAKE_CASE__: str= model_output elif self.config.prediction_type == "v_prediction": SCREAMING_SNAKE_CASE__: Tuple= (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output else: raise ValueError( f'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` ' ''' for the FlaxDDPMScheduler.''' ) # 3. Clip "predicted x_0" if self.config.clip_sample: SCREAMING_SNAKE_CASE__: Any= jnp.clip(lowerCAmelCase , -1 , 1 ) # 4. 
Compute coefficients for pred_original_sample x_0 and current sample x_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf SCREAMING_SNAKE_CASE__: int= (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t SCREAMING_SNAKE_CASE__: Any= state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t # 5. Compute predicted previous sample µ_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf SCREAMING_SNAKE_CASE__: Dict= pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample # 6. Add noise def random_variance(): SCREAMING_SNAKE_CASE__: int= jax.random.split(lowerCAmelCase , num=1 ) SCREAMING_SNAKE_CASE__: str= jax.random.normal(lowerCAmelCase , shape=model_output.shape , dtype=self.dtype ) return (self._get_variance(lowerCAmelCase , lowerCAmelCase , predicted_variance=lowerCAmelCase ) ** 0.5) * noise SCREAMING_SNAKE_CASE__: Union[str, Any]= jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) ) SCREAMING_SNAKE_CASE__: Optional[int]= pred_prev_sample + variance if not return_dict: return (pred_prev_sample, state) return FlaxDDPMSchedulerOutput(prev_sample=lowerCAmelCase , state=lowerCAmelCase ) def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ) -> jnp.ndarray: return add_noise_common(state.common , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ) -> jnp.ndarray: return get_velocity_common(state.common , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) def __len__( self ) -> Tuple: return self.config.num_train_timesteps
64
0
import gc import random import unittest import numpy as np import torch from PIL import Image from diffusers import ( DDIMScheduler, KandinskyVaaInpaintPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class a ( lowercase__ , unittest.TestCase ): """simple docstring""" a : int = KandinskyVaaInpaintPipeline a : Any = ['image_embeds', 'negative_image_embeds', 'image', 'mask_image'] a : Any = [ 'image_embeds', 'negative_image_embeds', 'image', 'mask_image', ] a : Any = [ 'generator', 'height', 'width', 'latents', 'guidance_scale', 'num_inference_steps', 'return_dict', 'guidance_scale', 'num_images_per_prompt', 'output_type', 'return_dict', ] a : List[Any] = False @property def UpperCAmelCase ( self : int ) -> Dict: return 32 @property def UpperCAmelCase ( self : Optional[int] ) -> Optional[Any]: return 32 @property def UpperCAmelCase ( self : Optional[Any] ) -> Optional[int]: return self.time_input_dim @property def UpperCAmelCase ( self : str ) -> List[str]: return self.time_input_dim * 4 @property def UpperCAmelCase ( self : Tuple ) -> List[str]: return 100 @property def UpperCAmelCase ( self : Dict ) -> Any: torch.manual_seed(0 ) __UpperCAmelCase : Optional[Any] = { """in_channels""": 9, # Out channels is double in channels because predicts mean and variance """out_channels""": 8, """addition_embed_type""": """image""", """down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""), """up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""), """mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""", """block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2), """layers_per_block""": 1, 
"""encoder_hid_dim""": self.text_embedder_hidden_size, """encoder_hid_dim_type""": """image_proj""", """cross_attention_dim""": self.cross_attention_dim, """attention_head_dim""": 4, """resnet_time_scale_shift""": """scale_shift""", """class_embed_type""": None, } __UpperCAmelCase : int = UNetaDConditionModel(**__lowercase ) return model @property def UpperCAmelCase ( self : int ) -> int: return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def UpperCAmelCase ( self : Dict ) -> List[str]: torch.manual_seed(0 ) __UpperCAmelCase : List[Any] = VQModel(**self.dummy_movq_kwargs ) return model def UpperCAmelCase ( self : Any ) -> List[Any]: __UpperCAmelCase : List[str] = self.dummy_unet __UpperCAmelCase : List[str] = self.dummy_movq __UpperCAmelCase : Optional[Any] = DDIMScheduler( num_train_timesteps=1000 , beta_schedule="""linear""" , beta_start=0.00_085 , beta_end=0.012 , clip_sample=__lowercase , set_alpha_to_one=__lowercase , steps_offset=1 , prediction_type="""epsilon""" , thresholding=__lowercase , ) __UpperCAmelCase : str = { """unet""": unet, """scheduler""": scheduler, """movq""": movq, } return components def UpperCAmelCase ( self : str , __lowercase : Tuple , __lowercase : List[str]=0 ) -> Optional[Any]: __UpperCAmelCase : str = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__lowercase ) ).to(__lowercase ) __UpperCAmelCase : Optional[int] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to( __lowercase ) # create init_image __UpperCAmelCase : Tuple = floats_tensor((1, 3, 64, 64) , rng=random.Random(__lowercase ) ).to(__lowercase ) __UpperCAmelCase : Union[str, Any] = image.cpu().permute(0 , 2 , 3 
, 1 )[0] __UpperCAmelCase : Union[str, Any] = Image.fromarray(np.uinta(__lowercase ) ).convert("""RGB""" ).resize((256, 256) ) # create mask __UpperCAmelCase : Union[str, Any] = np.ones((64, 64) , dtype=np.floataa ) __UpperCAmelCase : List[str] = 0 if str(__lowercase ).startswith("""mps""" ): __UpperCAmelCase : List[str] = torch.manual_seed(__lowercase ) else: __UpperCAmelCase : Optional[int] = torch.Generator(device=__lowercase ).manual_seed(__lowercase ) __UpperCAmelCase : Optional[Any] = { """image""": init_image, """mask_image""": mask, """image_embeds""": image_embeds, """negative_image_embeds""": negative_image_embeds, """generator""": generator, """height""": 64, """width""": 64, """num_inference_steps""": 2, """guidance_scale""": 4.0, """output_type""": """np""", } return inputs def UpperCAmelCase ( self : List[str] ) -> Union[str, Any]: __UpperCAmelCase : Optional[Any] = """cpu""" __UpperCAmelCase : Dict = self.get_dummy_components() __UpperCAmelCase : str = self.pipeline_class(**__lowercase ) __UpperCAmelCase : Tuple = pipe.to(__lowercase ) pipe.set_progress_bar_config(disable=__lowercase ) __UpperCAmelCase : List[Any] = pipe(**self.get_dummy_inputs(__lowercase ) ) __UpperCAmelCase : Tuple = output.images __UpperCAmelCase : Optional[int] = pipe( **self.get_dummy_inputs(__lowercase ) , return_dict=__lowercase , )[0] __UpperCAmelCase : Union[str, Any] = image[0, -3:, -3:, -1] __UpperCAmelCase : str = image_from_tuple[0, -3:, -3:, -1] print(f"""image.shape {image.shape}""" ) assert image.shape == (1, 64, 64, 3) __UpperCAmelCase : Optional[Any] = np.array( [0.50_775_903, 0.49_527_195, 0.48_824_543, 0.50_192_237, 0.48_644_906, 0.49_373_814, 0.4_780_598, 0.47_234_827, 0.48_327_848] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 ), f""" expected_slice {expected_slice}, but got {image_slice.flatten()}""" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 ), f""" expected_slice {expected_slice}, but got 
{image_from_tuple_slice.flatten()}""" def UpperCAmelCase ( self : str ) -> Union[str, Any]: super().test_inference_batch_single_identical(expected_max_diff=3e-3 ) @slow @require_torch_gpu class a ( unittest.TestCase ): """simple docstring""" def UpperCAmelCase ( self : Union[str, Any] ) -> Any: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCAmelCase ( self : Tuple ) -> Union[str, Any]: __UpperCAmelCase : Any = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy""" ) __UpperCAmelCase : List[str] = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" ) __UpperCAmelCase : List[Any] = np.ones((768, 768) , dtype=np.floataa ) __UpperCAmelCase : Optional[Any] = 0 __UpperCAmelCase : Tuple = """a hat""" __UpperCAmelCase : str = KandinskyVaaPriorPipeline.from_pretrained( """kandinsky-community/kandinsky-2-2-prior""" , torch_dtype=torch.floataa ) pipe_prior.to(__lowercase ) __UpperCAmelCase : Any = KandinskyVaaInpaintPipeline.from_pretrained( """kandinsky-community/kandinsky-2-2-decoder-inpaint""" , torch_dtype=torch.floataa ) __UpperCAmelCase : int = pipeline.to(__lowercase ) pipeline.set_progress_bar_config(disable=__lowercase ) __UpperCAmelCase : int = torch.Generator(device="""cpu""" ).manual_seed(0 ) __UpperCAmelCase , __UpperCAmelCase : Optional[int] = pipe_prior( __lowercase , generator=__lowercase , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple() __UpperCAmelCase : Optional[int] = pipeline( image=__lowercase , mask_image=__lowercase , image_embeds=__lowercase , negative_image_embeds=__lowercase , generator=__lowercase , num_inference_steps=100 , height=768 , width=768 , output_type="""np""" , ) __UpperCAmelCase : List[Any] = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(__lowercase , 
__lowercase )
63
def A__ ( snake_case_ : int ):
    """Compute the Catalan numbers C(0)..C(n) via dynamic programming.

    Args:
        snake_case_: inclusive upper limit ``n`` of the sequence; must be >= 0.

    Returns:
        list[int]: ``[C(0), C(1), ..., C(n)]``.

    Raises:
        ValueError: if the limit is negative.
    """
    upper_limit = snake_case_
    if upper_limit < 0:
        raise ValueError('Limit for the Catalan sequence must be \u2265 0')

    catalan_list = [0] * (upper_limit + 1)

    # Base cases: C(0) = C(1) = 1
    catalan_list[0] = 1
    if upper_limit > 0:
        catalan_list[1] = 1

    # Recurrence: C(i) = sum(C(j) * C(i - j - 1)) for j in 0..i-1.
    # The original inner loop iterated over the *limit* instead of i and
    # referenced names that were never bound, so it could not run at all.
    for i in range(2, upper_limit + 1):
        for j in range(i):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
    return catalan_list


# The interactive driver below refers to the function by this name;
# keep both names bound so neither entry point breaks.
catalan_numbers = A__

if __name__ == "__main__":
    print('\n********* Catalan Numbers Using Dynamic Programming ************\n')
    print('\n*** Enter -1 at any time to quit ***')
    print('\nEnter the upper limit (\u2265 0) for the Catalan number sequence: ', end='')
    try:
        while True:
            N = int(input().strip())
            if N < 0:
                print('\n********* Goodbye!! ************')
                break
            else:
                print(f'''The Catalan numbers from 0 through {N} are:''')
                print(catalan_numbers(N))
                print('Try another upper limit for the sequence: ', end='')
    except (NameError, ValueError):
        print('\n********* Invalid input, goodbye! ************\n')

    import doctest

    doctest.testmod()
64
0
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = { 'caidas/swin2sr-classicalsr-x2-64': ( 'https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json' ), } class __lowercase ( __lowerCamelCase ): snake_case_ = """swin2sr""" snake_case_ = { """hidden_size""": """embed_dim""", """num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers""", } def __init__( self : List[str] ,A : Tuple=64 ,A : Tuple=1 ,A : List[Any]=3 ,A : Dict=180 ,A : int=[6, 6, 6, 6, 6, 6] ,A : Optional[Any]=[6, 6, 6, 6, 6, 6] ,A : Any=8 ,A : Optional[int]=2.0 ,A : Tuple=True ,A : int=0.0 ,A : Dict=0.0 ,A : int=0.1 ,A : List[Any]="gelu" ,A : Optional[Any]=False ,A : Union[str, Any]=0.0_2 ,A : str=1e-5 ,A : Optional[int]=2 ,A : int=1.0 ,A : Any="1conv" ,A : Tuple="pixelshuffle" ,**A : Optional[int] ,): '''simple docstring''' super().__init__(**A ) UpperCAmelCase__ : List[Any] = image_size UpperCAmelCase__ : List[str] = patch_size UpperCAmelCase__ : Dict = num_channels UpperCAmelCase__ : str = embed_dim UpperCAmelCase__ : List[Any] = depths UpperCAmelCase__ : Union[str, Any] = len(A ) UpperCAmelCase__ : str = num_heads UpperCAmelCase__ : int = window_size UpperCAmelCase__ : int = mlp_ratio UpperCAmelCase__ : Any = qkv_bias UpperCAmelCase__ : Optional[int] = hidden_dropout_prob UpperCAmelCase__ : List[Any] = attention_probs_dropout_prob UpperCAmelCase__ : Optional[Any] = drop_path_rate UpperCAmelCase__ : Optional[int] = hidden_act UpperCAmelCase__ : List[Any] = use_absolute_embeddings UpperCAmelCase__ : Union[str, Any] = layer_norm_eps UpperCAmelCase__ : List[str] = initializer_range UpperCAmelCase__ : Dict = upscale UpperCAmelCase__ : str = img_range UpperCAmelCase__ : int = resi_connection UpperCAmelCase__ : str = upsampler
65
# Package initializer for the UnCLIP pipelines.
# Exposes the real pipeline classes when their heavy dependencies are present,
# and falls back to placeholder "dummy" objects (which raise a helpful error
# on use) when they are not.

from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)


try:
    # UnCLIP needs both torch and transformers >= 4.25.0; raise to trigger
    # the dummy-object fallback below if either requirement is unmet.
    if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Dependencies missing: export dummies so `import` still succeeds and
    # users get a clear error only when they actually try to use the classes.
    from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
    # Dependencies available: export the real implementations.
    from .pipeline_unclip import UnCLIPPipeline
    from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
    from .text_proj import UnCLIPTextProjModel
64
0
"""Convert original YOLOS checkpoints to the Hugging Face format.

Fixes vs. original: every function was defined under the same duplicated
placeholder name (so the call sites raised NameError), several functions had
duplicated parameter names (a SyntaxError), and the state-dict assignment
targets had been stripped. Function/parameter names are restored to match
the existing call sites in this file.
"""

import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_yolos_config(yolos_name: str) -> YolosConfig:
    """Build a YolosConfig matching the named original architecture."""
    config = YolosConfig()

    # size of the architecture
    if "yolos_ti" in yolos_name:
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
        config.image_size = [800, 1333]
        config.use_mid_position_embeddings = False
    elif yolos_name == "yolos_s_dWr":
        config.hidden_size = 330
        config.num_hidden_layers = 14
        config.num_attention_heads = 6
        config.intermediate_size = 1320
    elif "yolos_s" in yolos_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    elif "yolos_b" in yolos_name:
        config.image_size = [800, 1344]

    # All variants detect the 91 COCO classes.
    config.num_labels = 91
    repo_id = 'huggingface/label-files'
    filename = 'coco-detection-id2label.json'
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config


def read_in_q_k_v(state_dict, config, base_model: bool = False) -> None:
    """Split each layer's fused timm-style qkv projection into q/k/v tensors in place."""
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict.
        # NOTE(review): the destination key names below were reconstructed from the
        # slicing pattern; confirm against YolosForObjectDetection's parameter names.
        state_dict[f"vit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def rename_key(name: str) -> str:
    """Translate one original checkpoint key to the HF naming scheme."""
    if "backbone" in name:
        name = name.replace('backbone', 'vit')
    if "cls_token" in name:
        name = name.replace('cls_token', 'embeddings.cls_token')
    if "det_token" in name:
        name = name.replace('det_token', 'embeddings.detection_tokens')
    if "mid_pos_embed" in name:
        name = name.replace('mid_pos_embed', 'encoder.mid_position_embeddings')
    if "pos_embed" in name:
        name = name.replace('pos_embed', 'embeddings.position_embeddings')
    if "patch_embed.proj" in name:
        name = name.replace('patch_embed.proj', 'embeddings.patch_embeddings.projection')
    if "blocks" in name:
        name = name.replace('blocks', 'encoder.layer')
    if "attn.proj" in name:
        name = name.replace('attn.proj', 'attention.output.dense')
    if "attn" in name:
        name = name.replace('attn', 'attention.self')
    if "norm1" in name:
        name = name.replace('norm1', 'layernorm_before')
    if "norm2" in name:
        name = name.replace('norm2', 'layernorm_after')
    if "mlp.fc1" in name:
        name = name.replace('mlp.fc1', 'intermediate.dense')
    if "mlp.fc2" in name:
        name = name.replace('mlp.fc2', 'output.dense')
    if "class_embed" in name:
        name = name.replace('class_embed', 'class_labels_classifier')
    if "bbox_embed" in name:
        name = name.replace('bbox_embed', 'bbox_predictor')
    if "vit.norm" in name:
        name = name.replace('vit.norm', 'vit.layernorm')
    return name


def convert_state_dict(orig_state_dict: dict, model) -> dict:
    """Rename all keys and split fused qkv matrices to fit the HF model."""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split('.')
            layer_num = int(key_split[2])
            dim = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
            # NOTE(review): destination key names reconstructed from the slicing
            # pattern; q is rows [:dim], k is [dim:2*dim], v is the last dim rows.
            if "weight" in key:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.weight"] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.bias"] = val[:dim]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict


def prepare_img() -> torch.Tensor:
    """Download the standard COCO cats test image used for output verification."""
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_yolos_checkpoint(yolos_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    """Convert a checkpoint, verify its outputs against known slices, and save it."""
    config = get_yolos_config(yolos_name)

    # load original state_dict
    state_dict = torch.load(checkpoint_path, map_location='cpu')['model']

    # load 🤗 model
    model = YolosForObjectDetection(config)
    model.eval()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # Check outputs on an image, prepared by YolosImageProcessor
    size = 800 if yolos_name != 'yolos_ti' else 512
    image_processor = YolosImageProcessor(format='coco_detection', size=size)
    encoding = image_processor(images=prepare_img(), return_tensors='pt')
    outputs = model(**encoding)
    logits, pred_boxes = outputs.logits, outputs.pred_boxes

    expected_slice_logits, expected_slice_boxes = None, None
    if yolos_name == "yolos_ti":
        expected_slice_logits = torch.tensor(
            [[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]]
        )
    elif yolos_name == "yolos_s_200_pre":
        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]]
        )
    elif yolos_name == "yolos_s_300_pre":
        expected_slice_logits = torch.tensor(
            [[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]]
        )
    elif yolos_name == "yolos_s_dWr":
        expected_slice_logits = torch.tensor(
            [[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]]
        )
    elif yolos_name == "yolos_base":
        expected_slice_logits = torch.tensor(
            [[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]]
        )
    else:
        raise ValueError(f"""Unknown yolos_name: {yolos_name}""")

    assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1E-4)
    assert torch.allclose(pred_boxes[0, :3, :3], expected_slice_boxes, atol=1E-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"""Saving model {yolos_name} to {pytorch_dump_folder_path}""")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"""Saving image processor to {pytorch_dump_folder_path}""")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model_mapping = {
            'yolos_ti': 'yolos-tiny',
            'yolos_s_200_pre': 'yolos-small',
            'yolos_s_300_pre': 'yolos-small-300',
            'yolos_s_dWr': 'yolos-small-dwr',
            'yolos_base': 'yolos-base',
        }
        print('Pushing to the hub...')
        model_name = model_mapping[yolos_name]
        image_processor.push_to_hub(model_name, organization='hustvl')
        model.push_to_hub(model_name, organization='hustvl')


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--yolos_name",
        default="yolos_s_200_pre",
        type=str,
        help=(
            "Name of the YOLOS model you'd like to convert. Should be one of 'yolos_ti', 'yolos_s_200_pre',"
            " 'yolos_s_300_pre', 'yolos_s_dWr', 'yolos_base'."
        ),
    )
    parser.add_argument(
        "--checkpoint_path", default=None, type=str, help="Path to the original state dict (.pth file)."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )
    args = parser.parse_args()
    convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
66
"""Convert a metaseq OPT checkpoint into a Hugging Face OPTModel checkpoint.

Fixes vs. original: both functions were defined under the same duplicated
placeholder name while the call sites referenced ``load_checkpoint`` and
``convert_opt_checkpoint`` (NameError), and the q/k/v assignment targets in
the qkv-splitting loop had been stripped.
"""

import argparse
from pathlib import Path

import torch

from transformers import OPTConfig, OPTModel
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def load_checkpoint(checkpoint_path):
    """Load a metaseq state dict and normalize its keys to the HF OPT layout."""
    sd = torch.load(checkpoint_path, map_location='cpu')
    if "model" in sd.keys():
        sd = torch.load(checkpoint_path, map_location='cpu')['model']

    # pop unnecessary weights
    keys_to_delete = [
        'decoder.version',
        'decoder.output_projection.weight',
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(key)

    keys_to_rename = {
        'decoder.project_in_dim.weight': 'decoder.project_in.weight',
        'decoder.project_out_dim.weight': 'decoder.project_out.weight',
        'decoder.layer_norm.weight': 'decoder.final_layer_norm.weight',
        'decoder.layer_norm.bias': 'decoder.final_layer_norm.bias',
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            sd[new_key] = sd.pop(old_key)

    keys = list(sd.keys())
    for key in keys:
        if ".qkv_proj." in key:
            value = sd[key]
            # We split QKV in separate Q,K,V
            q_name = key.replace('.qkv_proj.', '.q_proj.')
            k_name = key.replace('.qkv_proj.', '.k_proj.')
            v_name = key.replace('.qkv_proj.', '.v_proj.')

            depth = value.shape[0]
            assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            k, v, q = torch.split(value, depth // 3, dim=0)

            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v
            del sd[key]

    return sd


@torch.no_grad()
def convert_opt_checkpoint(checkpoint_path, pytorch_dump_folder_path, config=None):
    """Convert a metaseq checkpoint to an OPTModel and save it to disk.

    `config` may be a pretrained-config identifier/path; when None the
    default OPTConfig is used.
    """
    state_dict = load_checkpoint(checkpoint_path)

    if config is not None:
        config = OPTConfig.from_pretrained(config)
    else:
        config = OPTConfig()

    model = OPTModel(config).half().eval()
    model.load_state_dict(state_dict)

    # Check results
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--fairseq_path',
        type=str,
        help=(
            'path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:'
            ' https://huggingface.co/models?other=opt_metasq'
        ),
    )
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument('--hf_config', default=None, type=str, help='Define HF config.')
    args = parser.parse_args()
    convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
64
0
"""SageMaker multi-node model-parallelism integration test.

Fixes vs. original: all four methods were defined under the same duplicated
placeholder name (so only the last survived on the class), the parameters were
renamed to a placeholder while the bodies still referenced `instance_count` /
`job_name`, and `check=` in setUp referenced an undefined name.
"""

import json
import os
import subprocess
import unittest
from ast import literal_eval

import pytest
from parameterized import parameterized, parameterized_class

from . import is_sagemaker_available


if is_sagemaker_available():
    from sagemaker import Session, TrainingJobAnalytics
    from sagemaker.huggingface import HuggingFace


@pytest.mark.skipif(
    literal_eval(os.getenv('TEST_SAGEMAKER', 'False')) is not True,
    reason='Skipping test because should only be run when releasing minor transformers version',
)
@pytest.mark.usefixtures('sm_env')
@parameterized_class(
    [
        {
            'framework': 'pytorch',
            'script': 'run_glue_model_parallelism.py',
            'model_name_or_path': 'roberta-large',
            'instance_type': 'ml.p3dn.24xlarge',
            'results': {'train_runtime': 1600, 'eval_accuracy': 0.3, 'eval_loss': 1.2},
        },
        {
            'framework': 'pytorch',
            'script': 'run_glue.py',
            'model_name_or_path': 'roberta-large',
            'instance_type': 'ml.p3dn.24xlarge',
            'results': {'train_runtime': 1600, 'eval_accuracy': 0.3, 'eval_loss': 1.2},
        },
    ]
)
class A_(unittest.TestCase):
    """Runs a GLUE fine-tuning job on SageMaker with smdistributed model parallelism
    and asserts runtime/accuracy/loss against the expected results above."""

    def setUp(self):
        # Copy the example training script into the job's source directory.
        if self.framework == "pytorch":
            subprocess.run(
                f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split(),
                encoding='utf-8',
                check=True,
            )
        assert hasattr(self, 'env')

    def create_estimator(self, instance_count):
        # configuration for running training on smdistributed Model Parallel
        mpi_options = {
            'enabled': True,
            'processes_per_host': 8,
        }
        smp_options = {
            'enabled': True,
            'parameters': {
                'microbatches': 4,
                'placement_strategy': 'spread',
                'pipeline': 'interleaved',
                'optimize': 'speed',
                'partitions': 4,
                'ddp': True,
            },
        }
        distribution = {'smdistributed': {'modelparallel': smp_options}, 'mpi': mpi_options}
        name_extension = 'trainer' if self.script == 'run_glue.py' else 'smtrainer'
        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f"""{self.env.base_job_name}-{instance_count}-smp-{name_extension}""",
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={
                **self.env.hyperparameters,
                'model_name_or_path': self.model_name_or_path,
                'max_steps': 500,
            },
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version='py36',
        )

    def save_results_as_csv(self, job_name):
        # Export CloudWatch metrics of a finished job next to the test sources.
        TrainingJobAnalytics(job_name).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""")

    @parameterized.expand([(1,)])
    def test_script(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)
        # run training
        estimator.fit()
        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == 'eval_accuracy']['value'])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == 'eval_loss']['value'])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get('TrainingTimeInSeconds', 999999)
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results['eval_accuracy'] for t in eval_accuracy)
        assert all(t <= self.results['eval_loss'] for t in eval_loss)
        # dump tests result into json file to share in PR
        with open(f"""{estimator.latest_training_job.name}.json""", 'w') as outfile:
            json.dump({'train_time': train_runtime, 'eval_accuracy': eval_accuracy, 'eval_loss': eval_loss}, outfile)
67
def A__(density: float, bulk_modulus: float) -> float:
    """Return the speed of sound in a fluid via the Newton-Laplace formula.

    c = sqrt(K / rho), where K is the bulk modulus (Pa) and rho the density
    (kg/m^3). Fix vs. original: both parameters were declared with the same
    placeholder name (a SyntaxError) while the body referenced `density` and
    `bulk_modulus`; the intended names are restored.

    Raises:
        ValueError: if density or bulk modulus is not strictly positive.
    """
    if density <= 0:
        raise ValueError('Impossible fluid density')
    if bulk_modulus <= 0:
        raise ValueError('Impossible bulk modulus')

    return (bulk_modulus / density) ** 0.5


if __name__ == "__main__":
    import doctest

    doctest.testmod()
64
0
"""RetriBERT model configuration (deprecated model family).

Fixes vs. original: every ``__init__`` parameter was declared with the same
placeholder name (a SyntaxError) while the body referenced the real names;
the base class referenced an undefined name instead of the imported
``PretrainedConfig``; and the two module-level assignments shared one name,
so the logger was shadowed by the archive map.
"""

from ....configuration_utils import PretrainedConfig
from ....utils import logging


logger = logging.get_logger(__name__)

# TODO: upload to AWS
RETRIBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "yjernite/retribert-base-uncased": (
        "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json"
    ),
}


class _A(PretrainedConfig):
    """Configuration for a RetriBERT dual-encoder retrieval model.

    Defaults reproduce the `yjernite/retribert-base-uncased` architecture.
    """

    model_type = 'retribert'

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=8,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        share_encoders=True,
        projection_dim=128,
        pad_token_id=0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # whether the query and document towers share one encoder
        self.share_encoders = share_encoders
        self.projection_dim = projection_dim
68
# Package initializer for BLIP-2 with lazy imports.
#
# Fixes vs. original: the import structure dict was assigned to a throwaway
# name while `_LazyModule` referenced `_import_structure` (NameError); the
# torch-only modeling list was never added to the structure; the
# TYPE_CHECKING imports pointed at non-existent `blip_a` modules/classes
# instead of the `blip_2` names declared in the structure; and the
# `_LazyModule` instance was never installed in `sys.modules`.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    'configuration_blip_2': [
        'BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'Blip2Config',
        'Blip2QFormerConfig',
        'Blip2VisionConfig',
    ],
    'processing_blip_2': ['Blip2Processor'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch missing: expose only configuration/processing symbols.
    pass
else:
    _import_structure['modeling_blip_2'] = [
        'BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST',
        'Blip2Model',
        'Blip2QFormerModel',
        'Blip2PreTrainedModel',
        'Blip2ForConditionalGeneration',
        'Blip2VisionModel',
    ]

if TYPE_CHECKING:
    from .configuration_blip_2 import (
        BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Blip2Config,
        Blip2QFormerConfig,
        Blip2VisionConfig,
    )
    from .processing_blip_2 import Blip2Processor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blip_2 import (
            BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Blip2ForConditionalGeneration,
            Blip2Model,
            Blip2PreTrainedModel,
            Blip2QFormerModel,
            Blip2VisionModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so submodules import on first use.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
64
0
"""Convert a fairseq M2M100 checkpoint into a Hugging Face MaMaaa checkpoint.

Fixes vs. original: the `__main__` call passed ``args.fairseq_pathß`` — a
garbled trailing 'ß' making it an undefined attribute — and all three
functions were defined under one duplicated placeholder name while being
called as ``remove_ignore_keys_`` / ``make_linear_from_emb`` /
``convert_fairseq_mamaaa_checkpoint_from_disk``.
"""

import argparse

import torch
from torch import nn

from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration


def remove_ignore_keys_(state_dict):
    """Drop fairseq bookkeeping keys that have no HF equivalent (in place)."""
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    """Return a bias-free Linear whose weight shares the embedding's data (weight tying)."""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_mamaaa_checkpoint_from_disk(checkpoint_path):
    """Load a fairseq M2M100 checkpoint and return an equivalent MaMaaa model."""
    mam_aaa = torch.load(checkpoint_path, map_location="cpu")
    # newer fairseq checkpoints store hyperparameters under cfg/model
    args = mam_aaa["args"] or mam_aaa["cfg"]["model"]
    state_dict = mam_aaa["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]

    config = MaMaaaConfig(
        vocab_size=vocab_size,
        max_position_embeddings=1024,
        encoder_layers=args.encoder_layers,
        decoder_layers=args.decoder_layers,
        encoder_attention_heads=args.encoder_attention_heads,
        decoder_attention_heads=args.decoder_attention_heads,
        encoder_ffn_dim=args.encoder_ffn_embed_dim,
        decoder_ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.encoder_embed_dim,
        encoder_layerdrop=args.encoder_layerdrop,
        decoder_layerdrop=args.decoder_layerdrop,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="relu",
    )

    # NOTE(review): target key reconstructed — the decoder embeddings are
    # reused as the shared embedding table; confirm against MaMaaa's layout.
    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = MaMaaaForConditionalGeneration(config)
    # strict=False: lm_head is rebuilt from the shared embeddings below.
    model.model.load_state_dict(state_dict, strict=False)
    model.lm_head = make_linear_from_emb(model.model.shared)
    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('''fairseq_path''', type=str, help='''path to a model.pt on local filesystem.''')
    parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
    args = parser.parse_args()
    model = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
69
"""Text-classification task template for `datasets`.

Fixes vs. original: `frozen=` and the base class referenced an undefined
placeholder; the five class-level fields were all assigned to one duplicated
name (so earlier fields were lost); and the alignment method's parameter was
renamed away while its body still referenced `features` and the `isinstance`
check compared against the parameter instead of `ClassLabel`.
"""

import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import ClassLabel, Features, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class _lowerCamelCase(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="text-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    text_column: str = "text"
    label_column: str = "labels"

    def align_with_features(self, features):
        """Return a copy of this template whose label schema uses the dataset's ClassLabel."""
        if self.label_column not in features:
            raise ValueError(f'Column {self.label_column} is not present in features.')
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f'Column {self.label_column} is not a ClassLabel.')
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        # frozen dataclass: mutate the copy through __dict__.
        # NOTE(review): assignment target reconstructed from the surrounding
        # logic — confirm against TaskTemplate's other subclasses.
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        """Map dataset column names to the canonical task column names."""
        return {
            self.text_column: "text",
            self.label_column: "labels",
        }
64
0
"""CPU smoke tests for accelerate's debug launcher.

Fix vs. original: both test methods were defined under the same duplicated
name, so the second definition shadowed the first and only one test ever ran.
"""

import unittest

from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script


@require_cpu
class A(unittest.TestCase):
    def test_can_launch_test_script(self):
        # Launch the end-to-end test script through the debug launcher.
        debug_launcher(test_script.main)

    def test_can_launch_test_ops(self):
        # Launch the distributed-ops test through the debug launcher.
        debug_launcher(test_ops.main)
70
"""Checks that every optional backend referenced by diffusers' dummy objects
is declared in its dependency-versions table.

Fixes vs. original: both methods were defined under the same duplicated name
(second shadowed the first); `inspect.getmembers` was called on an undefined
placeholder instead of the `diffusers` module; and the backend-name
normalization assigned to throwaway names instead of rebinding `backend`.
"""

import inspect
import unittest


class _lowerCamelCase(unittest.TestCase):
    def test_diffusers_import(self):
        # diffusers must at least be importable.
        try:
            import diffusers  # noqa: F401
        except ImportError:
            assert False

    def test_backend_registration(self):
        import diffusers
        from diffusers.dependency_versions_table import deps

        all_classes = inspect.getmembers(diffusers, inspect.isclass)

        for cls_name, cls_module in all_classes:
            if "dummy_" in cls_module.__module__:
                for backend in cls_module._backends:
                    # pip package names differ from the backend identifiers
                    if backend == "k_diffusion":
                        backend = '''k-diffusion'''
                    elif backend == "invisible_watermark":
                        backend = '''invisible-watermark'''
                    assert backend in deps, f'{backend} is not in the deps table!'
64
0
'''simple docstring''' import json import os import re import unicodedata from json.encoder import INFINITY from typing import Any, Dict, List, Optional, Tuple, Union import numpy as np import regex from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...tokenization_utils_base import BatchEncoding from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging from ...utils.generic import _is_jax, _is_numpy _lowerCamelCase = logging.get_logger(__name__) _lowerCamelCase = { """artists_file""": """artists.json""", """lyrics_file""": """lyrics.json""", """genres_file""": """genres.json""", } _lowerCamelCase = { """artists_file""": { """jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json""", }, """genres_file""": { """jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json""", }, """lyrics_file""": { """jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json""", }, } _lowerCamelCase = { """jukebox""": 512, } class _snake_case (__SCREAMING_SNAKE_CASE): __A : int =VOCAB_FILES_NAMES __A : Optional[Any] =PRETRAINED_VOCAB_FILES_MAP __A : Any =PRETRAINED_LYRIC_TOKENS_SIZES __A : Optional[Any] =["input_ids", "attention_mask"] def __init__( self ,_snake_case ,_snake_case ,_snake_case ,_snake_case=["v3", "v2", "v2"] ,_snake_case=5_12 ,_snake_case=5 ,_snake_case="<|endoftext|>" ,**_snake_case ,): UpperCAmelCase_ : Union[str, Any] = AddedToken(_snake_case ,lstrip=_snake_case ,rstrip=_snake_case ) if isinstance(_snake_case ,_snake_case ) else unk_token super().__init__( unk_token=_snake_case ,n_genres=_snake_case ,version=_snake_case ,max_n_lyric_tokens=_snake_case ,**_snake_case ,) UpperCAmelCase_ : Union[str, Any] = version UpperCAmelCase_ : List[str] = max_n_lyric_tokens UpperCAmelCase_ : Optional[Any] = n_genres with open(_snake_case ,encoding="utf-8" ) as vocab_handle: UpperCAmelCase_ : int = json.load(_snake_case ) with open(_snake_case ,encoding="utf-8" ) as 
vocab_handle: UpperCAmelCase_ : Union[str, Any] = json.load(_snake_case ) with open(_snake_case ,encoding="utf-8" ) as vocab_handle: UpperCAmelCase_ : List[str] = json.load(_snake_case ) UpperCAmelCase_ : Union[str, Any] = R"[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+" # In v2, we had a n_vocab=80 and in v3 we missed + and so n_vocab=79 of characters. if len(self.lyrics_encoder ) == 79: UpperCAmelCase_ : str = oov.replace(R"\-'" ,R"\-+'" ) UpperCAmelCase_ : List[Any] = regex.compile(_snake_case ) UpperCAmelCase_ : Dict = {v: k for k, v in self.artists_encoder.items()} UpperCAmelCase_ : int = {v: k for k, v in self.genres_encoder.items()} UpperCAmelCase_ : str = {v: k for k, v in self.lyrics_encoder.items()} @property def UpperCamelCase__ ( self ): return len(self.artists_encoder ) + len(self.genres_encoder ) + len(self.lyrics_encoder ) def UpperCamelCase__ ( self ): return dict(self.artists_encoder ,self.genres_encoder ,self.lyrics_encoder ) def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case ): UpperCAmelCase_ : str = [self.artists_encoder.get(_snake_case ,0 ) for artist in list_artists] for genres in range(len(_snake_case ) ): UpperCAmelCase_ : List[Any] = [self.genres_encoder.get(_snake_case ,0 ) for genre in list_genres[genres]] UpperCAmelCase_ : Optional[Any] = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres] )) UpperCAmelCase_ : List[Any] = [[self.lyrics_encoder.get(_snake_case ,0 ) for character in list_lyrics[0]], [], []] return artists_id, list_genres, lyric_ids def UpperCamelCase__ ( self ,_snake_case ): return list(_snake_case ) def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case ,**_snake_case ): UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = self.prepare_for_tokenization(_snake_case ,_snake_case ,_snake_case ) UpperCAmelCase_ : str = self._tokenize(_snake_case ) return artist, genre, lyrics def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case ,_snake_case = False ): for idx 
in range(len(self.version ) ): if self.version[idx] == "v3": UpperCAmelCase_ : Optional[int] = artists[idx].lower() UpperCAmelCase_ : Union[str, Any] = [genres[idx].lower()] else: UpperCAmelCase_ : Optional[Any] = self._normalize(artists[idx] ) + ".v2" UpperCAmelCase_ : Optional[Any] = [ self._normalize(_snake_case ) + ".v2" for genre in genres[idx].split("_" ) ] # split is for the full dictionary with combined genres if self.version[0] == "v2": UpperCAmelCase_ : Union[str, Any] = regex.compile(R"[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+" ) UpperCAmelCase_ : Tuple = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+'\"()[] \t\n" UpperCAmelCase_ : Tuple = {vocab[index]: index + 1 for index in range(len(_snake_case ) )} UpperCAmelCase_ : Dict = 0 UpperCAmelCase_ : Any = len(_snake_case ) + 1 UpperCAmelCase_ : Any = self.vocab UpperCAmelCase_ : Union[str, Any] = {v: k for k, v in self.vocab.items()} UpperCAmelCase_ : Union[str, Any] = "" else: UpperCAmelCase_ : Optional[Any] = regex.compile(R"[^A-Za-z0-9.,:;!?\-+'\"()\[\] \t\n]+" ) UpperCAmelCase_ : str = self._run_strip_accents(_snake_case ) UpperCAmelCase_ : int = lyrics.replace("\\" ,"\n" ) UpperCAmelCase_ : List[Any] = self.out_of_vocab.sub("" ,_snake_case ), [], [] return artists, genres, lyrics def UpperCamelCase__ ( self ,_snake_case ): UpperCAmelCase_ : Any = unicodedata.normalize("NFD" ,_snake_case ) UpperCAmelCase_ : str = [] for char in text: UpperCAmelCase_ : List[str] = unicodedata.category(_snake_case ) if cat == "Mn": continue output.append(_snake_case ) return "".join(_snake_case ) def UpperCamelCase__ ( self ,_snake_case ): UpperCAmelCase_ : Optional[Any] = ( [chr(_snake_case ) for i in range(ord("a" ) ,ord("z" ) + 1 )] + [chr(_snake_case ) for i in range(ord("A" ) ,ord("Z" ) + 1 )] + [chr(_snake_case ) for i in range(ord("0" ) ,ord("9" ) + 1 )] + ["."] ) UpperCAmelCase_ : Optional[Any] = frozenset(_snake_case ) UpperCAmelCase_ : List[Any] = re.compile(R"_+" ) UpperCAmelCase_ : int = 
"".join([c if c in accepted else "_" for c in text.lower()] ) UpperCAmelCase_ : int = pattern.sub("_" ,_snake_case ).strip("_" ) return text def UpperCamelCase__ ( self ,_snake_case ): return " ".join(_snake_case ) def UpperCamelCase__ ( self ,_snake_case ,_snake_case = None ,_snake_case = False ): # Convert to TensorType if not isinstance(_snake_case ,_snake_case ): UpperCAmelCase_ : str = TensorType(_snake_case ) # Get a function reference for the correct framework if tensor_type == TensorType.TENSORFLOW: if not is_tf_available(): raise ImportError( "Unable to convert output to TensorFlow tensors format, TensorFlow is not installed." ) import tensorflow as tf UpperCAmelCase_ : Tuple = tf.constant UpperCAmelCase_ : Optional[int] = tf.is_tensor elif tensor_type == TensorType.PYTORCH: if not is_torch_available(): raise ImportError("Unable to convert output to PyTorch tensors format, PyTorch is not installed." ) import torch UpperCAmelCase_ : Tuple = torch.tensor UpperCAmelCase_ : str = torch.is_tensor elif tensor_type == TensorType.JAX: if not is_flax_available(): raise ImportError("Unable to convert output to JAX tensors format, JAX is not installed." ) import jax.numpy as jnp # noqa: F811 UpperCAmelCase_ : List[str] = jnp.array UpperCAmelCase_ : Optional[int] = _is_jax else: UpperCAmelCase_ : Optional[Any] = np.asarray UpperCAmelCase_ : str = _is_numpy # Do the tensor conversion in batch try: if prepend_batch_axis: UpperCAmelCase_ : Union[str, Any] = [inputs] if not is_tensor(_snake_case ): UpperCAmelCase_ : Any = as_tensor(_snake_case ) except: # noqa E722 raise ValueError( "Unable to create tensor, you should probably activate truncation and/or padding " "with 'padding=True' 'truncation=True' to have batched tensors with the same length." 
) return inputs def __call__( self ,_snake_case ,_snake_case ,_snake_case="" ,_snake_case="pt" ): UpperCAmelCase_ : Optional[int] = [0, 0, 0] UpperCAmelCase_ : List[Any] = [artist] * len(self.version ) UpperCAmelCase_ : List[Any] = [genres] * len(self.version ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : int = self.tokenize(_snake_case ,_snake_case ,_snake_case ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Tuple = self._convert_token_to_id(_snake_case ,_snake_case ,_snake_case ) UpperCAmelCase_ : Optional[Any] = [-INFINITY] * len(full_tokens[-1] ) UpperCAmelCase_ : Optional[int] = [ self.convert_to_tensors( [input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]] ,tensor_type=_snake_case ) for i in range(len(self.version ) ) ] return BatchEncoding({"input_ids": input_ids, "attention_masks": attention_masks} ) def UpperCamelCase__ ( self ,_snake_case ,_snake_case = None ): if not os.path.isdir(_snake_case ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return UpperCAmelCase_ : Dict = os.path.join( _snake_case ,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["artists_file"] ) with open(_snake_case ,"w" ,encoding="utf-8" ) as f: f.write(json.dumps(self.artists_encoder ,ensure_ascii=_snake_case ) ) UpperCAmelCase_ : Any = os.path.join( _snake_case ,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["genres_file"] ) with open(_snake_case ,"w" ,encoding="utf-8" ) as f: f.write(json.dumps(self.genres_encoder ,ensure_ascii=_snake_case ) ) UpperCAmelCase_ : int = os.path.join( _snake_case ,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["lyrics_file"] ) with open(_snake_case ,"w" ,encoding="utf-8" ) as f: f.write(json.dumps(self.lyrics_encoder ,ensure_ascii=_snake_case ) ) return (artists_file, genres_file, lyrics_file) def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case ): UpperCAmelCase_ : Tuple = self.artists_decoder.get(_snake_case 
) UpperCAmelCase_ : Union[str, Any] = [self.genres_decoder.get(_snake_case ) for genre in genres_index] UpperCAmelCase_ : str = [self.lyrics_decoder.get(_snake_case ) for character in lyric_index] return artist, genres, lyrics
71
import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized, parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , ) @pytest.mark.usefixtures("sm_env" ) @parameterized_class( [ { "framework": "pytorch", "script": "run_glue_model_parallelism.py", "model_name_or_path": "roberta-large", "instance_type": "ml.p3dn.24xlarge", "results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2}, }, { "framework": "pytorch", "script": "run_glue.py", "model_name_or_path": "roberta-large", "instance_type": "ml.p3dn.24xlarge", "results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2}, }, ] ) class _lowerCamelCase ( unittest.TestCase ): def UpperCamelCase_ ( self ) -> Any: if self.framework == "pytorch": subprocess.run( f'cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'.split() , encoding='''utf-8''' , check=lowerCAmelCase , ) assert hasattr(self , '''env''' ) def UpperCamelCase_ ( self , lowerCAmelCase ) -> Tuple: # configuration for running training on smdistributed Model Parallel SCREAMING_SNAKE_CASE__: Optional[Any]= { '''enabled''': True, '''processes_per_host''': 8, } SCREAMING_SNAKE_CASE__: Dict= { '''enabled''': True, '''parameters''': { '''microbatches''': 4, '''placement_strategy''': '''spread''', '''pipeline''': '''interleaved''', '''optimize''': '''speed''', '''partitions''': 4, '''ddp''': True, }, } SCREAMING_SNAKE_CASE__: Optional[Any]= {'''smdistributed''': {'''modelparallel''': smp_options}, '''mpi''': mpi_options} SCREAMING_SNAKE_CASE__: Dict= '''trainer''' if self.script == '''run_glue.py''' else '''smtrainer''' # 
creates estimator return HuggingFace( entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f'{self.env.base_job_name}-{instance_count}-smp-{name_extension}' , instance_count=lowerCAmelCase , instance_type=self.instance_type , debugger_hook_config=lowerCAmelCase , hyperparameters={ **self.env.hyperparameters, '''model_name_or_path''': self.model_name_or_path, '''max_steps''': 500, } , metric_definitions=self.env.metric_definitions , distribution=lowerCAmelCase , py_version='''py36''' , ) def UpperCamelCase_ ( self , lowerCAmelCase ) -> int: TrainingJobAnalytics(lowerCAmelCase ).export_csv(f'{self.env.test_path}/{job_name}_metrics.csv' ) @parameterized.expand([(1,)] ) def UpperCamelCase_ ( self , lowerCAmelCase ) -> int: # create estimator SCREAMING_SNAKE_CASE__: List[str]= self.create_estimator(lowerCAmelCase ) # run training estimator.fit() # result dataframe SCREAMING_SNAKE_CASE__: Any= TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe() # extract kpis SCREAMING_SNAKE_CASE__: Optional[int]= list(result_metrics_df[result_metrics_df.metric_name == '''eval_accuracy''']['''value'''] ) SCREAMING_SNAKE_CASE__: Optional[int]= list(result_metrics_df[result_metrics_df.metric_name == '''eval_loss''']['''value'''] ) # get train time from SageMaker job, this includes starting, preprocessing, stopping SCREAMING_SNAKE_CASE__: List[Any]= ( Session().describe_training_job(estimator.latest_training_job.name ).get('''TrainingTimeInSeconds''' , 999999 ) ) # assert kpis assert train_runtime <= self.results["train_runtime"] assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy ) assert all(t <= self.results['''eval_loss'''] for t in eval_loss ) # dump tests result into json file to share in PR with open(f'{estimator.latest_training_job.name}.json' , '''w''' ) as outfile: json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss} , 
lowerCAmelCase )
64
0
'''simple docstring''' from __future__ import annotations import time import numpy as np _UpperCAmelCase : int = [8, 5, 9, 7] _UpperCAmelCase : List[str] = [ [2, 0, 1, 1], [0, 1, 2, 1], [4, 0, 0, 3], [0, 2, 1, 0], [1, 0, 3, 0], ] _UpperCAmelCase : Union[str, Any] = [ [3, 2, 1, 4], [0, 2, 5, 2], [5, 1, 0, 5], [1, 5, 3, 0], [3, 0, 3, 3], ] class __magic_name__ : def __init__( self , snake_case_ , snake_case_ , snake_case_ , ): lowercase =claim_vector lowercase =allocated_resources_table lowercase =maximum_claim_table def _A( self ): return [ sum(p_item[i] for p_item in self.__allocated_resources_table ) for i in range(len(self.__allocated_resources_table[0] ) ) ] def _A( self ): return np.array(self.__claim_vector ) - np.array( self.__processes_resource_summation() ) def _A( self ): return [ list(np.array(self.__maximum_claim_table[i] ) - np.array(snake_case_ ) ) for i, allocated_resource in enumerate(self.__allocated_resources_table ) ] def _A( self ): return {self.__need().index(snake_case_ ): i for i in self.__need()} def _A( self , **snake_case_ ): lowercase =self.__need() lowercase =self.__allocated_resources_table lowercase =self.__available_resources() lowercase =self.__need_index_manager() for kw, val in kwargs.items(): if kw and val is True: self.__pretty_data() print('''_''' * 50 + '''\n''' ) while need_list: lowercase =False for each_need in need_list: lowercase =True for index, need in enumerate(snake_case_ ): if need > available_resources[index]: lowercase =False break if execution: lowercase =True # get the original index of the process from ind_ctrl db for original_need_index, need_clone in need_index_manager.items(): if each_need == need_clone: lowercase =original_need_index print(f'Process {process_number + 1} is executing.' 
) # remove the process run from stack need_list.remove(snake_case_ ) # update available/freed resources stack lowercase =np.array(snake_case_ ) + np.array( alloc_resources_table[process_number] ) print( '''Updated available resource stack for processes: ''' + ''' '''.join([str(snake_case_ ) for x in available_resources] ) ) break if safe: print('''The process is in a safe state.\n''' ) else: print('''System in unsafe state. Aborting...\n''' ) break def _A( self ): print(''' ''' * 9 + '''Allocated Resource Table''' ) for item in self.__allocated_resources_table: print( f'P{self.__allocated_resources_table.index(snake_case_ ) + 1}' + ''' '''.join(f'{it:>8}' for it in item ) + '''\n''' ) print(''' ''' * 9 + '''System Resource Table''' ) for item in self.__maximum_claim_table: print( f'P{self.__maximum_claim_table.index(snake_case_ ) + 1}' + ''' '''.join(f'{it:>8}' for it in item ) + '''\n''' ) print( '''Current Usage by Active Processes: ''' + ''' '''.join(str(snake_case_ ) for x in self.__claim_vector ) ) print( '''Initial Available Resources: ''' + ''' '''.join(str(snake_case_ ) for x in self.__available_resources() ) ) time.sleep(1 ) if __name__ == "__main__": import doctest doctest.testmod()
72
import unittest import numpy as np from diffusers import OnnxStableDiffusionInpaintPipelineLegacy from diffusers.utils.testing_utils import ( is_onnx_available, load_image, load_numpy, nightly, require_onnxruntime, require_torch_gpu, ) if is_onnx_available(): import onnxruntime as ort @nightly @require_onnxruntime @require_torch_gpu class _lowerCamelCase ( unittest.TestCase ): @property def UpperCamelCase_ ( self ) -> List[str]: return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def UpperCamelCase_ ( self ) -> List[Any]: SCREAMING_SNAKE_CASE__: Dict= ort.SessionOptions() SCREAMING_SNAKE_CASE__: List[str]= False return options def UpperCamelCase_ ( self ) -> int: SCREAMING_SNAKE_CASE__: Dict= load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/in_paint/overture-creations-5sI6fQgYIuo.png''' ) SCREAMING_SNAKE_CASE__: int= load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' ) SCREAMING_SNAKE_CASE__: Tuple= load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy''' ) # using the PNDM scheduler by default SCREAMING_SNAKE_CASE__: Tuple= OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained( '''CompVis/stable-diffusion-v1-4''' , revision='''onnx''' , safety_checker=lowerCAmelCase , feature_extractor=lowerCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=lowerCAmelCase ) SCREAMING_SNAKE_CASE__: Dict= '''A red cat sitting on a park bench''' SCREAMING_SNAKE_CASE__: Optional[Any]= np.random.RandomState(0 ) SCREAMING_SNAKE_CASE__: Any= pipe( prompt=lowerCAmelCase , image=lowerCAmelCase , mask_image=lowerCAmelCase , strength=0.75 , guidance_scale=7.5 , num_inference_steps=15 , 
generator=lowerCAmelCase , output_type='''np''' , ) SCREAMING_SNAKE_CASE__: Any= output.images[0] assert image.shape == (512, 512, 3) assert np.abs(expected_image - image ).max() < 1e-2
64
0
from __future__ import annotations a_ : Dict = 10 def lowerCamelCase__ (_UpperCAmelCase): SCREAMING_SNAKE_CASE = 1 SCREAMING_SNAKE_CASE = max(_UpperCAmelCase) while placement <= max_digit: # declare and initialize empty buckets SCREAMING_SNAKE_CASE = [[] for _ in range(_UpperCAmelCase)] # split list_of_ints between the buckets for i in list_of_ints: SCREAMING_SNAKE_CASE = int((i / placement) % RADIX) buckets[tmp].append(_UpperCAmelCase) # put each buckets' contents into list_of_ints SCREAMING_SNAKE_CASE = 0 for b in range(_UpperCAmelCase): for i in buckets[b]: SCREAMING_SNAKE_CASE = i a += 1 # move to next placement *= RADIX return list_of_ints if __name__ == "__main__": import doctest doctest.testmod()
73
from dataclasses import dataclass, field from typing import Tuple from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends from .benchmark_args_utils import BenchmarkArguments if is_torch_available(): import torch if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm lowercase_ : List[Any] = logging.get_logger(__name__) @dataclass class _lowerCamelCase ( UpperCamelCase_ ): __a = [ "no_inference", "no_cuda", "no_tpu", "no_speed", "no_memory", "no_env_print", "no_multi_process", ] def __init__( self , **lowerCAmelCase ) -> str: for deprecated_arg in self.deprecated_args: if deprecated_arg in kwargs: SCREAMING_SNAKE_CASE__: str= deprecated_arg[3:] setattr(self , lowerCAmelCase , not kwargs.pop(lowerCAmelCase ) ) logger.warning( f'{deprecated_arg} is depreciated. Please use --no_{positive_arg} or' f' {positive_arg}={kwargs[positive_arg]}' ) SCREAMING_SNAKE_CASE__: Tuple= kwargs.pop('''torchscript''' , self.torchscript ) SCREAMING_SNAKE_CASE__: Union[str, Any]= kwargs.pop('''torch_xla_tpu_print_metrics''' , self.torch_xla_tpu_print_metrics ) SCREAMING_SNAKE_CASE__: Any= kwargs.pop('''fp16_opt_level''' , self.fpaa_opt_level ) super().__init__(**lowerCAmelCase ) __a = field(default=UpperCamelCase_ , metadata={"help": "Trace the models using torchscript"} ) __a = field(default=UpperCamelCase_ , metadata={"help": "Print Xla/PyTorch tpu metrics"} ) __a = field( default="O1" , metadata={ "help": ( "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. 
" "See details at https://nvidia.github.io/apex/amp.html" ) } , ) @cached_property def UpperCamelCase_ ( self ) -> Tuple["torch.device", int]: requires_backends(self , ['''torch'''] ) logger.info('''PyTorch: setting up devices''' ) if not self.cuda: SCREAMING_SNAKE_CASE__: Any= torch.device('''cpu''' ) SCREAMING_SNAKE_CASE__: Union[str, Any]= 0 elif is_torch_tpu_available(): SCREAMING_SNAKE_CASE__: List[str]= xm.xla_device() SCREAMING_SNAKE_CASE__: Any= 0 else: SCREAMING_SNAKE_CASE__: List[Any]= torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' ) SCREAMING_SNAKE_CASE__: List[str]= torch.cuda.device_count() return device, n_gpu @property def UpperCamelCase_ ( self ) -> Optional[Any]: return is_torch_tpu_available() and self.tpu @property def UpperCamelCase_ ( self ) -> int: requires_backends(self , ['''torch'''] ) # TODO(PVP): currently only single GPU is supported return torch.cuda.current_device() @property def UpperCamelCase_ ( self ) -> "torch.device": requires_backends(self , ['''torch'''] ) return self._setup_devices[0] @property def UpperCamelCase_ ( self ) -> int: requires_backends(self , ['''torch'''] ) return self._setup_devices[1] @property def UpperCamelCase_ ( self ) -> str: return self.n_gpu > 0
64
0
import inspect import logging import os import random import shutil import tempfile import unittest import pytest import torch from torch import nn from torch.utils.data import DataLoader, TensorDataset from accelerate import Accelerator from accelerate.test_utils import execute_subprocess_async, require_cuda from accelerate.utils import ProjectConfiguration, set_seed lowercase_ = logging.getLogger(__name__) def a__ ( snake_case=2 , snake_case=3 , snake_case=16 , snake_case = 10 , snake_case = 2 ): """simple docstring""" def get_dataset(snake_case ): __SCREAMING_SNAKE_CASE : Union[str, Any] = torch.randn(batch_size * n_batches , 1 ) return TensorDataset(snake_case , a * x + b + 0.1 * torch.randn(batch_size * n_batches , 1 ) ) __SCREAMING_SNAKE_CASE : str = get_dataset(snake_case ) __SCREAMING_SNAKE_CASE : Dict = get_dataset(snake_case ) __SCREAMING_SNAKE_CASE : Optional[int] = DataLoader(snake_case , shuffle=snake_case , batch_size=snake_case , num_workers=4 ) __SCREAMING_SNAKE_CASE : Optional[Any] = DataLoader(snake_case , shuffle=snake_case , batch_size=snake_case , num_workers=4 ) return (train_dataloader, valid_dataloader) def a__ ( snake_case , snake_case , snake_case , snake_case , snake_case , snake_case=None ): """simple docstring""" __SCREAMING_SNAKE_CASE : Tuple = [] for epoch in range(snake_case ): # Train quickly model.train() for batch in dataloader: __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Optional[Any] = batch __SCREAMING_SNAKE_CASE : List[Any] = model(snake_case ) __SCREAMING_SNAKE_CASE : str = torch.nn.functional.mse_loss(snake_case , snake_case ) accelerator.backward(snake_case ) optimizer.step() optimizer.zero_grad() rands.append(random.random() ) # Introduce some randomness if scheduler is not None: scheduler.step() return rands class __UpperCamelCase ( nn.Module ): """simple docstring""" def __init__( self : Union[str, Any] ): """simple docstring""" super().__init__() __SCREAMING_SNAKE_CASE : Optional[int] = nn.Parameter(torch.randn(1 ) 
) __SCREAMING_SNAKE_CASE : Union[str, Any] = nn.Parameter(torch.randn(1 ) ) def UpperCAmelCase__ ( self : Union[str, Any] , _A : List[str] ): """simple docstring""" return x * self.a + self.b class __UpperCamelCase ( unittest.TestCase ): """simple docstring""" def UpperCAmelCase__ ( self : Any ): """simple docstring""" with tempfile.TemporaryDirectory() as tmpdir: set_seed(42 ) __SCREAMING_SNAKE_CASE : Optional[Any] = DummyModel() __SCREAMING_SNAKE_CASE : List[Any] = torch.optim.Adam(params=model.parameters() , lr=1e-3 ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : List[str] = dummy_dataloaders() __SCREAMING_SNAKE_CASE : Optional[Any] = ProjectConfiguration(total_limit=1 , project_dir=_A , automatic_checkpoint_naming=_A ) # Train baseline __SCREAMING_SNAKE_CASE : Optional[Any] = Accelerator(project_config=_A ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : List[Any] = accelerator.prepare( _A , _A , _A , _A ) # Save initial accelerator.save_state() # Save second state accelerator.save_state() self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 ) def UpperCAmelCase__ ( self : List[str] ): """simple docstring""" with tempfile.TemporaryDirectory() as tmpdir: set_seed(42 ) __SCREAMING_SNAKE_CASE : Union[str, Any] = DummyModel() __SCREAMING_SNAKE_CASE : int = torch.optim.Adam(params=model.parameters() , lr=1e-3 ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Optional[int] = dummy_dataloaders() # Train baseline __SCREAMING_SNAKE_CASE : Optional[int] = Accelerator() __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Optional[Any] = accelerator.prepare( _A , _A , _A , _A ) # Save initial __SCREAMING_SNAKE_CASE : List[Any] = os.path.join(_A , '''initial''' ) accelerator.save_state(_A ) ((__SCREAMING_SNAKE_CASE), (__SCREAMING_SNAKE_CASE)) : Any = model.a.item(), model.b.item() __SCREAMING_SNAKE_CASE : Dict = optimizer.state_dict() __SCREAMING_SNAKE_CASE : Tuple = 
train(3 , _A , _A , _A , _A ) ((__SCREAMING_SNAKE_CASE), (__SCREAMING_SNAKE_CASE)) : Union[str, Any] = model.a.item(), model.b.item() __SCREAMING_SNAKE_CASE : List[Any] = optimizer.state_dict() # Train partially set_seed(42 ) __SCREAMING_SNAKE_CASE : Optional[int] = DummyModel() __SCREAMING_SNAKE_CASE : Tuple = torch.optim.Adam(params=model.parameters() , lr=1e-3 ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Dict = dummy_dataloaders() __SCREAMING_SNAKE_CASE : List[str] = Accelerator() __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Union[str, Any] = accelerator.prepare( _A , _A , _A , _A ) accelerator.load_state(_A ) ((__SCREAMING_SNAKE_CASE), (__SCREAMING_SNAKE_CASE)) : Any = model.a.item(), model.b.item() __SCREAMING_SNAKE_CASE : Dict = optimizer.state_dict() self.assertEqual(_A , _A ) self.assertEqual(_A , _A ) self.assertEqual(_A , _A ) __SCREAMING_SNAKE_CASE : Tuple = train(2 , _A , _A , _A , _A ) # Save everything __SCREAMING_SNAKE_CASE : Dict = os.path.join(_A , '''checkpoint''' ) accelerator.save_state(_A ) # Load everything back in and make sure all states work accelerator.load_state(_A ) test_rands += train(1 , _A , _A , _A , _A ) ((__SCREAMING_SNAKE_CASE), (__SCREAMING_SNAKE_CASE)) : Tuple = model.a.item(), model.b.item() __SCREAMING_SNAKE_CASE : str = optimizer.state_dict() self.assertEqual(_A , _A ) self.assertEqual(_A , _A ) self.assertEqual(_A , _A ) self.assertEqual(_A , _A ) def UpperCAmelCase__ ( self : int ): """simple docstring""" with tempfile.TemporaryDirectory() as tmpdir: set_seed(42 ) __SCREAMING_SNAKE_CASE : List[str] = DummyModel() __SCREAMING_SNAKE_CASE : List[str] = torch.optim.Adam(params=model.parameters() , lr=1e-3 ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : str = dummy_dataloaders() __SCREAMING_SNAKE_CASE : Tuple = ProjectConfiguration(automatic_checkpoint_naming=_A ) # Train baseline __SCREAMING_SNAKE_CASE : Any = Accelerator(project_dir=_A , project_config=_A ) 
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : int = accelerator.prepare( _A , _A , _A , _A ) # Save initial accelerator.save_state() ((__SCREAMING_SNAKE_CASE), (__SCREAMING_SNAKE_CASE)) : Optional[int] = model.a.item(), model.b.item() __SCREAMING_SNAKE_CASE : Tuple = optimizer.state_dict() __SCREAMING_SNAKE_CASE : Any = train(3 , _A , _A , _A , _A ) ((__SCREAMING_SNAKE_CASE), (__SCREAMING_SNAKE_CASE)) : Union[str, Any] = model.a.item(), model.b.item() __SCREAMING_SNAKE_CASE : Tuple = optimizer.state_dict() # Train partially set_seed(42 ) __SCREAMING_SNAKE_CASE : Optional[int] = DummyModel() __SCREAMING_SNAKE_CASE : Optional[Any] = torch.optim.Adam(params=model.parameters() , lr=1e-3 ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Optional[Any] = dummy_dataloaders() __SCREAMING_SNAKE_CASE : Optional[int] = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=_A ) __SCREAMING_SNAKE_CASE : List[str] = Accelerator(project_dir=_A , project_config=_A ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Tuple = accelerator.prepare( _A , _A , _A , _A ) accelerator.load_state(os.path.join(_A , '''checkpoints''' , '''checkpoint_0''' ) ) ((__SCREAMING_SNAKE_CASE), (__SCREAMING_SNAKE_CASE)) : Optional[Any] = model.a.item(), model.b.item() __SCREAMING_SNAKE_CASE : Optional[int] = optimizer.state_dict() self.assertEqual(_A , _A ) self.assertEqual(_A , _A ) self.assertEqual(_A , _A ) __SCREAMING_SNAKE_CASE : Optional[int] = train(2 , _A , _A , _A , _A ) # Save everything accelerator.save_state() # Load everything back in and make sure all states work accelerator.load_state(os.path.join(_A , '''checkpoints''' , '''checkpoint_1''' ) ) test_rands += train(1 , _A , _A , _A , _A ) ((__SCREAMING_SNAKE_CASE), (__SCREAMING_SNAKE_CASE)) : Optional[int] = model.a.item(), model.b.item() __SCREAMING_SNAKE_CASE : int = optimizer.state_dict() self.assertEqual(_A , _A ) 
self.assertEqual(_A , _A ) self.assertEqual(_A , _A ) self.assertEqual(_A , _A ) def UpperCAmelCase__ ( self : Tuple ): """simple docstring""" __SCREAMING_SNAKE_CASE : Tuple = torch.tensor([1, 2, 3] ) __SCREAMING_SNAKE_CASE : str = torch.tensor([2, 3, 4] ) __SCREAMING_SNAKE_CASE : List[str] = DummyModel() __SCREAMING_SNAKE_CASE : int = torch.optim.Adam(net.parameters() ) __SCREAMING_SNAKE_CASE : int = Accelerator() with self.assertRaises(_A ) as ve: accelerator.register_for_checkpointing(_A , _A , _A , _A ) __SCREAMING_SNAKE_CASE : List[str] = str(ve.exception ) self.assertTrue('''Item at index 0''' in message ) self.assertTrue('''Item at index 1''' in message ) self.assertFalse('''Item at index 2''' in message ) self.assertFalse('''Item at index 3''' in message ) def UpperCAmelCase__ ( self : str ): """simple docstring""" with tempfile.TemporaryDirectory() as tmpdir: set_seed(42 ) __SCREAMING_SNAKE_CASE : List[str] = DummyModel() __SCREAMING_SNAKE_CASE : Optional[int] = torch.optim.Adam(params=model.parameters() , lr=1e-3 ) __SCREAMING_SNAKE_CASE : Tuple = torch.optim.lr_scheduler.StepLR(_A , step_size=1 , gamma=0.99 ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Tuple = dummy_dataloaders() __SCREAMING_SNAKE_CASE : int = ProjectConfiguration(automatic_checkpoint_naming=_A ) # Train baseline __SCREAMING_SNAKE_CASE : List[Any] = Accelerator(project_dir=_A , project_config=_A ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Union[str, Any] = accelerator.prepare( _A , _A , _A , _A , _A ) # Save initial accelerator.save_state() __SCREAMING_SNAKE_CASE : Optional[Any] = scheduler.state_dict() train(3 , _A , _A , _A , _A , _A ) self.assertNotEqual(_A , scheduler.state_dict() ) # Load everything back in and make sure all states work accelerator.load_state(os.path.join(_A , '''checkpoints''' , '''checkpoint_0''' ) ) self.assertEqual(_A , scheduler.state_dict() ) def UpperCAmelCase__ ( self : Dict 
): """simple docstring""" with tempfile.TemporaryDirectory() as tmpdir: set_seed(42 ) __SCREAMING_SNAKE_CASE : Optional[int] = DummyModel() __SCREAMING_SNAKE_CASE : List[str] = ProjectConfiguration(automatic_checkpoint_naming=_A , total_limit=2 ) # Train baseline __SCREAMING_SNAKE_CASE : Optional[Any] = Accelerator(project_dir=_A , project_config=_A ) __SCREAMING_SNAKE_CASE : Union[str, Any] = accelerator.prepare(_A ) # Save 3 states: for _ in range(11 ): accelerator.save_state() self.assertTrue(not os.path.exists(os.path.join(_A , '''checkpoints''' , '''checkpoint_0''' ) ) ) self.assertTrue(os.path.exists(os.path.join(_A , '''checkpoints''' , '''checkpoint_9''' ) ) ) self.assertTrue(os.path.exists(os.path.join(_A , '''checkpoints''' , '''checkpoint_10''' ) ) ) @require_cuda def UpperCAmelCase__ ( self : Tuple ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = ['''torchrun''', F'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )] execute_subprocess_async(_A , env=os.environ.copy() ) if __name__ == "__main__": lowercase_ = """/tmp/accelerate/state_checkpointing""" lowercase_ = DummyModel() lowercase_ = torch.optim.Adam(params=model.parameters(), lr=1e-3) lowercase_ = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99) lowercase_ , lowercase_ = dummy_dataloaders() lowercase_ = ProjectConfiguration(automatic_checkpoint_naming=True) # Train baseline lowercase_ = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision="""no""") if accelerator.process_index == 0: if os.path.exists(savedir): shutil.rmtree(savedir) os.makedirs(savedir) lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ = accelerator.prepare( model, optimizer, train_dataloader, valid_dataloader, scheduler ) lowercase_ , lowercase_ = accelerator.prepare(model, optimizer) train(3, model, train_dataloader, optimizer, accelerator, scheduler) # Check that the intial optimizer is loaded on the GPU for group in 
optimizer.param_groups: lowercase_ = group["""params"""][0].device break assert param_device.type == accelerator.device.type lowercase_ = model.cpu() accelerator.wait_for_everyone() accelerator.save_state() accelerator.wait_for_everyone() # Check CPU state accelerator.load_state(os.path.join(savedir, """checkpoints""", """checkpoint_0"""), map_location="""cpu""") for group in optimizer.param_groups: lowercase_ = group["""params"""][0].device break assert ( param_device.type == torch.device("""cpu""").type ), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}" # Check device state model.to(accelerator.device) accelerator.load_state(os.path.join(savedir, """checkpoints""", """checkpoint_0"""), map_location="""on_device""") for group in optimizer.param_groups: lowercase_ = group["""params"""][0].device break assert ( param_device.type == accelerator.device.type ), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}" # Check error with pytest.raises(TypeError, match="""Unsupported optimizer map location passed"""): accelerator.load_state(os.path.join(savedir, """checkpoints""", """checkpoint_0"""), map_location="""invalid""") accelerator.wait_for_everyone() if accelerator.process_index == 0: shutil.rmtree(savedir) accelerator.wait_for_everyone()
74
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import PoolFormerImageProcessor


class PoolFormerImageProcessingTester(unittest.TestCase):
    """Builds the image-processor kwargs and fake image inputs used by the tests below.

    NOTE: the anonymized source named both classes `_lowerCamelCase`, which made the
    `PoolFormerImageProcessingTester(self)` call in `setUp` a NameError; the canonical
    names are restored here.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize_and_center_crop=True,
        size=None,
        crop_pct=0.9,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"shortest_edge": 30}
        crop_size = crop_size if crop_size is not None else {"height": 30, "width": 30}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize_and_center_crop = do_resize_and_center_crop
        self.size = size
        self.crop_pct = crop_pct
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        """Return the kwargs dict used to instantiate PoolFormerImageProcessor."""
        return {
            "size": self.size,
            "do_resize_and_center_crop": self.do_resize_and_center_crop,
            "crop_pct": self.crop_pct,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }


@require_torch
@require_vision
class PoolFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Standard image-processor test suite for PoolFormerImageProcessor."""

    image_processing_class = PoolFormerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = PoolFormerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        # Every configured attribute must be exposed on the processor instance.
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize_and_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "crop_pct"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        # from_dict honors the defaults and lets integer kwargs override size dicts.
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 30})
        self.assertEqual(image_processor.crop_size, {"height": 30, "width": 30})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
64
0
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SWITCH_TRANSFORMERS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/switch-base-8": "https://huggingface.co/google/switch-base-8/blob/main/config.json",
}


class SwitchTransformersConfig(PretrainedConfig):
    """Configuration for a SwitchTransformers (sparse Mixture-of-Experts T5) model.

    NOTE: the anonymized source declared every ``__init__`` parameter as ``_A``
    (duplicate parameter names, a SyntaxError) and inherited from the undefined
    name ``__a``; the canonical signature and ``PretrainedConfig`` base are
    restored here. All defaults match the original values.
    """

    model_type = "switch_transformers"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=32_128,
        d_model=768,
        d_kv=64,
        d_ff=2_048,
        expert_capacity=64,
        num_layers=12,
        num_sparse_encoder_layers=3,
        num_decoder_layers=12,
        num_sparse_decoder_layers=3,
        num_heads=12,
        num_experts=8,
        router_bias=False,
        router_jitter_noise=0.01,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        router_z_loss_coef=0.001,
        router_aux_loss_coef=0.001,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        add_router_probs=False,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff

        self.num_sparse_encoder_layers = num_sparse_encoder_layers

        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_sparse_decoder_layers = num_sparse_decoder_layers

        # This tells us, each how many encoder layer we'll have to set a sparse layer.
        if self.num_sparse_encoder_layers > 0:
            self.encoder_sparse_step = self.num_layers // self.num_sparse_encoder_layers
        else:
            self.encoder_sparse_step = self.num_layers  # HACK: this will create 0 sparse layers

        # This tells us, each how many decoder layer we'll have to set a sparse layer.
        if self.num_sparse_decoder_layers > 0:
            self.decoder_sparse_step = self.num_decoder_layers // self.num_sparse_decoder_layers
        else:
            self.decoder_sparse_step = self.num_decoder_layers  # HACK: this will create 0 sparse layers

        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"""`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}""")
        self.router_dtype = router_dtype

        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance

        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        self.add_router_probs = add_router_probs

        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."""
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            **kwargs,
        )
75
import os
import random
import sys

from . import cryptomath_module as cryptomath
from . import rabin_miller

# Smallest candidate value for a primitive root / private key.
min_primitive_root = 3


def primitive_root(p_val: int) -> int:
    """Return a candidate primitive root of the prime ``p_val``.

    NOTE: the anonymized source named this (and every other function) ``A__``
    while callers used the real names, which was a NameError; names restored.
    """
    print("Generating primitive root of p")
    while True:
        g = random.randrange(3, p_val)
        # Reject values whose small powers are congruent to 1 (not primitive).
        if pow(g, 2, p_val) == 1:
            continue
        if pow(g, p_val, p_val) == 1:
            continue
        return g


def generate_key(key_size: int) -> tuple[tuple[int, int, int, int], tuple[int, int]]:
    """Generate an ElGamal (public_key, private_key) pair of ``key_size`` bits."""
    print("Generating prime p...")
    p = rabin_miller.generate_large_prime(key_size)  # select large prime number.
    e_1 = primitive_root(p)  # one primitive root on modulo p.
    d = random.randrange(3, p)  # private_key -> have to be greater than 2 for safety.
    e_2 = cryptomath.find_mod_inverse(pow(e_1, d, p), p)

    public_key = (key_size, e_1, e_2, p)
    private_key = (key_size, d)

    return public_key, private_key


def make_key_files(name: str, key_size: int) -> None:
    """Write ``{name}_pubkey.txt`` / ``{name}_privkey.txt``; abort if they exist."""
    if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"):
        print("\nWARNING:")
        print(
            f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            "Use a different name or delete these files and re-run this program."
        )
        sys.exit()

    public_key, private_key = generate_key(key_size)
    print(f"\nWriting public key to file {name}_pubkey.txt...")
    with open(f"{name}_pubkey.txt", "w") as fo:
        fo.write(f"{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}")

    print(f"Writing private key to file {name}_privkey.txt...")
    with open(f"{name}_privkey.txt", "w") as fo:
        fo.write(f"{private_key[0]},{private_key[1]}")


def main() -> None:
    """Generate a 2048-bit ElGamal key pair named 'elgamal'."""
    print("Making key files...")
    make_key_files("elgamal", 2_048)
    print("Key files generation successful")


if __name__ == "__main__":
    main()
64
0
"""simple docstring""" # coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # this script dumps information about the environment import os import platform import sys a_ = '3' print('Python version:', sys.version) print('OS platform:', platform.platform()) print('OS architecture:', platform.machine()) try: import torch print('Torch version:', torch.__version__) print('Cuda available:', torch.cuda.is_available()) print('Cuda version:', torch.version.cuda) print('CuDNN version:', torch.backends.cudnn.version()) print('Number of GPUs available:', torch.cuda.device_count()) except ImportError: print('Torch version:', None) try: import transformers print('transformers version:', transformers.__version__) except ImportError: print('transformers version:', None)
76
from math import factorial


def combinations(n: int, k: int) -> int:
    """Return the binomial coefficient C(n, k) = n! / (k! * (n - k)!).

    Raises:
        ValueError: if ``k`` is negative or greater than ``n`` (a factorial of
            a negative number would otherwise be required).

    NOTE: the anonymized source named this function ``A__`` while the script
    below calls ``combinations`` — a NameError; the name is restored and an
    alias keeps the old identifier working.
    """
    # If either of the conditions are true, the function is being asked
    # to calculate a factorial of a negative number, which is not possible
    if n < k or k < 0:
        raise ValueError('Please enter positive integers for n and k where n >= k')
    return factorial(n) // (factorial(k) * factorial(n - k))


# Backward-compatible alias for the previous (minified) public name.
A__ = combinations


if __name__ == "__main__":
    print(
        'The number of five-card hands possible from a standard',
        f'''fifty-two card deck is: {combinations(5_2, 5)}\n''',
    )

    print(
        'If a class of 40 students must be arranged into groups of',
        f'''4 for group projects, there are {combinations(4_0, 4)} ways''',
        'to arrange them.\n',
    )

    print(
        'If 10 teams are competing in a Formula One race, there',
        f'''are {combinations(1_0, 3)} ways that first, second and''',
        'third place can be awarded.',
    )
64
0
"""simple docstring""" import math def _UpperCamelCase ( UpperCamelCase = 100 ) -> int: """simple docstring""" __UpperCAmelCase : Any = sum(i * i for i in range(1 , n + 1 ) ) __UpperCAmelCase : Union[str, Any] = int(math.pow(sum(range(1 , n + 1 ) ) , 2 ) ) return square_of_sum - sum_of_squares if __name__ == "__main__": print(f'''{solution() = }''')
77
import itertools
import random
import unittest

import numpy as np

from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available

from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin


# Module-level RNG shared by floats_list when no rng is passed.
# NOTE: the anonymized source bound this to `lowercase_` while floats_list read
# `global_rng` — a NameError; the canonical name is restored.
global_rng = random.Random()

if is_torch_available():
    import torch


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Create a nested list of random floats with the given 2-D ``shape``."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values


class ASTFeatureExtractionTester(unittest.TestCase):
    """Builds feature-extractor kwargs and fake audio inputs for the tests below."""

    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs


@require_torch
@require_torchaudio
class ASTFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    """Standard feature-extraction test suite for ASTFeatureExtractor."""

    feature_extraction_class = ASTFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = ASTFeatureExtractionTester(self)

    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, padding=True, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, padding=True, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    @require_torch
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # double-precision input: pad() is expected to downcast to float32.
        # (the anonymized source garbled both dtypes to "floataa"; float64 in /
        # float32 out matches the sibling feature-extraction tests — TODO confirm)
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)

    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    @require_torch
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776,
             -1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133,
             -1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936,
             -0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = ASTFeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 1024, 128))
        # assertEqual: assertEquals is a deprecated alias
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
64
0
'''Levenshtein edit distance via memoized top-down recursion.'''
import functools


def min_distance_up_bottom(word1: str, word2: str) -> int:
    """Return the Levenshtein edit distance between ``word1`` and ``word2``.

    The allowed operations are insertion, deletion and substitution, each of
    cost 1. Implemented top-down with ``functools.cache`` memoization over the
    pair of indices.

    NOTE: the anonymized source declared the inner helper with two parameters
    both named ``snake_case_`` (a SyntaxError) and reused one garbled name for
    both word lengths; distinct names are restored here.

    >>> min_distance_up_bottom("intention", "execution")
    5
    >>> min_distance_up_bottom("intention", "")
    9
    >>> min_distance_up_bottom("", "")
    0
    """
    len_word1 = len(word1)
    len_word2 = len(word2)

    @functools.cache
    def min_distance(index1: int, index2: int) -> int:
        # if first word index is overflow - delete all from the second word
        if index1 >= len_word1:
            return len_word2 - index2
        # if second word index is overflow - delete all from the first word
        if index2 >= len_word2:
            return len_word1 - index1
        diff = int(word1[index1] != word2[index2])  # current letters not identical
        return min(
            1 + min_distance(index1 + 1, index2),
            1 + min_distance(index1, index2 + 1),
            diff + min_distance(index1 + 1, index2 + 1),
        )

    return min_distance(0, 0)


# Backward-compatible alias for the previous (minified) public name.
lowerCAmelCase_ = min_distance_up_bottom


if __name__ == "__main__":
    import doctest

    doctest.testmod()
78
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_speech_available,
    is_tf_available,
    is_torch_available,
)


# Lazy import structure: maps submodule name -> public names it provides.
# NOTE: the anonymized source assigned every structure update to `lowercase_`
# while passing the undefined name `_import_structure` to _LazyModule, and
# never installed the lazy module into sys.modules; both are restored here.
_import_structure = {
    "configuration_speech_to_text": ["SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Speech2TextConfig"],
    "processing_speech_to_text": ["Speech2TextProcessor"],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_speech_to_text"] = ["Speech2TextTokenizer"]

try:
    if not is_speech_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_speech_to_text"] = ["Speech2TextFeatureExtractor"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_speech_to_text"] = [
        "TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFSpeech2TextForConditionalGeneration",
        "TFSpeech2TextModel",
        "TFSpeech2TextPreTrainedModel",
    ]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_speech_to_text"] = [
        "SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Speech2TextForConditionalGeneration",
        "Speech2TextModel",
        "Speech2TextPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig
    from .processing_speech_to_text import Speech2TextProcessor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speech_to_text import Speech2TextTokenizer

    try:
        if not is_speech_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_speech_to_text import Speech2TextFeatureExtractor

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_speech_to_text import (
            TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSpeech2TextForConditionalGeneration,
            TFSpeech2TextModel,
            TFSpeech2TextPreTrainedModel,
        )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_to_text import (
            SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Speech2TextForConditionalGeneration,
            Speech2TextModel,
            Speech2TextPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
64
0
from typing import Any


class Node:
    """A single node of a singly linked list, holding arbitrary data."""

    def __init__(self, data: Any):
        self.data = data
        self.next = None  # reference to the next node, or None at the tail

    def __repr__(self) -> str:
        return f"Node({self.data})"


class LinkedList:
    """A singly linked list supporting indexed access, insertion, deletion and reversal.

    NOTE: the anonymized source named both classes ``UpperCAmelCase_`` while the
    test functions below construct ``Node(...)`` and ``LinkedList()`` — a
    NameError; the canonical names are restored.
    """

    def __init__(self):
        self.head = None

    def __iter__(self):
        """Yield the data of each node from head to tail."""
        node = self.head
        while node:
            yield node.data
            node = node.next

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        return "->".join([str(item) for item in self])

    def __getitem__(self, index: int):
        """Return the data at ``index``; raise ValueError if out of range."""
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        for i, node in enumerate(self):
            if i == index:
                return node
        return None

    def __setitem__(self, index: int, data: Any) -> None:
        """Overwrite the data of the node at ``index``."""
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        current = self.head
        for _ in range(index):
            current = current.next
        current.data = data

    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        """Insert a new node holding ``data`` before position ``index``."""
        if not 0 <= index <= len(self):
            raise IndexError("list index out of range")
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node

    def print_list(self) -> None:  # print every node data
        print(self)

    def delete_head(self):
        return self.delete_nth(0)

    def delete_tail(self):  # delete from tail
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0):
        """Remove the node at ``index`` and return its data."""
        if not 0 <= index <= len(self) - 1:  # test if index is valid
            raise IndexError("List index out of range.")
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data

    def is_empty(self) -> bool:
        return self.head is None

    def reverse(self) -> None:
        """Reverse the list in place by re-linking each node backwards."""
        prev = None
        current = self.head

        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # Return prev in order to put the head at the end
        self.head = prev


def test_singly_linked_list() -> None:
    """Exercise the basic LinkedList operations with integer payloads."""
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list) == ""

    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    for i in range(10):
        assert len(linked_list) == i
        linked_list.insert_nth(i, i + 1)
    assert str(linked_list) == "->".join(str(i) for i in range(1, 11))

    linked_list.insert_head(0)
    linked_list.insert_tail(11)
    assert str(linked_list) == "->".join(str(i) for i in range(0, 12))

    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9) == 10
    assert linked_list.delete_tail() == 11
    assert len(linked_list) == 9
    assert str(linked_list) == "->".join(str(i) for i in range(1, 10))

    assert all(linked_list[i] == i + 1 for i in range(0, 9)) is True

    for i in range(0, 9):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0, 9)) is True

    linked_list.reverse()
    assert str(linked_list) == "->".join(str(i) for i in range(-8, 1))


def test_singly_linked_list_2() -> None:
    """Exercise the LinkedList with heterogeneous payloads (Nodes, None, floats...)."""
    test_input = [
        -9,
        100,
        Node(7734_5112),
        "dlrow olleH",
        7,
        5555,
        0,
        -192.55_555,
        "Hello, world!",
        77.9,
        Node(10),
        None,
        None,
        12.20,
    ]
    linked_list = LinkedList()

    for i in test_input:
        linked_list.insert_tail(i)

    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )

    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(10)
    assert result is None
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )

    # Add a Node instance to its head
    linked_list.insert_head(Node("Hello again, world!"))
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )

    # Add None to its tail
    linked_list.insert_tail(None)
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )

    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list)
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )


def main() -> None:
    """Interactive demo driver; also runs the module doctests."""
    from doctest import testmod

    testmod()

    linked_list = LinkedList()
    linked_list.insert_head(input("Inserting 1st at head ").strip())
    linked_list.insert_head(input("Inserting 2nd at head ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    linked_list.insert_tail(input("\nInserting 1st at tail ").strip())
    linked_list.insert_tail(input("Inserting 2nd at tail ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    print("\nDelete head")
    linked_list.delete_head()
    print("Delete tail")
    linked_list.delete_tail()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nReverse linked list")
    linked_list.reverse()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nString representation of linked list:")
    print(linked_list)
    print("\nReading/changing Node data using indexing:")
    print(f"Element at Position 1: {linked_list[1]}")
    linked_list[1] = input("Enter New Value: ").strip()
    print("New list:")
    print(linked_list)
    print(f"length of linked_list is : {len(linked_list)}")


if __name__ == "__main__":
    main()
79
import argparse
import math
import os

import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer

from diffusers import AutoencoderKL, StableDiffusionPipeline, UNet2DConditionModel


def parse_args():
    """Parse command-line options for INT8 Stable Diffusion inference.

    Returns:
        argparse.Namespace with pretrained_model_name_or_path, caption,
        images_num, seed and cuda_id.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-m",
        "--pretrained_model_name_or_path",
        type=str,
        default=None,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
    )
    parser.add_argument(
        "-c",
        "--caption",
        type=str,
        default="robotic cat with wings",
        help="Text used to generate images.",
    )
    parser.add_argument(
        "-n",
        "--images_num",
        type=int,
        default=4,
        help="How much images to generate.",
    )
    parser.add_argument(
        "-s",
        "--seed",
        type=int,
        default=42,
        help="Seed for random process.",
    )
    parser.add_argument(
        "-ci",
        "--cuda_id",
        type=int,
        default=0,
        help="cuda_id.",
    )
    args = parser.parse_args()
    return args


def image_grid(imgs, rows, cols):
    """Paste ``rows * cols`` PIL images into one grid image.

    Raises:
        ValueError: if ``len(imgs)`` does not match ``rows * cols``.
    """
    if not len(imgs) == rows * cols:
        raise ValueError("The specified number of rows and columns are not correct.")

    w, h = imgs[0].size
    grid = Image.new("RGB", size=(cols * w, rows * h))
    grid_w, grid_h = grid.size

    for i, img in enumerate(imgs):
        # Fill the grid row by row.
        grid.paste(img, box=(i % cols * w, i // cols * h))
    return grid


def generate_images(
    pipeline,
    prompt="robotic cat with wings",
    guidance_scale=7.5,
    num_inference_steps=50,
    num_images_per_prompt=1,
    seed=42,
):
    """Run the pipeline and return (grid_image, list_of_images)."""
    generator = torch.Generator(pipeline.device).manual_seed(seed)
    images = pipeline(
        prompt,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        generator=generator,
        num_images_per_prompt=num_images_per_prompt,
    ).images
    _rows = int(math.sqrt(num_images_per_prompt))
    grid = image_grid(images, rows=_rows, cols=num_images_per_prompt // _rows)
    return grid, images


args = parse_args()

# Load models and create wrapper for stable diffusion
tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer")
text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="text_encoder")
vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae")
unet = UNet2DConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="unet")

pipeline = StableDiffusionPipeline.from_pretrained(
    args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
# Disable the safety checker (returns the images unchanged).
pipeline.safety_checker = lambda images, clip_input: (images, False)

if os.path.exists(os.path.join(args.pretrained_model_name_or_path, "best_model.pt")):
    # Load the INT8-quantized UNet produced by neural_compressor.
    unet = load(args.pretrained_model_name_or_path, model=unet)
    unet.eval()
    setattr(pipeline, "unet", unet)
else:
    unet = unet.to(torch.device("cuda", args.cuda_id))
pipeline = pipeline.to(unet.device)

grid, images = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, "{}.png".format("_".join(args.caption.split()))))
dirname = os.path.join(args.pretrained_model_name_or_path, "_".join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
    image.save(os.path.join(dirname, "{}.png".format(idx + 1)))
64
0
__UpperCamelCase : Tuple = { """a""": """AAAAA""", """b""": """AAAAB""", """c""": """AAABA""", """d""": """AAABB""", """e""": """AABAA""", """f""": """AABAB""", """g""": """AABBA""", """h""": """AABBB""", """i""": """ABAAA""", """j""": """BBBAA""", """k""": """ABAAB""", """l""": """ABABA""", """m""": """ABABB""", """n""": """ABBAA""", """o""": """ABBAB""", """p""": """ABBBA""", """q""": """ABBBB""", """r""": """BAAAA""", """s""": """BAAAB""", """t""": """BAABA""", """u""": """BAABB""", """v""": """BBBAB""", """w""": """BABAA""", """x""": """BABAB""", """y""": """BABBA""", """z""": """BABBB""", """ """: """ """, } __UpperCamelCase : str = {value: key for key, value in encode_dict.items()} def snake_case ( lowerCamelCase ): '''simple docstring''' __lowercase = """""" for letter in word.lower(): if letter.isalpha() or letter == " ": encoded += encode_dict[letter] else: raise Exception("""encode() accepts only letters of the alphabet and spaces""" ) return encoded def snake_case ( lowerCamelCase ): '''simple docstring''' if set(lowerCamelCase ) - {"A", "B", " "} != set(): raise Exception("""decode() accepts only 'A', 'B' and spaces""" ) __lowercase = """""" for word in coded.split(): while len(lowerCamelCase ) != 0: decoded += decode_dict[word[:5]] __lowercase = word[5:] decoded += " " return decoded.strip() if __name__ == "__main__": from doctest import testmod testmod()
80
from __future__ import annotations from collections import deque class _lowerCamelCase : def __init__( self , lowerCAmelCase ) -> Optional[Any]: SCREAMING_SNAKE_CASE__: list[dict]= [] self.adlist.append( {'''value''': '''''', '''next_states''': [], '''fail_state''': 0, '''output''': []} ) for keyword in keywords: self.add_keyword(lowerCAmelCase ) self.set_fail_transitions() def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase ) -> int | None: for state in self.adlist[current_state]["next_states"]: if char == self.adlist[state]["value"]: return state return None def UpperCamelCase_ ( self , lowerCAmelCase ) -> None: SCREAMING_SNAKE_CASE__: str= 0 for character in keyword: SCREAMING_SNAKE_CASE__: Union[str, Any]= self.find_next_state(lowerCAmelCase , lowerCAmelCase ) if next_state is None: self.adlist.append( { '''value''': character, '''next_states''': [], '''fail_state''': 0, '''output''': [], } ) self.adlist[current_state]["next_states"].append(len(self.adlist ) - 1 ) SCREAMING_SNAKE_CASE__: Dict= len(self.adlist ) - 1 else: SCREAMING_SNAKE_CASE__: List[Any]= next_state self.adlist[current_state]["output"].append(lowerCAmelCase ) def UpperCamelCase_ ( self ) -> None: SCREAMING_SNAKE_CASE__: deque= deque() for node in self.adlist[0]["next_states"]: q.append(lowerCAmelCase ) SCREAMING_SNAKE_CASE__: Optional[int]= 0 while q: SCREAMING_SNAKE_CASE__: Union[str, Any]= q.popleft() for child in self.adlist[r]["next_states"]: q.append(lowerCAmelCase ) SCREAMING_SNAKE_CASE__: Optional[int]= self.adlist[r]['''fail_state'''] while ( self.find_next_state(lowerCAmelCase , self.adlist[child]['''value'''] ) is None and state != 0 ): SCREAMING_SNAKE_CASE__: Tuple= self.adlist[state]['''fail_state'''] SCREAMING_SNAKE_CASE__: Dict= self.find_next_state( lowerCAmelCase , self.adlist[child]['''value'''] ) if self.adlist[child]["fail_state"] is None: SCREAMING_SNAKE_CASE__: Union[str, Any]= 0 SCREAMING_SNAKE_CASE__: str= ( self.adlist[child]['''output'''] + 
self.adlist[self.adlist[child]['''fail_state''']]['''output'''] ) def UpperCamelCase_ ( self , lowerCAmelCase ) -> dict[str, list[int]]: SCREAMING_SNAKE_CASE__: dict= {} # returns a dict with keywords and list of its occurrences SCREAMING_SNAKE_CASE__: Optional[Any]= 0 for i in range(len(lowerCAmelCase ) ): while ( self.find_next_state(lowerCAmelCase , string[i] ) is None and current_state != 0 ): SCREAMING_SNAKE_CASE__: Optional[int]= self.adlist[current_state]['''fail_state'''] SCREAMING_SNAKE_CASE__: Optional[int]= self.find_next_state(lowerCAmelCase , string[i] ) if next_state is None: SCREAMING_SNAKE_CASE__: List[Any]= 0 else: SCREAMING_SNAKE_CASE__: Dict= next_state for key in self.adlist[current_state]["output"]: if key not in result: SCREAMING_SNAKE_CASE__: Optional[Any]= [] result[key].append(i - len(lowerCAmelCase ) + 1 ) return result if __name__ == "__main__": import doctest doctest.testmod()
64
0
import unittest

from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate


def lowerCAmelCase_(__lowerCamelCase):
    """Return the argument plus two (used as the `add_two` tool below)."""
    return __lowerCamelCase + 2


# The evaluated snippets refer to the helper as ``add_two``; keep the
# original (mangled) function name above for backward compatibility.
add_two = lowerCAmelCase_


class a(unittest.TestCase):
    """Unit tests for the restricted Python interpreter's `evaluate`."""

    def test_evaluate_assign(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

        code = "x = y"
        state = {"y": 5}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 5, "y": 5})

    def test_evaluate_call(self):
        code = "y = add_two(x)"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

        # Won't work without the tool
        with CaptureStdout() as out:
            result = evaluate(code, {}, state=state)
        assert result is None
        assert "tried to execute add_two" in out.out

    def test_evaluate_constant(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

    def test_evaluate_dict(self):
        code = "test_dict = {'x': x, 'y': add_two(x)}"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertDictEqual(result, {"x": 3, "y": 5})
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})

    def test_evaluate_expression(self):
        code = "x = 3\ny = 5"
        state = {}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

    def test_evaluate_f_string(self):
        code = "text = f'This is x: {x}.'"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == "This is x: 3."
        self.assertDictEqual(state, {"x": 3, "text": "This is x: 3."})

    def test_evaluate_if(self):
        code = "if x <= 3:\n y = 2\nelse:\n y = 5"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 2
        self.assertDictEqual(state, {"x": 3, "y": 2})

        state = {"x": 8}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 8, "y": 5})

    def test_evaluate_list(self):
        code = "test_list = [x, add_two(x)]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertListEqual(result, [3, 5])
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

    def test_evaluate_name(self):
        code = "y = x"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3, "y": 3})

    def test_evaluate_subscript(self):
        code = "test_list = [x, add_two(x)]\ntest_list[1]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

        code = "test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})

    def test_evaluate_for(self):
        code = "x = 0\nfor i in range(3):\n x = i"
        state = {}
        result = evaluate(code, {"range": range}, state=state)
        assert result == 2
        self.assertDictEqual(state, {"x": 2, "i": 2})
81
import numpy as np def A__ ( snake_case_ : str , snake_case_ : List[str] , snake_case_ : Dict , snake_case_ : Optional[int] , snake_case_ : Optional[int] ): SCREAMING_SNAKE_CASE__: List[Any]= int(np.ceil((x_end - xa) / h ) ) SCREAMING_SNAKE_CASE__: Any= np.zeros((n + 1,) ) SCREAMING_SNAKE_CASE__: int= ya SCREAMING_SNAKE_CASE__: Tuple= xa for k in range(snake_case_ ): SCREAMING_SNAKE_CASE__: Any= f(snake_case_ , y[k] ) SCREAMING_SNAKE_CASE__: Optional[int]= f(x + 0.5 * h , y[k] + 0.5 * h * ka ) SCREAMING_SNAKE_CASE__: Tuple= f(x + 0.5 * h , y[k] + 0.5 * h * ka ) SCREAMING_SNAKE_CASE__: List[str]= f(x + h , y[k] + h * ka ) SCREAMING_SNAKE_CASE__: Tuple= y[k] + (1 / 6) * h * (ka + 2 * ka + 2 * ka + ka) x += h return y if __name__ == "__main__": import doctest doctest.testmod()
64
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available lowerCamelCase = { """configuration_bloom""": ["""BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BloomConfig""", """BloomOnnxConfig"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase = ["""BloomTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase = [ """BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST""", """BloomForCausalLM""", """BloomModel""", """BloomPreTrainedModel""", """BloomForSequenceClassification""", """BloomForTokenClassification""", """BloomForQuestionAnswering""", ] if TYPE_CHECKING: from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bloom_fast import BloomTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_bloom import ( BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST, BloomForCausalLM, BloomForQuestionAnswering, BloomForSequenceClassification, BloomForTokenClassification, BloomModel, BloomPreTrainedModel, ) else: import sys lowerCamelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
82
import unittest import torch from torch import nn from diffusers.models.activations import get_activation class _lowerCamelCase ( unittest.TestCase ): def UpperCamelCase_ ( self ) -> List[Any]: SCREAMING_SNAKE_CASE__: Tuple= get_activation('''swish''' ) self.assertIsInstance(lowerCAmelCase , nn.SiLU ) self.assertEqual(act(torch.tensor(-100 , dtype=torch.floataa ) ).item() , 0 ) self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 ) def UpperCamelCase_ ( self ) -> int: SCREAMING_SNAKE_CASE__: Optional[Any]= get_activation('''silu''' ) self.assertIsInstance(lowerCAmelCase , nn.SiLU ) self.assertEqual(act(torch.tensor(-100 , dtype=torch.floataa ) ).item() , 0 ) self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 ) def UpperCamelCase_ ( self ) -> Union[str, Any]: SCREAMING_SNAKE_CASE__: Optional[int]= get_activation('''mish''' ) self.assertIsInstance(lowerCAmelCase , nn.Mish ) self.assertEqual(act(torch.tensor(-200 , dtype=torch.floataa ) ).item() , 0 ) self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 ) def UpperCamelCase_ ( self ) -> int: SCREAMING_SNAKE_CASE__: Dict= get_activation('''gelu''' ) self.assertIsInstance(lowerCAmelCase , nn.GELU ) self.assertEqual(act(torch.tensor(-100 , dtype=torch.floataa ) ).item() , 0 ) self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
64
0
"""simple docstring""" from __future__ import annotations from scipy.special import comb # type: ignore class __snake_case : def __init__( self : Union[str, Any] , __lowerCAmelCase : list[tuple[float, float]] ): """simple docstring""" _lowerCamelCase : Any = list_of_points # Degree determines the flexibility of the curve. # Degree = 1 will produce a straight line. _lowerCamelCase : Any = len(__lowerCAmelCase ) - 1 def SCREAMING_SNAKE_CASE ( self : Optional[int] , __lowerCAmelCase : float ): """simple docstring""" assert 0 <= t <= 1, "Time t must be between 0 and 1." _lowerCamelCase : list[float] = [] for i in range(len(self.list_of_points ) ): # basis function for each i output_values.append( comb(self.degree , __lowerCAmelCase ) * ((1 - t) ** (self.degree - i)) * (t**i) ) # the basis must sum up to 1 for it to produce a valid Bezier curve. assert round(sum(__lowerCAmelCase ) , 5 ) == 1 return output_values def SCREAMING_SNAKE_CASE ( self : List[str] , __lowerCAmelCase : float ): """simple docstring""" assert 0 <= t <= 1, "Time t must be between 0 and 1." _lowerCamelCase : Optional[int] = self.basis_function(__lowerCAmelCase ) _lowerCamelCase : Optional[Any] = 0.0 _lowerCamelCase : Any = 0.0 for i in range(len(self.list_of_points ) ): # For all points, sum up the product of i-th basis function and i-th point. 
x += basis_function[i] * self.list_of_points[i][0] y += basis_function[i] * self.list_of_points[i][1] return (x, y) def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __lowerCAmelCase : float = 0.01 ): """simple docstring""" from matplotlib import pyplot as plt # type: ignore _lowerCamelCase : list[float] = [] # x coordinates of points to plot _lowerCamelCase : list[float] = [] # y coordinates of points to plot _lowerCamelCase : List[Any] = 0.0 while t <= 1: _lowerCamelCase : List[Any] = self.bezier_curve_function(__lowerCAmelCase ) to_plot_x.append(value[0] ) to_plot_y.append(value[1] ) t += step_size _lowerCamelCase : List[str] = [i[0] for i in self.list_of_points] _lowerCamelCase : Optional[int] = [i[1] for i in self.list_of_points] plt.plot( __lowerCAmelCase , __lowerCAmelCase , color='''blue''' , label='''Curve of Degree ''' + str(self.degree ) , ) plt.scatter(__lowerCAmelCase , __lowerCAmelCase , color='''red''' , label='''Control Points''' ) plt.legend() plt.show() if __name__ == "__main__": import doctest doctest.testmod() BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1 BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2 BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
83
from __future__ import annotations

from collections.abc import Callable
from typing import Any, Generic, TypeVar

T = TypeVar("T")


class SegmentTree(Generic[T]):
    """Iterative segment tree: point update and inclusive range query
    under an arbitrary associative, commutative combine function."""

    def __init__(self, arr: list[T], fnc: Callable[[T, T], T]) -> None:
        any_type: Any | T = None
        self.N: int = len(arr)
        # st[N:] holds the leaves; st[1:N] holds internal nodes.
        self.st: list[T] = [any_type for _ in range(self.N)] + arr
        self.fn = fnc
        self.build()

    def build(self) -> None:
        """Fill every internal node from its two children."""
        for p in range(self.N - 1, 0, -1):
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def update(self, p: int, v: T) -> None:
        """Set element ``p`` to ``v`` and refresh the ancestors."""
        p += self.N
        self.st[p] = v
        while p > 1:
            p = p // 2
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def query(self, l: int, r: int) -> T | None:  # noqa: E741
        """Combine the elements on the inclusive index range [l, r]."""
        l, r = l + self.N, r + self.N
        res: T | None = None
        while l <= r:
            if l % 2 == 1:
                res = self.st[l] if res is None else self.fn(res, self.st[l])
            if r % 2 == 0:
                res = self.st[r] if res is None else self.fn(res, self.st[r])
            l, r = (l + 1) // 2, (r - 1) // 2
        return res


if __name__ == "__main__":
    from functools import reduce

    test_array = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]

    test_updates = {
        0: 7,
        1: 2,
        2: 6,
        3: -14,
        4: 5,
        5: 4,
        6: 7,
        7: -10,
        8: 9,
        9: 10,
        10: 12,
        11: 1,
    }

    min_segment_tree = SegmentTree(test_array, min)
    max_segment_tree = SegmentTree(test_array, max)
    sum_segment_tree = SegmentTree(test_array, lambda a, b: a + b)

    def test_all_segments():
        """Compare every (i, j) range query against a plain reduce."""
        for i in range(len(test_array)):
            for j in range(i, len(test_array)):
                min_range = reduce(min, test_array[i : j + 1])
                max_range = reduce(max, test_array[i : j + 1])
                sum_range = reduce(lambda a, b: a + b, test_array[i : j + 1])
                assert min_range == min_segment_tree.query(i, j)
                assert max_range == max_segment_tree.query(i, j)
                assert sum_range == sum_segment_tree.query(i, j)

    test_all_segments()

    for index, value in test_updates.items():
        test_array[index] = value
        min_segment_tree.update(index, value)
        max_segment_tree.update(index, value)
        sum_segment_tree.update(index, value)
    test_all_segments()
64
0
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): return number | (1 << position) def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): return number & ~(1 << position) def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): return number ^ (1 << position) def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): return ((number >> position) & 1) == 1 def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): return int((number & (1 << position)) != 0 ) if __name__ == "__main__": import doctest doctest.testmod()
84
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/

import gc
import random
import tempfile
import unittest

import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    ControlNetModel,
    DDIMScheduler,
    StableDiffusionControlNetImg2ImgPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..pipeline_params import (
    IMAGE_TO_IMAGE_IMAGE_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
    PipelineKarrasSchedulerTesterMixin,
    PipelineLatentTesterMixin,
    PipelineTesterMixin,
)


enable_full_determinism()


class StableDiffusionControlNetImg2ImgPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    """Fast CPU tests for the single-ControlNet img2img pipeline."""

    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"control_image"})
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        """Build tiny randomly initialized sub-models for fast tests."""
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        torch.manual_seed(0)
        controlnet = ControlNetModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            in_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            cross_attention_dim=32,
            conditioning_embedding_out_channels=(16, 32),
        )
        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "controlnet": controlnet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Build deterministic pipeline inputs (prompt, init image, control image)."""
        if str(device).startswith("mps"):
            # mps does not support device-bound generators.
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        controlnet_embedder_scale_factor = 2
        control_image = randn_tensor(
            (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
            generator=generator,
            device=torch.device(device),
        )
        image = floats_tensor(control_image.shape, rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "image": image,
            "control_image": control_image,
        }
        return inputs

    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)


class StableDiffusionMultiControlNetPipelineFastTests(
    PipelineTesterMixin, PipelineKarrasSchedulerTesterMixin, unittest.TestCase
):
    """Fast CPU tests for the pipeline driven by two ControlNets at once."""

    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset([])  # TO_DO: add image_params once refactored VaeImageProcessor.preprocess

    def get_dummy_components(self):
        """Build tiny sub-models, wrapping two ControlNets in a MultiControlNetModel."""
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        torch.manual_seed(0)

        def init_weights(m):
            # Re-initialize the zero-convs so the two ControlNets contribute
            # non-zero, distinguishable signals.
            if isinstance(m, torch.nn.Conv2d):
                torch.nn.init.normal(m.weight)
                m.bias.data.fill_(1.0)

        controlnet1 = ControlNetModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            in_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            cross_attention_dim=32,
            conditioning_embedding_out_channels=(16, 32),
        )
        controlnet1.controlnet_down_blocks.apply(init_weights)

        torch.manual_seed(0)
        controlnet2 = ControlNetModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            in_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            cross_attention_dim=32,
            conditioning_embedding_out_channels=(16, 32),
        )
        controlnet2.controlnet_down_blocks.apply(init_weights)

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        controlnet = MultiControlNetModel([controlnet1, controlnet2])

        components = {
            "unet": unet,
            "controlnet": controlnet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Build deterministic inputs; one control image per ControlNet."""
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        controlnet_embedder_scale_factor = 2
        control_image = [
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
                generator=generator,
                device=torch.device(device),
            ),
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
                generator=generator,
                device=torch.device(device),
            ),
        ]
        image = floats_tensor(control_image[0].shape, rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "image": image,
            "control_image": control_image,
        }
        return inputs

    def test_control_guidance_switch(self):
        """Different control_guidance_start/end settings must change the output."""
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)

        scale = 10.0
        steps = 4

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["guidance_scale"] = scale
        output_1 = pipe(**inputs)[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["guidance_scale"] = scale
        output_2 = pipe(**inputs, control_guidance_start=0.1, control_guidance_end=0.2)[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["guidance_scale"] = scale
        output_3 = pipe(**inputs, control_guidance_start=[0.1, 0.3], control_guidance_end=[0.2, 0.7])[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["guidance_scale"] = scale
        output_4 = pipe(**inputs, control_guidance_start=0.4, control_guidance_end=[0.5, 0.8])[0]

        # make sure that all outputs are different
        assert np.sum(np.abs(output_1 - output_2)) > 1e-3
        assert np.sum(np.abs(output_1 - output_3)) > 1e-3
        assert np.sum(np.abs(output_1 - output_4)) > 1e-3

    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)

    def test_save_pretrained_raise_not_implemented_exception(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        with tempfile.TemporaryDirectory() as tmpdir:
            try:
                # save_pretrained is not implemented for Multi-ControlNet
                pipe.save_pretrained(tmpdir)
            except NotImplementedError:
                pass


@slow
@require_torch_gpu
class StableDiffusionControlNetImg2ImgPipelineSlowTests(unittest.TestCase):
    """End-to-end GPU test against a reference output."""

    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_canny(self):
        controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")

        pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet
        )
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        prompt = "evil space-punk bird"
        control_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
        ).resize((512, 512))
        image = load_image(
            "https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png"
        ).resize((512, 512))

        output = pipe(
            prompt,
            image,
            control_image=control_image,
            generator=generator,
            output_type="np",
            num_inference_steps=50,
            strength=0.6,
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy"
        )

        assert np.abs(expected_image - image).max() < 9e-2
64
0
from __future__ import annotations


def prime_sieve(limit: int) -> list[int]:
    """Return all primes strictly below ``limit`` using an odd-only Sieve of Eratosthenes.

    Only odd candidates are sieved; the single even prime 2 is seeded explicitly.

    Args:
        limit: exclusive upper bound for the generated primes.

    Returns:
        Ascending list of primes ``p`` with ``p < limit``.
    """
    if limit < 3:
        # Nothing but possibly 2 below the bound.
        return [2] if limit > 2 else []

    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False

    for i in range(3, int(limit**0.5 + 1), 2):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index += i

    primes = [2]
    for i in range(3, limit, 2):
        if is_prime[i]:
            primes.append(i)
    return primes


def solution(ceiling: int = 1_00_00_00) -> int:
    """Project Euler 50: the prime below ``ceiling`` that is the sum of the most consecutive primes.

    Uses a prefix-sum array so each window sum is O(1), and a set for O(1)
    primality lookup (the original recomputed ``sum(primes[i:j])`` and scanned a
    list, which was accidentally cubic).

    Args:
        ceiling: exclusive upper bound for the target prime.

    Returns:
        The prime with the longest consecutive-prime-sum representation.
    """
    primes = prime_sieve(ceiling)
    prime_set = set(primes)

    # prefix[k] == sum(primes[:k]), so sum(primes[i:j]) == prefix[j] - prefix[i].
    prefix = [0]
    for p in primes:
        prefix.append(prefix[-1] + p)

    length = 0
    largest = 0
    for i in range(len(primes)):
        # Start at i + length: shorter windows cannot beat the current best.
        for j in range(i + length, len(primes)):
            sol = prefix[j] - prefix[i]
            if sol >= ceiling:
                break
            if sol in prime_set:
                length = j - i
                largest = sol
    return largest


if __name__ == "__main__":
    print(f"""{solution() = }""")
85
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim

from dataclasses import dataclass
from typing import Optional, Tuple, Union

import flax
import jax
import jax.numpy as jnp

from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
    CommonSchedulerState,
    FlaxKarrasDiffusionSchedulers,
    FlaxSchedulerMixin,
    FlaxSchedulerOutput,
    add_noise_common,
    get_velocity_common,
)


@flax.struct.dataclass
class DDPMSchedulerState:
    """Immutable container for everything the stateless scheduler needs between calls."""

    common: CommonSchedulerState
    # setable values
    init_noise_sigma: jnp.ndarray
    timesteps: jnp.ndarray
    num_inference_steps: Optional[int] = None

    @classmethod
    def create(cls, common: CommonSchedulerState, init_noise_sigma: jnp.ndarray, timesteps: jnp.ndarray):
        return cls(common=common, init_noise_sigma=init_noise_sigma, timesteps=timesteps)


@dataclass
class FlaxDDPMSchedulerOutput(FlaxSchedulerOutput):
    # Carries the (possibly updated) scheduler state alongside the denoised sample.
    state: DDPMSchedulerState


class FlaxDDPMScheduler(FlaxSchedulerMixin, ConfigMixin):
    """Flax/JAX port of the DDPM denoising scheduler (Ho et al., https://arxiv.org/pdf/2006.11239.pdf)."""

    _compatibles = [e.name for e in FlaxKarrasDiffusionSchedulers]

    dtype: jnp.dtype

    @property
    def has_state(self) -> bool:
        # The scheduler itself is stateless; all mutable data lives in DDPMSchedulerState.
        return True

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.0001,
        beta_end: float = 0.02,
        beta_schedule: str = "linear",
        trained_betas: Optional[jnp.ndarray] = None,
        variance_type: str = "fixed_small",
        clip_sample: bool = True,
        prediction_type: str = "epsilon",
        dtype: jnp.dtype = jnp.float32,
    ):
        # All other arguments are captured by @register_to_config into self.config.
        self.dtype = dtype

    def create_state(self, common: Optional[CommonSchedulerState] = None) -> DDPMSchedulerState:
        """Build the initial scheduler state (full training-timestep schedule, sigma 1)."""
        if common is None:
            common = CommonSchedulerState.create(self)

        # standard deviation of the initial noise distribution
        init_noise_sigma = jnp.array(1.0, dtype=self.dtype)

        timesteps = jnp.arange(0, self.config.num_train_timesteps).round()[::-1]

        return DDPMSchedulerState.create(
            common=common,
            init_noise_sigma=init_noise_sigma,
            timesteps=timesteps,
        )

    def scale_model_input(
        self, state: DDPMSchedulerState, sample: jnp.ndarray, timestep: Optional[int] = None
    ) -> jnp.ndarray:
        # DDPM does not rescale the model input.
        return sample

    def set_timesteps(
        self, state: DDPMSchedulerState, num_inference_steps: int, shape: Tuple = ()
    ) -> DDPMSchedulerState:
        """Return a new state with a ``num_inference_steps``-long inference schedule."""
        step_ratio = self.config.num_train_timesteps // num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # rounding to avoid issues when num_inference_step is power of 3
        timesteps = (jnp.arange(0, num_inference_steps) * step_ratio).round()[::-1]

        return state.replace(
            num_inference_steps=num_inference_steps,
            timesteps=timesteps,
        )

    def _get_variance(self, state: DDPMSchedulerState, t, predicted_variance=None, variance_type=None):
        """Posterior variance for timestep ``t`` (formulas (6)/(7) of the DDPM paper)."""
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))

        # For t > 0, compute predicted variance βt (see formula (6) and (7) from
        # https://arxiv.org/pdf/2006.11239.pdf) and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]

        if variance_type is None:
            variance_type = self.config.variance_type

        # hacks - were probably added for training stability
        if variance_type == "fixed_small":
            variance = jnp.clip(variance, a_min=1e-20)
        # for rl-diffuser https://arxiv.org/abs/2205.09991
        elif variance_type == "fixed_small_log":
            variance = jnp.log(jnp.clip(variance, a_min=1e-20))
        elif variance_type == "fixed_large":
            variance = state.common.betas[t]
        elif variance_type == "fixed_large_log":
            # Glide max_log
            variance = jnp.log(state.common.betas[t])
        elif variance_type == "learned":
            return predicted_variance
        elif variance_type == "learned_range":
            min_log = variance
            max_log = state.common.betas[t]
            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log

        return variance

    def step(
        self,
        state: DDPMSchedulerState,
        model_output: jnp.ndarray,
        timestep: int,
        sample: jnp.ndarray,
        key: Optional[jax.random.KeyArray] = None,
        return_dict: bool = True,
    ) -> Union[FlaxDDPMSchedulerOutput, Tuple]:
        """Run one reverse-diffusion step: predict x_{t-1} from the model output at x_t."""
        t = timestep

        if key is None:
            key = jax.random.PRNGKey(0)

        if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
            # The model predicts both the mean and the (log-)variance.
            model_output, predicted_variance = jnp.split(model_output, sample.shape[1], axis=1)
        else:
            predicted_variance = None

        # 1. compute alphas, betas
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
        else:
            raise ValueError(
                f'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` '
                ''' for the FlaxDDPMScheduler.'''
            )

        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = jnp.clip(pred_original_sample, -1, 1)

        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
        current_sample_coeff = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t

        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

        # 6. Add noise
        def random_variance():
            split_key = jax.random.split(key, num=1)
            noise = jax.random.normal(split_key, shape=model_output.shape, dtype=self.dtype)
            return (self._get_variance(state, t, predicted_variance=predicted_variance) ** 0.5) * noise

        # No noise is added at t == 0 (the final denoising step).
        variance = jnp.where(t > 0, random_variance(), jnp.zeros(model_output.shape, dtype=self.dtype))

        pred_prev_sample = pred_prev_sample + variance

        if not return_dict:
            return (pred_prev_sample, state)

        return FlaxDDPMSchedulerOutput(prev_sample=pred_prev_sample, state=state)

    def add_noise(
        self,
        state: DDPMSchedulerState,
        original_samples: jnp.ndarray,
        noise: jnp.ndarray,
        timesteps: jnp.ndarray,
    ) -> jnp.ndarray:
        return add_noise_common(state.common, original_samples, noise, timesteps)

    def get_velocity(
        self,
        state: DDPMSchedulerState,
        sample: jnp.ndarray,
        noise: jnp.ndarray,
        timesteps: jnp.ndarray,
    ) -> jnp.ndarray:
        return get_velocity_common(state.common, sample, noise, timesteps)

    def __len__(self):
        return self.config.num_train_timesteps
64
0
from ..utils import DummyObject, requires_backends


class DPMSolverSDEScheduler(metaclass=DummyObject):
    """Dummy stand-in that raises an informative ImportError when `torch`/`torchsde` are missing.

    Every entry point (constructor and the loading classmethods) delegates to
    ``requires_backends`` so users get a clear install hint instead of a NameError.
    """

    _backends = ["torch", "torchsde"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "torchsde"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "torchsde"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "torchsde"])
86
def catalan_numbers(upper_limit: int) -> list[int]:
    """Return the Catalan numbers C(0) .. C(upper_limit) via dynamic programming.

    Args:
        upper_limit: index of the last Catalan number to compute (inclusive).

    Returns:
        List of ``upper_limit + 1`` Catalan numbers.

    Raises:
        ValueError: if ``upper_limit`` is negative.
    """
    if upper_limit < 0:
        raise ValueError('Limit for the Catalan sequence must be ≥ 0')

    catalan_list = [0] * (upper_limit + 1)

    # Base case: C(0) = C(1) = 1
    catalan_list[0] = 1
    if upper_limit > 0:
        catalan_list[1] = 1

    # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
    for i in range(2, upper_limit + 1):
        for j in range(i):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]

    return catalan_list


if __name__ == "__main__":
    # Interactive driver is guarded so importing this module has no side effects.
    print('\n********* Catalan Numbers Using Dynamic Programming ************\n')
    print('\n*** Enter -1 at any time to quit ***')
    print('\nEnter the upper limit (≥ 0) for the Catalan number sequence: ', end='')
    try:
        while True:
            N = int(input().strip())
            if N < 0:
                print('\n********* Goodbye!! ************')
                break
            else:
                print(f'''The Catalan numbers from 0 through {N} are:''')
                print(catalan_numbers(N))
                print('Try another upper limit for the sequence: ', end='')
    except (NameError, ValueError):
        print('\n********* Invalid input, goodbye! ************\n')

    import doctest

    doctest.testmod()
64
0
"""Project Euler problem 145: count "reversible" numbers below 10**max_power.

A number n is reversible when every digit of n + reverse(n) is odd; leading
zeros are not allowed. Instead of testing every integer, digits are chosen
pairwise from the outside in so each column sum is forced to be odd.
"""
EVEN_DIGITS = [0, 2, 4, 6, 8]
ODD_DIGITS = [1, 3, 5, 7, 9]


def reversible_numbers(remaining_length: int, remainder: int, digits: list[int], length: int) -> int:
    """Count reversible numbers of ``length`` digits, filling pairs recursively.

    Args:
        remaining_length: how many digit positions are still unassigned.
        remainder: carry accumulated from already-fixed inner digit pairs.
        digits: scratch array holding the digits chosen so far.
        length: total number of digits of the candidate numbers.
    """
    if remaining_length == 0:
        # All digits fixed: reject leading/trailing zeros, then verify every
        # column sum (with carries) of n + reverse(n) is odd.
        if digits[0] == 0 or digits[-1] == 0:
            return 0
        for i in range(length // 2 - 1, -1, -1):
            remainder += digits[i] + digits[length - i - 1]
            if remainder % 2 == 0:
                return 0
            remainder //= 10
        return 1

    if remaining_length == 1:
        # Odd length: the middle digit is doubled in the sum, so the incoming
        # carry itself must be odd for the middle column to stay odd.
        if remainder % 2 == 0:
            return 0
        result = 0
        for digit in range(10):
            digits[length // 2] = digit
            result += reversible_numbers(0, (remainder + 2 * digit) // 10, digits, length)
        return result

    # Fix an outer pair (one digit from each end); their parities must differ
    # so the column sum is odd given the current carry.
    result = 0
    for digit1 in range(10):
        digits[(length + remaining_length) // 2 - 1] = digit1
        if (remainder + digit1) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS
        for digit2 in other_parity_digits:
            digits[(length - remaining_length) // 2] = digit2
            result += reversible_numbers(
                remaining_length - 2,
                (remainder + digit1 + digit2) // 10,
                digits,
                length,
            )
    return result


def solution(max_power: int = 9) -> int:
    """Return the count of reversible numbers below 10**max_power."""
    result = 0
    for length in range(1, max_power + 1):
        result += reversible_numbers(length, 0, [0] * length, length)
    return result


if __name__ == "__main__":
    print(f'''{solution() = }''')
87
# Import wiring for the UnCLIP pipelines: the real implementations require
# both `torch` and transformers >= 4.25.0, so fall back to dummy objects
# (which raise an informative error on use) when either backend is missing.
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)


try:
    # Probe the optional backends; raising here routes us to the dummy objects.
    if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Dummy stand-ins keep the public names importable without the backends.
    from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
    # Backends present: expose the real pipeline implementations.
    from .pipeline_unclip import UnCLIPPipeline
    from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
    from .text_proj import UnCLIPTextProjModel
64
0
"""simple docstring""" def _snake_case ( __snake_case : int ): """simple docstring""" _lowerCamelCase : Dict = int(__snake_case ) if n_element < 1: _lowerCamelCase : List[Any] = ValueError("""a should be a positive number""" ) raise my_error _lowerCamelCase : Dict = [1] _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Any = (0, 0, 0) _lowerCamelCase : Tuple = 1 while index < n_element: while hamming_list[i] * 2 <= hamming_list[-1]: i += 1 while hamming_list[j] * 3 <= hamming_list[-1]: j += 1 while hamming_list[k] * 5 <= hamming_list[-1]: k += 1 hamming_list.append( min(hamming_list[i] * 2 , hamming_list[j] * 3 , hamming_list[k] * 5 ) ) index += 1 return hamming_list if __name__ == "__main__": UpperCAmelCase = input("""Enter the last number (nth term) of the Hamming Number Series: """) print("""Formula of Hamming Number Series => 2^i * 3^j * 5^k""") UpperCAmelCase = hamming(int(n)) print("""-----------------------------------------------------""") print(f'''The list with nth numbers is: {hamming_numbers}''') print("""-----------------------------------------------------""")
88
"""Convert a fairseq/metaseq OPT checkpoint into the HuggingFace Transformers format."""
import argparse
from pathlib import Path

import torch

from transformers import OPTConfig, OPTModel
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def load_checkpoint(checkpoint_path):
    """Load a fairseq OPT state dict and remap it to Transformers' naming scheme.

    Drops fairseq-only tensors, renames layer-norm/projection keys, and splits
    each fused ``qkv_proj`` weight into separate q/k/v projections.
    """
    sd = torch.load(checkpoint_path, map_location="cpu")
    if "model" in sd.keys():
        sd = torch.load(checkpoint_path, map_location="cpu")["model"]

    # pop unnecessary weights
    keys_to_delete = [
        "decoder.version",
        "decoder.output_projection.weight",
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(key)

    keys_to_rename = {
        "decoder.project_in_dim.weight": "decoder.project_in.weight",
        "decoder.project_out_dim.weight": "decoder.project_out.weight",
        "decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            sd[new_key] = sd.pop(old_key)

    keys = list(sd.keys())
    for key in keys:
        if ".qkv_proj." in key:
            value = sd[key]
            # We split QKV in separate Q,K,V
            q_name = key.replace(".qkv_proj.", ".q_proj.")
            k_name = key.replace(".qkv_proj.", ".k_proj.")
            v_name = key.replace(".qkv_proj.", ".v_proj.")

            depth = value.shape[0]
            assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            k, v, q = torch.split(value, depth // 3, dim=0)

            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v
            del sd[key]

    return sd


@torch.no_grad()
def convert_opt_checkpoint(checkpoint_path, pytorch_dump_folder_path, config=None):
    """Instantiate an OPTModel from the remapped state dict and save it in HF format.

    Args:
        checkpoint_path: path to the fairseq checkpoint.
        pytorch_dump_folder_path: output directory (created if missing).
        config: optional HF config name/path; defaults to a fresh OPTConfig.
    """
    state_dict = load_checkpoint(checkpoint_path)

    if config is not None:
        config = OPTConfig.from_pretrained(config)
    else:
        config = OPTConfig()

    model = OPTModel(config).half().eval()
    model.load_state_dict(state_dict)

    # Check results
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--fairseq_path',
        type=str,
        help=(
            'path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:'
            ' https://huggingface.co/models?other=opt_metasq'
        ),
    )
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument('--hf_config', default=None, type=str, help='Define HF config.')
    args = parser.parse_args()
    convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
64
0
def check_cycle(graph: dict) -> bool:
    """Return True if the directed graph (adjacency-list dict) contains a cycle.

    Args:
        graph: mapping from vertex to an iterable of its successors.
    """
    visited: set[int] = set()
    # To detect a back edge, keep track of vertices currently in the recursion stack
    rec_stk: set[int] = set()
    return any(
        node not in visited and depth_first_search(graph, node, visited, rec_stk)
        for node in graph
    )


def depth_first_search(graph: dict, vertex: int, visited: set, rec_stk: set) -> bool:
    """DFS helper: True if a back edge (cycle) is reachable from ``vertex``."""
    visited.add(vertex)
    rec_stk.add(vertex)

    for node in graph[vertex]:
        if node not in visited:
            if depth_first_search(graph, node, visited, rec_stk):
                return True
        elif node in rec_stk:
            # Edge into the current recursion stack == back edge == cycle.
            return True

    # The node needs to be removed from recursion stack before function ends
    rec_stk.remove(vertex)
    return False


if __name__ == "__main__":
    from doctest import testmod

    testmod()
89
def speed_of_sound_in_a_fluid(density: float, bulk_modulus: float) -> float:
    """Compute the speed of sound in a fluid: c = sqrt(K / rho).

    Args:
        density: fluid density rho in kg/m^3 (must be positive).
        bulk_modulus: bulk modulus K in Pa (must be positive).

    Returns:
        Speed of sound in m/s.

    Raises:
        ValueError: if either argument is non-positive.
    """
    if density <= 0:
        raise ValueError('''Impossible fluid density''')
    if bulk_modulus <= 0:
        raise ValueError('''Impossible bulk modulus''')

    return (bulk_modulus / density) ** 0.5


if __name__ == "__main__":
    import doctest

    doctest.testmod()
64
0
"""Consistency checker for `__init__.py` files using the lazy `_import_structure` pattern.

Verifies that the objects declared in `_import_structure` match the objects
imported under `TYPE_CHECKING`, per backend, and that every submodule is
registered in the main init.
"""
import collections
import importlib.util
import os
import re
from pathlib import Path


PATH_TO_TRANSFORMERS = "src/transformers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r'^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")


def find_backend(line):
    """Return the normalized backend name(s) guarded by an `if not is_x_available()` line, else None."""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)


def parse_init(init_file):
    """Parse an init file and return (import_dict_objects, type_hint_objects), keyed by backend.

    Returns None when the file does not use the `_import_structure` pattern.
    """
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}

    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}

    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects


def analyze_results(import_dict_objects, type_hint_objects):
    """Compare both halves of an init and return a list of human-readable error strings."""

    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors


def check_all_inits():
    """Walk the repo and raise ValueError listing every init whose two halves disagree."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))


def get_transformers_submodules():
    """Return the list of top-level transformers submodules found on disk."""
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules


IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
]


def check_submodules():
    """Raise ValueError if a submodule on disk is missing from the main init's `_import_structure`."""
    # This is to make sure the transformers module imported is the one in the repo.
    spec = importlib.util.spec_from_file_location(
        "transformers",
        os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
        submodule_search_locations=[PATH_TO_TRANSFORMERS],
    )
    transformers = spec.loader.load_module()
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )


if __name__ == "__main__":
    check_all_inits()
    check_submodules()
90
# Lazy-module init for BLIP-2: declares the public API in `_import_structure`
# and defers the heavy imports until an attribute is actually accessed.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    'configuration_blip_2': [
        'BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'Blip2Config',
        'Blip2QFormerConfig',
        'Blip2VisionConfig',
    ],
    'processing_blip_2': ['Blip2Processor'],
}

try:
    # Modeling classes are only exposed when torch is installed.
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_blip_2'] = [
        'BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST',
        'Blip2Model',
        'Blip2QFormerModel',
        'Blip2PreTrainedModel',
        'Blip2ForConditionalGeneration',
        'Blip2VisionModel',
    ]

if TYPE_CHECKING:
    # Static type checkers see the real symbols; module names match the
    # `_import_structure` keys above.
    from .configuration_blip_2 import (
        BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Blip2Config,
        Blip2QFormerConfig,
        Blip2VisionConfig,
    )
    from .processing_blip_2 import Blip2Processor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blip_2 import (
            BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Blip2ForConditionalGeneration,
            Blip2Model,
            Blip2PreTrainedModel,
            Blip2QFormerModel,
            Blip2VisionModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports on attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
64
0
"""simple docstring""" from maths.prime_check import is_prime def _snake_case ( snake_case__ : int ): if not isinstance(snake_case__ , snake_case__ ): A = F'Input value of [number={number}] must be an integer' raise TypeError(snake_case__ ) if is_prime(snake_case__ ) and is_prime(number + 2 ): return number + 2 else: return -1 if __name__ == "__main__": import doctest doctest.testmod()
91
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import ClassLabel, Features, Value
from .base import TaskTemplate


# Fix: the decorator/base previously referenced an undefined placeholder
# (`frozen=UpperCamelCase_`, base `UpperCamelCase_`); the template is a frozen
# dataclass deriving from TaskTemplate.
@dataclass(frozen=True)
class _lowerCamelCase(TaskTemplate):
    """Task template describing the column layout of a text-classification dataset."""

    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    # Fix: all five attributes were named `__a`, so they shadowed each other and
    # `self.text_column` / `self.label_column` / `self.label_schema` (read below)
    # did not exist.
    task: str = field(default="text-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    text_column: str = "text"
    label_column: str = "labels"

    def align_with_features(self, features):
        """Return a deep copy of this template whose label schema uses the
        ClassLabel found in ``features``.

        Raises:
            ValueError: if the label column is missing or is not a ClassLabel.
        """
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        # Fix: the isinstance check compared against an undefined placeholder
        # instead of the imported ClassLabel type.
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        # The dataclass is frozen, so bypass __setattr__ via __dict__.
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        """Map the dataset's column names onto the canonical text/labels names."""
        return {
            self.text_column: "text",
            self.label_column: "labels",
        }
64
0
"""Integration tests that launch the RAG fine-tuning script in a subprocess
and check the resulting exact-match metric against a minimum threshold."""
import json
import logging
import os
import sys
from pathlib import Path

import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
    TestCasePlus,
    execute_subprocess_async,
    require_ray,
    require_torch_gpu,
    require_torch_multi_gpu,
)

logging.basicConfig(level=logging.DEBUG)

# NOTE(review): the two assignments below bind the same placeholder name, while
# the next statement reads `logger` / `stream_handler`, which are never defined
# here.  This file looks auto-mangled; confirm against the upstream source
# before relying on it importing cleanly.  The same placeholder-assignment
# pattern repeats throughout the class bodies below and is not re-flagged on
# every line.
UpperCamelCase_ = logging.getLogger()
UpperCamelCase_ = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)


# NOTE(review): `lowercase__` is not defined in this module; presumably the
# base class should be `TestCasePlus` (imported above) — confirm.  All methods
# also share one mangled name, so only the last definition would survive.
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
    def lowerCamelCase_ ( self : str , UpperCAmelCase__ : Optional[Any] ):
        """Write a tiny source/target dummy dataset (train/val/test splits) under the given directory."""
        # NOTE(review): `exist_ok` receives the directory path rather than a
        # bool — any truthy path behaves like exist_ok=True, but looks mangled.
        os.makedirs(UpperCAmelCase__ , exist_ok=UpperCAmelCase__ )
        lowercase : Optional[int] ={'''source''': '''What is love ?''', '''target''': '''life'''}
        lowercase : Tuple ={'''train''': 12, '''val''': 2, '''test''': 2}
        for split in ["train", "test", "val"]:
            for field in ["source", "target"]:
                # One identical line of dummy text per example in the split.
                lowercase : Any ='''\n'''.join([contents[field]] * n_lines[split] )
                with open(os.path.join(UpperCAmelCase__ , F'''{split}.{field}''' ) , '''w''' ) as f:
                    f.write(UpperCAmelCase__ )

    def lowerCamelCase_ ( self : Union[str, Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : str = "pytorch" ):
        """Run finetune_rag.py end to end on the dummy data and return the parsed metrics.json."""
        lowercase : Any =self.get_auto_remove_tmp_dir()
        lowercase : List[Any] =os.path.join(UpperCAmelCase__ , '''output''' )
        lowercase : List[Any] =os.path.join(UpperCAmelCase__ , '''data''' )
        self._create_dummy_data(data_dir=UpperCAmelCase__ )
        # Command line for the fine-tuning script; hyperparameters are kept
        # tiny so the subprocess finishes quickly.
        lowercase : List[Any] =F'''
            --data_dir {data_dir} \
            --output_dir {output_dir} \
            --model_name_or_path facebook/rag-sequence-base \
            --model_type rag_sequence \
            --do_train \
            --do_predict \
            --n_val -1 \
            --val_check_interval 1.0 \
            --train_batch_size 2 \
            --eval_batch_size 1 \
            --max_source_length 25 \
            --max_target_length 25 \
            --val_max_target_length 25 \
            --test_max_target_length 25 \
            --label_smoothing 0.1 \
            --dropout 0.1 \
            --attention_dropout 0.1 \
            --weight_decay 0.001 \
            --adam_epsilon 1e-08 \
            --max_grad_norm 0.1 \
            --lr_scheduler polynomial \
            --learning_rate 3e-04 \
            --num_train_epochs 1 \
            --warmup_steps 4 \
            --gradient_accumulation_steps 1 \
            --distributed-port 8787 \
            --use_dummy_dataset 1 \
            --distributed_retriever {distributed_retriever} \
        '''.split()
        if gpus > 0:
            testargs.append(F'''--gpus={gpus}''' )
            # fp16 only when apex is installed.
            if is_apex_available():
                testargs.append('''--fp16''' )
        else:
            # CPU fallback: two ddp_cpu processes instead of GPUs.
            testargs.append('''--gpus=0''' )
            testargs.append('''--distributed_backend=ddp_cpu''' )
            testargs.append('''--num_processes=2''' )
        lowercase : Dict =[sys.executable, str(Path(finetune_rag.__file__ ).resolve() )] + testargs
        execute_subprocess_async(UpperCAmelCase__ , env=self.get_env() )
        lowercase : Tuple =os.path.join(UpperCAmelCase__ , '''metrics.json''' )
        with open(UpperCAmelCase__ ) as f:
            lowercase : Optional[Any] =json.load(UpperCAmelCase__ )
        return result

    @require_torch_gpu
    def lowerCamelCase_ ( self : Optional[int] ):
        """Single-GPU run must reach at least 0.2 average exact match on the test split."""
        lowercase : List[str] =self._run_finetune(gpus=1 )
        self.assertGreaterEqual(result['''test'''][0]['''test_avg_em'''] , 0.2 )

    @require_torch_multi_gpu
    def lowerCamelCase_ ( self : List[Any] ):
        """Same threshold as above, but distributed over two GPUs."""
        lowercase : List[Any] =self._run_finetune(gpus=2 )
        self.assertGreaterEqual(result['''test'''][0]['''test_avg_em'''] , 0.2 )

    @require_torch_gpu
    @require_ray
    def lowerCamelCase_ ( self : Dict ):
        """Single GPU with the Ray-based distributed retriever."""
        lowercase : List[Any] =self._run_finetune(gpus=1 , distributed_retriever='''ray''' )
        self.assertGreaterEqual(result['''test'''][0]['''test_avg_em'''] , 0.2 )

    @require_torch_multi_gpu
    @require_ray
    def lowerCamelCase_ ( self : int ):
        """Multi-GPU marker with the Ray retriever (still launches one GPU)."""
        lowercase : Optional[Any] =self._run_finetune(gpus=1 , distributed_retriever='''ray''' )
        self.assertGreaterEqual(result['''test'''][0]['''test_avg_em'''] , 0.2 )
92
import inspect
import unittest
# Fix: the `Any` / `List` return annotations are evaluated at definition time
# and were previously unimported (NameError at class creation).
from typing import Any, List


class _lowerCamelCase(unittest.TestCase):
    """Checks that every backend required by a diffusers dummy object is
    present in the dependency table."""

    def UpperCamelCase_(self) -> Any:
        """diffusers must be importable at all."""
        try:
            import diffusers  # noqa: F401
        except ImportError:
            assert False

    # NOTE(review): this method shares its (mangled) name with the one above
    # and therefore shadows it; upstream gives each a distinct ``test_*`` name.
    # Restore the real names once external callers are confirmed.
    def UpperCamelCase_(self) -> List[str]:
        """Every backend named by a dummy class must appear in the deps table."""
        import diffusers
        from diffusers.dependency_versions_table import deps

        # Fix: the original passed an undefined placeholder to getmembers;
        # we must walk the classes exposed at the top level of `diffusers`.
        all_classes = inspect.getmembers(diffusers, inspect.isclass)
        for cls_name, cls_module in all_classes:
            # Dummy objects live in dummy_* modules and list the backends that
            # would be needed to use the real class.
            if "dummy_" in cls_module.__module__:
                for backend in cls_module._backends:
                    # PyPI package names differ from the import names here.
                    if backend == "k_diffusion":
                        backend = "k-diffusion"
                    elif backend == "invisible_watermark":
                        backend = "invisible-watermark"
                    assert backend in deps, f"{backend} is not in the deps table!"
64
0
"""simple docstring""" import unittest from transformers import DebertaConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( DebertaForMaskedLM, DebertaForQuestionAnswering, DebertaForSequenceClassification, DebertaForTokenClassification, DebertaModel, ) from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST class _lowerCAmelCase ( a ): """simple docstring""" def __init__( self , __UpperCAmelCase , __UpperCAmelCase=1_3 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=9_9 , __UpperCAmelCase=3_2 , __UpperCAmelCase=5 , __UpperCAmelCase=4 , __UpperCAmelCase=3_7 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=5_1_2 , __UpperCAmelCase=1_6 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase="None" , __UpperCAmelCase=3 , __UpperCAmelCase=4 , __UpperCAmelCase=None , ): '''simple docstring''' lowerCAmelCase__ :Optional[Any] = parent lowerCAmelCase__ :Optional[int] = batch_size lowerCAmelCase__ :List[Any] = seq_length lowerCAmelCase__ :int = is_training lowerCAmelCase__ :Optional[Any] = use_input_mask lowerCAmelCase__ :str = use_token_type_ids lowerCAmelCase__ :Tuple = use_labels lowerCAmelCase__ :Any = vocab_size lowerCAmelCase__ :Union[str, Any] = hidden_size lowerCAmelCase__ :List[str] = num_hidden_layers lowerCAmelCase__ :Optional[int] = num_attention_heads lowerCAmelCase__ :Tuple = intermediate_size lowerCAmelCase__ :Union[str, Any] = hidden_act lowerCAmelCase__ :int = hidden_dropout_prob lowerCAmelCase__ :str = 
attention_probs_dropout_prob lowerCAmelCase__ :List[Any] = max_position_embeddings lowerCAmelCase__ :str = type_vocab_size lowerCAmelCase__ :List[str] = type_sequence_label_size lowerCAmelCase__ :int = initializer_range lowerCAmelCase__ :List[str] = num_labels lowerCAmelCase__ :List[Any] = num_choices lowerCAmelCase__ :int = relative_attention lowerCAmelCase__ :Optional[Any] = position_biased_input lowerCAmelCase__ :Union[str, Any] = pos_att_type lowerCAmelCase__ :Optional[Any] = scope def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCAmelCase__ :Union[str, Any] = None if self.use_input_mask: lowerCAmelCase__ :Tuple = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) lowerCAmelCase__ :Union[str, Any] = None if self.use_token_type_ids: lowerCAmelCase__ :str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowerCAmelCase__ :Any = None lowerCAmelCase__ :int = None lowerCAmelCase__ :str = None if self.use_labels: lowerCAmelCase__ :Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCAmelCase__ :str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCAmelCase__ :Dict = ids_tensor([self.batch_size] , self.num_choices ) lowerCAmelCase__ :List[str] = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def snake_case ( self ): '''simple docstring''' return DebertaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , 
relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Optional[int] = self.get_config() lowerCAmelCase__ :List[Any] = 3_0_0 return config def snake_case ( self , __UpperCAmelCase ): '''simple docstring''' self.parent.assertListEqual(list(result.loss.size() ) , [] ) def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :str = DebertaModel(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() lowerCAmelCase__ :Dict = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase )[0] lowerCAmelCase__ :List[str] = model(__UpperCAmelCase , token_type_ids=__UpperCAmelCase )[0] lowerCAmelCase__ :str = model(__UpperCAmelCase )[0] self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] ) def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :Dict = DebertaForMaskedLM(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() lowerCAmelCase__ :Tuple = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :Dict = self.num_labels lowerCAmelCase__ :List[str] = DebertaForSequenceClassification(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() lowerCAmelCase__ :Tuple = 
model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] ) self.check_loss_output(__UpperCAmelCase ) def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :Tuple = self.num_labels lowerCAmelCase__ :int = DebertaForTokenClassification(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() lowerCAmelCase__ :Dict = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :Dict = DebertaForQuestionAnswering(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() lowerCAmelCase__ :Tuple = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , start_positions=__UpperCAmelCase , end_positions=__UpperCAmelCase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :int = self.prepare_config_and_inputs() ( ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ) :Optional[Any] = config_and_inputs lowerCAmelCase__ :Optional[int] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class 
_lowerCAmelCase ( a , a , unittest.TestCase ): """simple docstring""" __magic_name__ :List[Any] = ( ( DebertaModel, DebertaForMaskedLM, DebertaForSequenceClassification, DebertaForTokenClassification, DebertaForQuestionAnswering, ) if is_torch_available() else () ) __magic_name__ :List[str] = ( { """feature-extraction""": DebertaModel, """fill-mask""": DebertaForMaskedLM, """question-answering""": DebertaForQuestionAnswering, """text-classification""": DebertaForSequenceClassification, """token-classification""": DebertaForTokenClassification, """zero-shot""": DebertaForSequenceClassification, } if is_torch_available() else {} ) __magic_name__ :str = True __magic_name__ :Tuple = False __magic_name__ :int = False __magic_name__ :Optional[int] = False __magic_name__ :List[str] = False def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Optional[Any] = DebertaModelTester(self ) lowerCAmelCase__ :Optional[Any] = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=3_7 ) def snake_case ( self ): '''simple docstring''' self.config_tester.run_common_tests() def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_model(*__UpperCAmelCase ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_sequence_classification(*__UpperCAmelCase ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_masked_lm(*__UpperCAmelCase ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_question_answering(*__UpperCAmelCase ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Tuple = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_token_classification(*__UpperCAmelCase ) @slow def snake_case ( self ): '''simple docstring''' for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCAmelCase__ :Dict = DebertaModel.from_pretrained(__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) @require_torch @require_sentencepiece @require_tokenizers class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" @unittest.skip(reason='Model not available yet' ) def snake_case ( self ): '''simple docstring''' pass @slow def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Union[str, Any] = DebertaModel.from_pretrained('microsoft/deberta-base' ) lowerCAmelCase__ :Any = torch.tensor([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] ) lowerCAmelCase__ :List[Any] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): lowerCAmelCase__ :Optional[Any] = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase )[0] # compare the actual values for a slice. lowerCAmelCase__ :Any = torch.tensor( [[[-0.59_86, -0.80_55, -0.84_62], [1.44_84, -0.93_48, -0.80_59], [0.31_23, 0.00_32, -1.41_31]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __UpperCAmelCase , atol=1E-4 ) , F"{output[:, 1:4, 1:4]}" )
93
"""Release-time SageMaker smoke tests: fine-tune roberta-large on GLUE with
smdistributed model parallelism and check runtime/accuracy/loss thresholds.

NOTE(review): this file is auto-mangled — locals are bound to placeholder
names (`SCREAMING_SNAKE_CASE__`) while later lines read real names
(`smp_options`, `estimator`, `train_runtime`, ...), and all methods share one
mangled name so earlier definitions are shadowed.  Code kept byte-identical;
confirm against the upstream transformers SageMaker test.
"""
import json
import os
import subprocess
import unittest
from ast import literal_eval

import pytest
from parameterized import parameterized, parameterized_class

from . import is_sagemaker_available

if is_sagemaker_available():
    from sagemaker import Session, TrainingJobAnalytics
    from sagemaker.huggingface import HuggingFace


# Only runs when TEST_SAGEMAKER=True is exported (release process).
@pytest.mark.skipif(
    literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True ,
    reason="Skipping test because should only be run when releasing minor transformers version" ,
)
@pytest.mark.usefixtures("sm_env" )
@parameterized_class(
    [
        {
            "framework": "pytorch",
            "script": "run_glue_model_parallelism.py",
            "model_name_or_path": "roberta-large",
            "instance_type": "ml.p3dn.24xlarge",
            "results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2},
        },
        {
            "framework": "pytorch",
            "script": "run_glue.py",
            "model_name_or_path": "roberta-large",
            "instance_type": "ml.p3dn.24xlarge",
            "results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2},
        },
    ]
)
class _lowerCamelCase ( unittest.TestCase ):
    def UpperCamelCase_ ( self ) -> Any:
        """Copy the GLUE training script next to the test and check the sm_env fixture exists."""
        if self.framework == "pytorch":
            # NOTE(review): `lowerCAmelCase` is undefined; `check=` presumably
            # wants True — confirm upstream.
            subprocess.run(
                f'cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'.split() ,
                encoding='''utf-8''' ,
                check=lowerCAmelCase ,
            )
        assert hasattr(self , '''env''' )

    def UpperCamelCase_ ( self , lowerCAmelCase ) -> Tuple:
        """Build a HuggingFace estimator configured for smdistributed model parallelism."""
        # configuration for running training on smdistributed Model Parallel
        SCREAMING_SNAKE_CASE__: Optional[Any]= {
            '''enabled''': True,
            '''processes_per_host''': 8,
        }
        SCREAMING_SNAKE_CASE__: Dict= {
            '''enabled''': True,
            '''parameters''': {
                '''microbatches''': 4,
                '''placement_strategy''': '''spread''',
                '''pipeline''': '''interleaved''',
                '''optimize''': '''speed''',
                '''partitions''': 4,
                '''ddp''': True,
            },
        }
        SCREAMING_SNAKE_CASE__: Optional[Any]= {'''smdistributed''': {'''modelparallel''': smp_options}, '''mpi''': mpi_options}
        SCREAMING_SNAKE_CASE__: Dict= '''trainer''' if self.script == '''run_glue.py''' else '''smtrainer'''
        # creates estimator
        return HuggingFace(
            entry_point=self.script ,
            source_dir=self.env.test_path ,
            role=self.env.role ,
            image_uri=self.env.image_uri ,
            base_job_name=f'{self.env.base_job_name}-{instance_count}-smp-{name_extension}' ,
            instance_count=lowerCAmelCase ,
            instance_type=self.instance_type ,
            debugger_hook_config=lowerCAmelCase ,
            hyperparameters={
                **self.env.hyperparameters,
                '''model_name_or_path''': self.model_name_or_path,
                '''max_steps''': 500,
            } ,
            metric_definitions=self.env.metric_definitions ,
            distribution=lowerCAmelCase ,
            py_version='''py36''' ,
        )

    def UpperCamelCase_ ( self , lowerCAmelCase ) -> int:
        """Export the CloudWatch metrics of a finished training job to CSV."""
        TrainingJobAnalytics(lowerCAmelCase ).export_csv(f'{self.env.test_path}/{job_name}_metrics.csv' )

    @parameterized.expand([(1,)] )
    def UpperCamelCase_ ( self , lowerCAmelCase ) -> int:
        """Run one training job and assert runtime/accuracy/loss meet the thresholds."""
        # create estimator
        SCREAMING_SNAKE_CASE__: List[str]= self.create_estimator(lowerCAmelCase )
        # run training
        estimator.fit()
        # result dataframe
        SCREAMING_SNAKE_CASE__: Any= TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
        # extract kpis
        SCREAMING_SNAKE_CASE__: Optional[int]= list(result_metrics_df[result_metrics_df.metric_name == '''eval_accuracy''']['''value'''] )
        SCREAMING_SNAKE_CASE__: Optional[int]= list(result_metrics_df[result_metrics_df.metric_name == '''eval_loss''']['''value'''] )
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        SCREAMING_SNAKE_CASE__: List[Any]= (
            Session().describe_training_job(estimator.latest_training_job.name ).get('''TrainingTimeInSeconds''' , 999999 )
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy )
        assert all(t <= self.results['''eval_loss'''] for t in eval_loss )
        # dump tests result into json file to share in PR
        with open(f'{estimator.latest_training_job.name}.json' , '''w''' ) as outfile:
            json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss} , lowerCAmelCase )
64
0
"""Lazy-import init for BioGPT: tokenizer/config symbols are always
importable; modeling symbols are registered only when torch is available."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# Fix: this dict (and the torch-only list below) was bound to a throwaway
# placeholder name, leaving `_import_structure` undefined at the bottom.
_import_structure = {
    "configuration_biogpt": ["BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BioGptConfig"],
    "tokenization_biogpt": ["BioGptTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # torch is present: expose the modeling classes as well.
    _import_structure["modeling_biogpt"] = [
        "BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BioGptForCausalLM",
        "BioGptForTokenClassification",
        "BioGptForSequenceClassification",
        "BioGptModel",
        "BioGptPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
    from .tokenization_biogpt import BioGptTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_biogpt import (
            BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BioGptForCausalLM,
            BioGptForSequenceClassification,
            BioGptForTokenClassification,
            BioGptModel,
            BioGptPreTrainedModel,
        )
else:
    import sys

    # Fix: install the lazy proxy into sys.modules instead of binding it to a
    # dead local name.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
94
"""Nightly GPU test for the legacy ONNX Stable Diffusion inpaint pipeline.

NOTE(review): this file is auto-mangled — locals are bound to a placeholder
name (`SCREAMING_SNAKE_CASE__`) while later lines read the real names
(`options`, `pipe`, `output`, `image`, `expected_image`, ...), and the two
properties share one mangled name while `self.gpu_provider` /
`self.gpu_options` are read below.  Code kept byte-identical; confirm against
the upstream diffusers test before relying on it executing.
"""
import unittest

import numpy as np

from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
    is_onnx_available,
    load_image,
    load_numpy,
    nightly,
    require_onnxruntime,
    require_torch_gpu,
)

if is_onnx_available():
    import onnxruntime as ort


@nightly
@require_onnxruntime
@require_torch_gpu
class _lowerCamelCase ( unittest.TestCase ):
    @property
    def UpperCamelCase_ ( self ) -> List[str]:
        """ONNX Runtime CUDA execution provider capped at a 15GB arena."""
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def UpperCamelCase_ ( self ) -> List[Any]:
        """Session options with memory-pattern optimization disabled (upstream)."""
        SCREAMING_SNAKE_CASE__: Dict= ort.SessionOptions()
        SCREAMING_SNAKE_CASE__: List[str]= False
        return options

    def UpperCamelCase_ ( self ) -> int:
        """Inpaint a red cat onto the bench fixture and compare with a stored reference array."""
        SCREAMING_SNAKE_CASE__: Dict= load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/in_paint/overture-creations-5sI6fQgYIuo.png''' )
        SCREAMING_SNAKE_CASE__: int= load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' )
        SCREAMING_SNAKE_CASE__: Tuple= load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy''' )
        # using the PNDM scheduler by default
        SCREAMING_SNAKE_CASE__: Tuple= OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
            '''CompVis/stable-diffusion-v1-4''' ,
            revision='''onnx''' ,
            safety_checker=lowerCAmelCase ,
            feature_extractor=lowerCAmelCase ,
            provider=self.gpu_provider ,
            sess_options=self.gpu_options ,
        )
        pipe.set_progress_bar_config(disable=lowerCAmelCase )
        SCREAMING_SNAKE_CASE__: Dict= '''A red cat sitting on a park bench'''
        # Fixed seed for reproducible sampling.
        SCREAMING_SNAKE_CASE__: Optional[Any]= np.random.RandomState(0 )
        SCREAMING_SNAKE_CASE__: Any= pipe(
            prompt=lowerCAmelCase ,
            image=lowerCAmelCase ,
            mask_image=lowerCAmelCase ,
            strength=0.75 ,
            guidance_scale=7.5 ,
            num_inference_steps=15 ,
            generator=lowerCAmelCase ,
            output_type='''np''' ,
        )
        SCREAMING_SNAKE_CASE__: Any= output.images[0]
        assert image.shape == (512, 512, 3)
        # Loose elementwise tolerance against the reference render.
        assert np.abs(expected_image - image ).max() < 1e-2
64
0
"""simple docstring""" def snake_case ( A__ ,A__ ): print("\nThe shortest path matrix using Floyd Warshall algorithm\n" ) for i in range(A__ ): for j in range(A__ ): if dist[i][j] != float("inf" ): print(int(dist[i][j] ) ,end="\t" ) else: print("INF" ,end="\t" ) print() def snake_case ( A__ ,A__ ): UpperCAmelCase_ : Any = [[float("inf" ) for _ in range(A__ )] for _ in range(A__ )] for i in range(A__ ): for j in range(A__ ): UpperCAmelCase_ : List[Any] = graph[i][j] # check vertex k against all other vertices (i, j) for k in range(A__ ): # looping through rows of graph array for i in range(A__ ): # looping through columns of graph array for j in range(A__ ): if ( dist[i][k] != float("inf" ) and dist[k][j] != float("inf" ) and dist[i][k] + dist[k][j] < dist[i][j] ): UpperCAmelCase_ : Dict = dist[i][k] + dist[k][j] _print_dist(A__ ,A__ ) return dist, v if __name__ == "__main__": lowerCamelCase_ = int(input('''Enter number of vertices: ''')) lowerCamelCase_ = int(input('''Enter number of edges: ''')) lowerCamelCase_ = [[float('''inf''') for i in range(v)] for j in range(v)] for i in range(v): lowerCamelCase_ = 0.0 # src and dst are indices that must be within the array size graph[e][v] # failure to follow this will result in an error for i in range(e): print('''\nEdge ''', i + 1) lowerCamelCase_ = int(input('''Enter source:''')) lowerCamelCase_ = int(input('''Enter destination:''')) lowerCamelCase_ = float(input('''Enter weight:''')) lowerCamelCase_ = weight floyd_warshall(graph, v) # Example Input # Enter number of vertices: 3 # Enter number of edges: 2 # # generated graph from vertex and edge inputs # [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]] # [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]] # specify source, destination and weight for edge #1 # Edge 1 # Enter source:1 # Enter destination:2 # Enter weight:2 # specify source, destination and weight for edge #2 # Edge 2 # Enter source:2 # Enter destination:1 # Enter weight:1 # # Expected Output from the 
vertice, edge and src, dst, weight inputs!! # 0 INF INF # INF 0 2 # INF 1 0
95
"""PyTorch benchmark arguments: device selection (CPU / TPU / CUDA) plus
deprecated-flag translation for the transformers benchmark utilities.

NOTE(review): this file is auto-mangled — locals bind a placeholder name while
later lines read real names (`kwargs`, `positive_arg`, `device`, `n_gpu`), all
field attributes share the name `__a` (mutual shadowing; upstream these are
`deprecated_args`, `torchscript`, `torch_xla_tpu_print_metrics`,
`fp16_opt_level`), and the base/defaults reference an undefined
`UpperCamelCase_` (presumably `BenchmarkArguments` / `False`).  Code kept
byte-identical; confirm against the upstream transformers file.
"""
from dataclasses import dataclass, field
from typing import Tuple

from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments

if is_torch_available():
    import torch

if is_torch_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm

lowercase_ : List[Any] = logging.get_logger(__name__)


@dataclass
class _lowerCamelCase ( UpperCamelCase_ ):
    # Legacy negated flags accepted for backward compatibility; __init__
    # rewrites each into its positive counterpart with the value inverted.
    __a = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__( self , **lowerCAmelCase ) -> str:
        """Translate deprecated `no_*` kwargs into their positive forms, warn, then defer to the base dataclass."""
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                # Strip the leading "no_" and store the inverted value.
                SCREAMING_SNAKE_CASE__: str= deprecated_arg[3:]
                setattr(self , lowerCAmelCase , not kwargs.pop(lowerCAmelCase ) )
                logger.warning(
                    f'{deprecated_arg} is depreciated. Please use --no_{positive_arg} or'
                    f' {positive_arg}={kwargs[positive_arg]}' )
        SCREAMING_SNAKE_CASE__: Tuple= kwargs.pop('''torchscript''' , self.torchscript )
        SCREAMING_SNAKE_CASE__: Union[str, Any]= kwargs.pop('''torch_xla_tpu_print_metrics''' , self.torch_xla_tpu_print_metrics )
        SCREAMING_SNAKE_CASE__: Any= kwargs.pop('''fp16_opt_level''' , self.fpaa_opt_level )
        super().__init__(**lowerCAmelCase )

    __a = field(default=UpperCamelCase_ , metadata={"help": "Trace the models using torchscript"} )
    __a = field(default=UpperCamelCase_ , metadata={"help": "Print Xla/PyTorch tpu metrics"} )
    __a = field(
        default="O1" ,
        metadata={
            "help": (
                "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
                "See details at https://nvidia.github.io/apex/amp.html"
            )
        } ,
    )

    @cached_property
    def UpperCamelCase_ ( self ) -> Tuple["torch.device", int]:
        """Pick the torch device (CPU, TPU, or CUDA) and count GPUs; cached after the first call."""
        requires_backends(self , ['''torch'''] )
        logger.info('''PyTorch: setting up devices''' )
        if not self.cuda:
            SCREAMING_SNAKE_CASE__: Any= torch.device('''cpu''' )
            SCREAMING_SNAKE_CASE__: Union[str, Any]= 0
        elif is_torch_tpu_available():
            SCREAMING_SNAKE_CASE__: List[str]= xm.xla_device()
            SCREAMING_SNAKE_CASE__: Any= 0
        else:
            SCREAMING_SNAKE_CASE__: List[Any]= torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
            SCREAMING_SNAKE_CASE__: List[str]= torch.cuda.device_count()
        return device, n_gpu

    @property
    def UpperCamelCase_ ( self ) -> Optional[Any]:
        """True when a TPU is both available and requested."""
        return is_torch_tpu_available() and self.tpu

    @property
    def UpperCamelCase_ ( self ) -> int:
        requires_backends(self , ['''torch'''] )
        # TODO(PVP): currently only single GPU is supported
        return torch.cuda.current_device()

    @property
    def UpperCamelCase_ ( self ) -> "torch.device":
        """The selected torch device."""
        requires_backends(self , ['''torch'''] )
        return self._setup_devices[0]

    @property
    def UpperCamelCase_ ( self ) -> int:
        """Number of usable GPUs (0 on CPU/TPU)."""
        requires_backends(self , ['''torch'''] )
        return self._setup_devices[1]

    @property
    def UpperCamelCase_ ( self ) -> str:
        """Whether at least one GPU is in use."""
        return self.n_gpu > 0
64
0
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, convert_to_rgb, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging __lowerCamelCase = logging.get_logger(__name__) if is_vision_available(): import PIL class __A ( SCREAMING_SNAKE_CASE_ ): UpperCAmelCase__ = ["pixel_values"] def __init__( self : Optional[int] , __snake_case : bool = True , __snake_case : Dict[str, int] = None , __snake_case : PILImageResampling = PILImageResampling.BICUBIC , __snake_case : bool = True , __snake_case : Dict[str, int] = None , __snake_case : bool = True , __snake_case : Union[int, float] = 1 / 2_5_5 , __snake_case : bool = True , __snake_case : Optional[Union[float, List[float]]] = None , __snake_case : Optional[Union[float, List[float]]] = None , __snake_case : bool = True , **__snake_case : List[str] , ) -> None: super().__init__(**__snake_case ) __magic_name__: Tuple = size if size is not None else {"""shortest_edge""": 2_2_4} __magic_name__: str = get_size_dict(__snake_case , default_to_square=__snake_case ) __magic_name__: Dict = crop_size if crop_size is not None else {"""height""": 2_2_4, """width""": 2_2_4} __magic_name__: Union[str, Any] = get_size_dict(__snake_case , default_to_square=__snake_case , param_name="""crop_size""" ) __magic_name__: Dict = do_resize __magic_name__: Union[str, Any] = size __magic_name__: int = resample __magic_name__: Any = do_center_crop __magic_name__: int = crop_size __magic_name__: List[Any] = do_rescale __magic_name__: int = rescale_factor __magic_name__: Any = do_normalize __magic_name__: List[str] = image_mean 
if image_mean is not None else OPENAI_CLIP_MEAN __magic_name__: str = image_std if image_std is not None else OPENAI_CLIP_STD __magic_name__: Optional[int] = do_convert_rgb def lowerCamelCase__ ( self : List[str] , __snake_case : np.ndarray , __snake_case : Dict[str, int] , __snake_case : PILImageResampling = PILImageResampling.BICUBIC , __snake_case : Optional[Union[str, ChannelDimension]] = None , **__snake_case : str , ) -> np.ndarray: __magic_name__: Union[str, Any] = get_size_dict(__snake_case , default_to_square=__snake_case ) if "shortest_edge" not in size: raise ValueError(F'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' ) __magic_name__: List[str] = get_resize_output_image_size(__snake_case , size=size["""shortest_edge"""] , default_to_square=__snake_case ) return resize(__snake_case , size=__snake_case , resample=__snake_case , data_format=__snake_case , **__snake_case ) def lowerCamelCase__ ( self : Any , __snake_case : np.ndarray , __snake_case : Dict[str, int] , __snake_case : Optional[Union[str, ChannelDimension]] = None , **__snake_case : Tuple , ) -> np.ndarray: __magic_name__: str = get_size_dict(__snake_case ) if "height" not in size or "width" not in size: raise ValueError(F'The `size` parameter must contain the keys (height, width). 
Got {size.keys()}' ) return center_crop(__snake_case , size=(size["""height"""], size["""width"""]) , data_format=__snake_case , **__snake_case ) def lowerCamelCase__ ( self : Optional[int] , __snake_case : np.ndarray , __snake_case : Union[int, float] , __snake_case : Optional[Union[str, ChannelDimension]] = None , **__snake_case : List[str] , ) -> Optional[Any]: return rescale(__snake_case , scale=__snake_case , data_format=__snake_case , **__snake_case ) def lowerCamelCase__ ( self : str , __snake_case : np.ndarray , __snake_case : Union[float, List[float]] , __snake_case : Union[float, List[float]] , __snake_case : Optional[Union[str, ChannelDimension]] = None , **__snake_case : Optional[int] , ) -> np.ndarray: return normalize(__snake_case , mean=__snake_case , std=__snake_case , data_format=__snake_case , **__snake_case ) def lowerCamelCase__ ( self : Union[str, Any] , __snake_case : ImageInput , __snake_case : bool = None , __snake_case : Dict[str, int] = None , __snake_case : PILImageResampling = None , __snake_case : bool = None , __snake_case : int = None , __snake_case : bool = None , __snake_case : float = None , __snake_case : bool = None , __snake_case : Optional[Union[float, List[float]]] = None , __snake_case : Optional[Union[float, List[float]]] = None , __snake_case : bool = None , __snake_case : Optional[Union[str, TensorType]] = None , __snake_case : Optional[ChannelDimension] = ChannelDimension.FIRST , **__snake_case : Optional[Any] , ) -> PIL.Image.Image: __magic_name__: List[str] = do_resize if do_resize is not None else self.do_resize __magic_name__: Tuple = size if size is not None else self.size __magic_name__: Any = get_size_dict(__snake_case , param_name="""size""" , default_to_square=__snake_case ) __magic_name__: Dict = resample if resample is not None else self.resample __magic_name__: Optional[int] = do_center_crop if do_center_crop is not None else self.do_center_crop __magic_name__: int = crop_size if crop_size is not None else 
self.crop_size __magic_name__: List[Any] = get_size_dict(__snake_case , param_name="""crop_size""" , default_to_square=__snake_case ) __magic_name__: List[Any] = do_rescale if do_rescale is not None else self.do_rescale __magic_name__: Tuple = rescale_factor if rescale_factor is not None else self.rescale_factor __magic_name__: int = do_normalize if do_normalize is not None else self.do_normalize __magic_name__: Optional[int] = image_mean if image_mean is not None else self.image_mean __magic_name__: Any = image_std if image_std is not None else self.image_std __magic_name__: str = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb __magic_name__: str = make_list_of_images(__snake_case ) if not valid_images(__snake_case ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None: raise ValueError("""Size must be specified if do_resize is True.""" ) if do_center_crop and crop_size is None: raise ValueError("""Crop size must be specified if do_center_crop is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) # PIL RGBA images are converted to RGB if do_convert_rgb: __magic_name__: int = [convert_to_rgb(__snake_case ) for image in images] # All transformations expect numpy arrays. 
__magic_name__: int = [to_numpy_array(__snake_case ) for image in images] if do_resize: __magic_name__: Union[str, Any] = [self.resize(image=__snake_case , size=__snake_case , resample=__snake_case ) for image in images] if do_center_crop: __magic_name__: int = [self.center_crop(image=__snake_case , size=__snake_case ) for image in images] if do_rescale: __magic_name__: Dict = [self.rescale(image=__snake_case , scale=__snake_case ) for image in images] if do_normalize: __magic_name__: int = [self.normalize(image=__snake_case , mean=__snake_case , std=__snake_case ) for image in images] __magic_name__: List[Any] = [to_channel_dimension_format(__snake_case , __snake_case ) for image in images] __magic_name__: Tuple = {"""pixel_values""": images} return BatchFeature(data=__snake_case , tensor_type=__snake_case )
96
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import PoolFormerImageProcessor


class PoolFormerImageProcessingTester(unittest.TestCase):
    """Holds the hyper-parameters used to build PoolFormer image-processor test inputs."""

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize_and_center_crop=True,
        size=None,
        crop_pct=0.9,
        crop_size=None,
        do_normalize=True,
        image_mean=None,
        image_std=None,
    ):
        # `None` defaults avoid shared mutable default arguments.
        self.size = size if size is not None else {"shortest_edge": 30}
        self.crop_size = crop_size if crop_size is not None else {"height": 30, "width": 30}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize_and_center_crop = do_resize_and_center_crop
        self.crop_pct = crop_pct
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else [0.5, 0.5, 0.5]
        self.image_std = image_std if image_std is not None else [0.5, 0.5, 0.5]

    def prepare_image_processor_dict(self):
        """Return the kwargs dict used to instantiate the image processor under test."""
        return {
            "size": self.size,
            "do_resize_and_center_crop": self.do_resize_and_center_crop,
            "crop_pct": self.crop_pct,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }


@require_torch
@require_vision
class PoolFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Checks PoolFormerImageProcessor properties, config round-trip, and output shapes."""

    image_processing_class = PoolFormerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = PoolFormerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize_and_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "crop_pct"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 30})
        self.assertEqual(image_processor.crop_size, {"height": 30, "width": 30})

        # Integer kwargs must be expanded to the canonical dict form.
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
64
0
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    convert_to_rgb,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    OPENAI_CLIP_MEAN,
    OPENAI_CLIP_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging


__a = logging.get_logger(__name__)


if is_vision_available():
    import PIL


class lowercase__(BaseImageProcessor):
    """
    CLIP-style image processor.

    Steps (each individually switchable): RGB conversion, shortest-edge resize,
    center crop, pixel rescale, and channel-wise normalization (OpenAI CLIP
    mean/std by default).
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")
        # Persist configuration; `preprocess` reads these as per-call fallbacks.
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize so the shortest edge matches `size["shortest_edge"]`, keeping aspect ratio."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop the image to `(size["height"], size["width"])`."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Scale pixel values by `scale` (typically 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Channel-wise normalize: `(image - mean) / std`."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: int = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        """
        Preprocess one image or a batch; any argument left as `None` falls back to the
        value configured at construction. Returns a `BatchFeature` keyed `"pixel_values"`.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
97
import os
import random
import sys

from . import cryptomath_module as cryptomath
from . import rabin_miller

# Smallest candidate tried when searching for a primitive root / private key.
min_primitive_root = 3


def primitive_root(p_val: int) -> int:
    """Randomly search for a primitive root modulo the prime `p_val`.

    A candidate g is rejected if g^2 == 1 (mod p) or g^p == 1 (mod p).
    NOTE(review): this is the original heuristic check, not a full primality-
    factorization test of the group order — kept as-is to preserve behavior.
    """
    print("Generating primitive root of p")
    while True:
        g = random.randrange(3, p_val)
        if pow(g, 2, p_val) == 1:
            continue
        if pow(g, p_val, p_val) == 1:
            continue
        return g


def generate_key(key_size: int) -> tuple:
    """Generate an ElGamal (public_key, private_key) pair of `key_size` bits."""
    print("Generating prime p...")
    p = rabin_miller.generate_large_prime(key_size)  # select large prime number.
    e_a = primitive_root(p)  # one primitive root on modulo p.
    d = random.randrange(3, p)  # private_key -> have to be greater than 2 for safety.
    e_a_inv = cryptomath.find_mod_inverse(pow(e_a, d, p), p)

    public_key = (key_size, e_a, e_a_inv, p)
    private_key = (key_size, d)
    return public_key, private_key


def make_key_files(name: str, key_size: int) -> None:
    """Write `{name}_pubkey.txt` and `{name}_privkey.txt`; abort if either exists."""
    if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"):
        print("\nWARNING:")
        print(
            f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            "Use a different name or delete these files and re-run this program."
        )
        sys.exit()

    public_key, private_key = generate_key(key_size)
    print(f"\nWriting public key to file {name}_pubkey.txt...")
    with open(f"{name}_pubkey.txt", "w") as fo:
        fo.write(f"{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}")
    print(f"Writing private key to file {name}_privkey.txt...")
    with open(f"{name}_privkey.txt", "w") as fo:
        fo.write(f"{private_key[0]},{private_key[1]}")


def main() -> None:
    """Entry point: generate a 2048-bit ElGamal key pair named 'elgamal'."""
    print("Making key files...")
    make_key_files("elgamal", 2048)
    print("Key files generation successful")


if __name__ == "__main__":
    main()
64
0
"""Smoke tests that launch the accelerate test scripts on CPU via debug_launcher."""
import unittest

from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script


@require_cpu
class __lowerCAmelCase(unittest.TestCase):
    """Runs the bundled accelerate test entry points under the CPU debug launcher."""

    def test_can_undergo_cpu_debug_launch(self):
        # Full accelerate test script must complete without error.
        debug_launcher(test_script.main)

    def test_dispatch_operations_can_undergo_cpu_debug_launch(self):
        # Distinct name from the test above: previously both methods shared one
        # name, so the second silently shadowed the first and only one test ran.
        debug_launcher(test_ops.main)
98
from math import factorial


def combinations(n: int, k: int) -> int:
    """Return the binomial coefficient "n choose k".

    Raises:
        ValueError: if k is negative or greater than n (a factorial of a
            negative number would otherwise be required).
    """
    # If either of the conditions are true, the function is being asked
    # to calculate a factorial of a negative number, which is not possible
    if n < k or k < 0:
        raise ValueError("Please enter positive integers for n and k where n >= k")
    return factorial(n) // (factorial(k) * factorial(n - k))


if __name__ == "__main__":
    print(
        "The number of five-card hands possible from a standard",
        f"fifty-two card deck is: {combinations(52, 5)}\n",
    )

    print(
        "If a class of 40 students must be arranged into groups of",
        f"4 for group projects, there are {combinations(40, 4)} ways",
        "to arrange them.\n",
    )

    print(
        "If 10 teams are competing in a Formula One race, there",
        f"are {combinations(10, 3)} ways that first, second and",
        "third place can be awarded.",
    )
64
0
import importlib
import os
import sys

# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append(".")


def get_module_path(test_file):
    """Turn `tests/models/<m>/test_modeling_<m>.py` into a dotted module path."""
    components = test_file.split(os.path.sep)
    if components[0:2] != ["tests", "models"]:
        raise ValueError(
            "`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got "
            f"{test_file} instead."
        )
    test_fn = components[-1]
    if not test_fn.endswith("py"):
        raise ValueError(f"`test_file` should be a python file. Got {test_fn} instead.")
    if not test_fn.startswith("test_modeling_"):
        raise ValueError(
            f"`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead."
        )
    components = components[:-1] + [test_fn.replace(".py", "")]
    test_module_path = ".".join(components)
    return test_module_path


def get_test_module(test_file):
    """Import and return the test module for `test_file`."""
    test_module_path = get_module_path(test_file)
    test_module = importlib.import_module(test_module_path)
    return test_module


def get_tester_classes(test_file):
    """Return all `*ModelTester` classes defined in the test module, sorted by name."""
    tester_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        if attr.endswith("ModelTester"):
            tester_classes.append(getattr(test_module, attr))
    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_classes(test_file):
    """Return all test classes in the module that declare a non-empty `all_model_classes`."""
    test_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        attr_value = getattr(test_module, attr)
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
        # `all_model_classes` is not empty (which also excludes other special classes).
        model_classes = getattr(attr_value, "all_model_classes", [])
        if len(model_classes) > 0:
            test_classes.append(attr_value)
    # sort with class names
    return sorted(test_classes, key=lambda x: x.__name__)


def get_model_classes(test_file):
    """Return the union of model classes covered by any test class, sorted by name."""
    test_classes = get_test_classes(test_file)
    model_classes = set()
    for test_class in test_classes:
        model_classes.update(test_class.all_model_classes)
    # sort with class names
    return sorted(model_classes, key=lambda x: x.__name__)


def get_model_tester_from_test_class(test_class):
    """Instantiate `test_class`, run `setUp`, and return its model tester's class (or None)."""
    test = test_class()
    if hasattr(test, "setUp"):
        test.setUp()
    model_tester = None
    if hasattr(test, "model_tester"):
        # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
        if test.model_tester is not None:
            model_tester = test.model_tester.__class__
    return model_tester


def get_test_classes_for_model(test_file, model_class):
    """Return test classes (sorted by name) whose `all_model_classes` include `model_class`."""
    test_classes = get_test_classes(test_file)
    target_test_classes = []
    for test_class in test_classes:
        if model_class in test_class.all_model_classes:
            target_test_classes.append(test_class)
    # sort with class names
    return sorted(target_test_classes, key=lambda x: x.__name__)


def get_tester_classes_for_model(test_file, model_class):
    """Return model-tester classes (sorted by name) associated with `model_class`."""
    test_classes = get_test_classes_for_model(test_file, model_class)
    tester_classes = []
    for test_class in test_classes:
        tester_class = get_model_tester_from_test_class(test_class)
        if tester_class is not None:
            tester_classes.append(tester_class)
    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_to_tester_mapping(test_file):
    """Map each test class in `test_file` to its model-tester class."""
    test_classes = get_test_classes(test_file)
    test_tester_mapping = {test_class: get_model_tester_from_test_class(test_class) for test_class in test_classes}
    return test_tester_mapping


def get_model_to_test_mapping(test_file):
    """Map each model class in `test_file` to the test classes covering it."""
    model_classes = get_model_classes(test_file)
    model_test_mapping = {
        model_class: get_test_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_test_mapping


def get_model_to_tester_mapping(test_file):
    """Map each model class in `test_file` to its model-tester classes."""
    model_classes = get_model_classes(test_file)
    model_to_tester_mapping = {
        model_class: get_tester_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_to_tester_mapping


def to_json(o):
    """Recursively convert classes to their names so structures can be JSON-serialized."""
    if isinstance(o, str):
        return o
    elif isinstance(o, type):
        return o.__name__
    elif isinstance(o, (list, tuple)):
        return [to_json(x) for x in o]
    elif isinstance(o, dict):
        return {to_json(k): to_json(v) for k, v in o.items()}
    else:
        return o
99
import itertools import random import unittest import numpy as np from transformers import ASTFeatureExtractor from transformers.testing_utils import require_torch, require_torchaudio from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin lowercase_ : Dict = random.Random() if is_torch_available(): import torch def A__ ( snake_case_ : int , snake_case_ : Optional[Any]=1.0 , snake_case_ : Dict=None , snake_case_ : Dict=None ): if rng is None: SCREAMING_SNAKE_CASE__: Tuple= global_rng SCREAMING_SNAKE_CASE__: List[str]= [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values class _lowerCamelCase ( unittest.TestCase ): def __init__( self , lowerCAmelCase , lowerCAmelCase=7 , lowerCAmelCase=400 , lowerCAmelCase=2000 , lowerCAmelCase=1 , lowerCAmelCase=0.0 , lowerCAmelCase=16000 , lowerCAmelCase=True , lowerCAmelCase=True , ) -> List[str]: SCREAMING_SNAKE_CASE__: Optional[Any]= parent SCREAMING_SNAKE_CASE__: Dict= batch_size SCREAMING_SNAKE_CASE__: Optional[int]= min_seq_length SCREAMING_SNAKE_CASE__: Dict= max_seq_length SCREAMING_SNAKE_CASE__: Optional[Any]= (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) SCREAMING_SNAKE_CASE__: Dict= feature_size SCREAMING_SNAKE_CASE__: str= padding_value SCREAMING_SNAKE_CASE__: Dict= sampling_rate SCREAMING_SNAKE_CASE__: List[str]= return_attention_mask SCREAMING_SNAKE_CASE__: str= do_normalize def UpperCamelCase_ ( self ) -> Optional[Any]: return { "feature_size": self.feature_size, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "return_attention_mask": self.return_attention_mask, "do_normalize": self.do_normalize, } def UpperCamelCase_ ( self , lowerCAmelCase=False , lowerCAmelCase=False ) -> Dict: def _flatten(lowerCAmelCase ): return list(itertools.chain(*lowerCAmelCase ) ) if equal_length: 
SCREAMING_SNAKE_CASE__: int= floats_list((self.batch_size, self.max_seq_length) ) else: # make sure that inputs increase in size SCREAMING_SNAKE_CASE__: int= [ _flatten(floats_list((x, self.feature_size) ) ) for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff ) ] if numpify: SCREAMING_SNAKE_CASE__: Optional[Any]= [np.asarray(lowerCAmelCase ) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class _lowerCamelCase ( UpperCamelCase_ , unittest.TestCase ): __a = ASTFeatureExtractor def UpperCamelCase_ ( self ) -> int: SCREAMING_SNAKE_CASE__: List[Any]= ASTFeatureExtractionTester(self ) def UpperCamelCase_ ( self ) -> Any: # Tests that all call wrap to encode_plus and batch_encode_plus SCREAMING_SNAKE_CASE__: Optional[int]= self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) # create three inputs of length 800, 1000, and 1200 SCREAMING_SNAKE_CASE__: Dict= [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] SCREAMING_SNAKE_CASE__: int= [np.asarray(lowerCAmelCase ) for speech_input in speech_inputs] # Test not batched input SCREAMING_SNAKE_CASE__: Optional[int]= feat_extract(speech_inputs[0] , return_tensors='''np''' ).input_values SCREAMING_SNAKE_CASE__: Optional[int]= feat_extract(np_speech_inputs[0] , return_tensors='''np''' ).input_values self.assertTrue(np.allclose(lowerCAmelCase , lowerCAmelCase , atol=1e-3 ) ) # Test batched SCREAMING_SNAKE_CASE__: Tuple= feat_extract(lowerCAmelCase , padding=lowerCAmelCase , return_tensors='''np''' ).input_values SCREAMING_SNAKE_CASE__: Union[str, Any]= feat_extract(lowerCAmelCase , padding=lowerCAmelCase , return_tensors='''np''' ).input_values for enc_seq_a, enc_seq_a in zip(lowerCAmelCase , lowerCAmelCase ): self.assertTrue(np.allclose(lowerCAmelCase , lowerCAmelCase , atol=1e-3 ) ) # Test 2-D numpy arrays are batched. 
SCREAMING_SNAKE_CASE__: Optional[int]= [floats_list((1, x) )[0] for x in (800, 800, 800)] SCREAMING_SNAKE_CASE__: List[Any]= np.asarray(lowerCAmelCase ) SCREAMING_SNAKE_CASE__: Optional[int]= feat_extract(lowerCAmelCase , return_tensors='''np''' ).input_values SCREAMING_SNAKE_CASE__: Optional[Any]= feat_extract(lowerCAmelCase , return_tensors='''np''' ).input_values for enc_seq_a, enc_seq_a in zip(lowerCAmelCase , lowerCAmelCase ): self.assertTrue(np.allclose(lowerCAmelCase , lowerCAmelCase , atol=1e-3 ) ) @require_torch def UpperCamelCase_ ( self ) -> Dict: import torch SCREAMING_SNAKE_CASE__: Optional[Any]= self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) SCREAMING_SNAKE_CASE__: List[str]= np.random.rand(100 ).astype(np.floataa ) SCREAMING_SNAKE_CASE__: Optional[Any]= np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: SCREAMING_SNAKE_CASE__: Optional[Any]= feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''np''' ) self.assertTrue(np_processed.input_values.dtype == np.floataa ) SCREAMING_SNAKE_CASE__: Optional[Any]= feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''pt''' ) self.assertTrue(pt_processed.input_values.dtype == torch.floataa ) def UpperCamelCase_ ( self , lowerCAmelCase ) -> Optional[int]: from datasets import load_dataset SCREAMING_SNAKE_CASE__: Optional[int]= load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' ) # automatic decoding with librispeech SCREAMING_SNAKE_CASE__: Dict= ds.sort('''id''' ).select(range(lowerCAmelCase ) )[:num_samples]['''audio'''] return [x["array"] for x in speech_samples] @require_torch def UpperCamelCase_ ( self ) -> str: # fmt: off SCREAMING_SNAKE_CASE__: str= torch.tensor( [-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776, -1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133, -1.1953, -1.1343, -1.1584, -1.2203, -1.1770, 
-1.2474, -1.2381, -1.1936, -0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869] ) # fmt: on SCREAMING_SNAKE_CASE__: Any= self._load_datasamples(1 ) SCREAMING_SNAKE_CASE__: Tuple= ASTFeatureExtractor() SCREAMING_SNAKE_CASE__: str= feature_extractor(lowerCAmelCase , return_tensors='''pt''' ).input_values self.assertEquals(input_values.shape , (1, 1024, 128) ) self.assertTrue(torch.allclose(input_values[0, 0, :30] , lowerCAmelCase , atol=1e-4 ) )
64
0
def __snake_case ( lowerCAmelCase_ ) -> str: if number > 0: raise ValueError('''input must be a negative integer''' ) SCREAMING_SNAKE_CASE__ = len(bin(lowerCAmelCase_ )[3:] ) SCREAMING_SNAKE_CASE__ = bin(abs(lowerCAmelCase_ ) - (1 << binary_number_length) )[3:] SCREAMING_SNAKE_CASE__ = ( ( '''1''' + '''0''' * (binary_number_length - len(lowerCAmelCase_ )) + twos_complement_number ) if number < 0 else '''0''' ) return "0b" + twos_complement_number if __name__ == "__main__": import doctest doctest.testmod()
100
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_speech_available, is_tf_available, is_torch_available, ) lowercase_ : List[Any] = { 'configuration_speech_to_text': ['SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Speech2TextConfig'], 'processing_speech_to_text': ['Speech2TextProcessor'], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ : Optional[Any] = ['Speech2TextTokenizer'] try: if not is_speech_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ : Any = ['Speech2TextFeatureExtractor'] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ : Optional[int] = [ 'TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFSpeech2TextForConditionalGeneration', 'TFSpeech2TextModel', 'TFSpeech2TextPreTrainedModel', ] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ : Optional[int] = [ 'SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST', 'Speech2TextForConditionalGeneration', 'Speech2TextModel', 'Speech2TextPreTrainedModel', ] if TYPE_CHECKING: from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig from .processing_speech_to_text import SpeechaTextProcessor try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_speech_to_text import SpeechaTextTokenizer try: if not is_speech_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except 
OptionalDependencyNotAvailable: pass else: from .modeling_tf_speech_to_text import ( TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST, TFSpeechaTextForConditionalGeneration, TFSpeechaTextModel, TFSpeechaTextPreTrainedModel, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_speech_to_text import ( SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST, SpeechaTextForConditionalGeneration, SpeechaTextModel, SpeechaTextPreTrainedModel, ) else: import sys lowercase_ : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
64
0
import warnings from typing import List, Optional, Union from ...image_utils import ImageInput from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class __lowercase (__SCREAMING_SNAKE_CASE ): """simple docstring""" _UpperCAmelCase = ["""image_processor""", """tokenizer"""] _UpperCAmelCase = """FlavaImageProcessor""" _UpperCAmelCase = ("""BertTokenizer""", """BertTokenizerFast""") def __init__( self , lowerCAmelCase__=None , lowerCAmelCase__=None , **lowerCAmelCase__ ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Any = None if "feature_extractor" in kwargs: warnings.warn( 'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`' ' instead.' , lowerCAmelCase__ , ) SCREAMING_SNAKE_CASE_ : Any = kwargs.pop('feature_extractor' ) SCREAMING_SNAKE_CASE_ : Any = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('You need to specify an `image_processor`.' ) if tokenizer is None: raise ValueError('You need to specify a `tokenizer`.' ) super().__init__(lowerCAmelCase__ , lowerCAmelCase__ ) SCREAMING_SNAKE_CASE_ : Dict = self.image_processor def __call__( self , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = True , lowerCAmelCase__ = False , lowerCAmelCase__ = False , lowerCAmelCase__ = None , lowerCAmelCase__ = 0 , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = False , lowerCAmelCase__ = False , lowerCAmelCase__ = False , lowerCAmelCase__ = False , lowerCAmelCase__ = True , lowerCAmelCase__ = None , **lowerCAmelCase__ , ): """simple docstring""" if text is None and images is None: raise ValueError('You have to specify either text or images. Both cannot be none.' 
) if text is not None: SCREAMING_SNAKE_CASE_ : Optional[int] = self.tokenizer( text=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=lowerCAmelCase__ , stride=lowerCAmelCase__ , pad_to_multiple_of=lowerCAmelCase__ , return_token_type_ids=lowerCAmelCase__ , return_attention_mask=lowerCAmelCase__ , return_overflowing_tokens=lowerCAmelCase__ , return_special_tokens_mask=lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , return_length=lowerCAmelCase__ , verbose=lowerCAmelCase__ , return_tensors=lowerCAmelCase__ , **lowerCAmelCase__ , ) if images is not None: SCREAMING_SNAKE_CASE_ : int = self.image_processor( lowerCAmelCase__ , return_image_mask=lowerCAmelCase__ , return_codebook_pixels=lowerCAmelCase__ , return_tensors=lowerCAmelCase__ , **lowerCAmelCase__ , ) if text is not None and images is not None: encoding.update(lowerCAmelCase__ ) return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**lowerCAmelCase__ ) , tensor_type=lowerCAmelCase__ ) def UpperCamelCase__ ( self , *lowerCAmelCase__ , **lowerCAmelCase__ ): """simple docstring""" return self.tokenizer.batch_decode(*lowerCAmelCase__ , **lowerCAmelCase__ ) def UpperCamelCase__ ( self , *lowerCAmelCase__ , **lowerCAmelCase__ ): """simple docstring""" return self.tokenizer.decode(*lowerCAmelCase__ , **lowerCAmelCase__ ) @property def UpperCamelCase__ ( self ): """simple docstring""" SCREAMING_SNAKE_CASE_ : List[Any] = self.tokenizer.model_input_names SCREAMING_SNAKE_CASE_ : Tuple = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) @property def UpperCamelCase__ ( self ): """simple docstring""" warnings.warn( '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' 
, lowerCAmelCase__ , ) return self.image_processor_class @property def UpperCamelCase__ ( self ): """simple docstring""" warnings.warn( '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , lowerCAmelCase__ , ) return self.image_processor
101
import argparse import math import os import torch from neural_compressor.utils.pytorch import load from PIL import Image from transformers import CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, StableDiffusionPipeline, UNetaDConditionModel def A__ ( ): SCREAMING_SNAKE_CASE__: Union[str, Any]= argparse.ArgumentParser() parser.add_argument( '''-m''' , '''--pretrained_model_name_or_path''' , type=snake_case_ , default=snake_case_ , required=snake_case_ , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , ) parser.add_argument( '''-c''' , '''--caption''' , type=snake_case_ , default='''robotic cat with wings''' , help='''Text used to generate images.''' , ) parser.add_argument( '''-n''' , '''--images_num''' , type=snake_case_ , default=4 , help='''How much images to generate.''' , ) parser.add_argument( '''-s''' , '''--seed''' , type=snake_case_ , default=42 , help='''Seed for random process.''' , ) parser.add_argument( '''-ci''' , '''--cuda_id''' , type=snake_case_ , default=0 , help='''cuda_id.''' , ) SCREAMING_SNAKE_CASE__: Any= parser.parse_args() return args def A__ ( snake_case_ : Optional[Any] , snake_case_ : Optional[Any] , snake_case_ : List[str] ): if not len(snake_case_ ) == rows * cols: raise ValueError('''The specified number of rows and columns are not correct.''' ) SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: str= imgs[0].size SCREAMING_SNAKE_CASE__: Optional[Any]= Image.new('''RGB''' , size=(cols * w, rows * h) ) SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: Union[str, Any]= grid.size for i, img in enumerate(snake_case_ ): grid.paste(snake_case_ , box=(i % cols * w, i // cols * h) ) return grid def A__ ( snake_case_ : Tuple , snake_case_ : str="robotic cat with wings" , snake_case_ : Optional[Any]=7.5 , snake_case_ : Dict=50 , snake_case_ : Union[str, Any]=1 , snake_case_ : Tuple=42 , ): SCREAMING_SNAKE_CASE__: List[Any]= torch.Generator(pipeline.device ).manual_seed(snake_case_ ) 
SCREAMING_SNAKE_CASE__: Optional[int]= pipeline( snake_case_ , guidance_scale=snake_case_ , num_inference_steps=snake_case_ , generator=snake_case_ , num_images_per_prompt=snake_case_ , ).images SCREAMING_SNAKE_CASE__: str= int(math.sqrt(snake_case_ ) ) SCREAMING_SNAKE_CASE__: Optional[Any]= image_grid(snake_case_ , rows=_rows , cols=num_images_per_prompt // _rows ) return grid, images lowercase_ : List[str] = parse_args() # Load models and create wrapper for stable diffusion lowercase_ : List[str] = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder='tokenizer') lowercase_ : List[Any] = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='text_encoder') lowercase_ : Tuple = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder='vae') lowercase_ : List[Any] = UNetaDConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='unet') lowercase_ : Dict = StableDiffusionPipeline.from_pretrained( args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer ) lowercase_ : str = lambda images, clip_input: (images, False) if os.path.exists(os.path.join(args.pretrained_model_name_or_path, 'best_model.pt')): lowercase_ : Union[str, Any] = load(args.pretrained_model_name_or_path, model=unet) unet.eval() setattr(pipeline, 'unet', unet) else: lowercase_ : Any = unet.to(torch.device('cuda', args.cuda_id)) lowercase_ : str = pipeline.to(unet.device) lowercase_ , lowercase_ : Dict = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed) grid.save(os.path.join(args.pretrained_model_name_or_path, '{}.png'.format('_'.join(args.caption.split())))) lowercase_ : List[Any] = os.path.join(args.pretrained_model_name_or_path, '_'.join(args.caption.split())) os.makedirs(dirname, exist_ok=True) for idx, image in enumerate(images): image.save(os.path.join(dirname, '{}.png'.format(idx + 1)))
64
0
"""simple docstring""" import torch from torch import nn from transformers import CLIPPreTrainedModel, CLIPVisionModel from ...models.attention import BasicTransformerBlock from ...utils import logging __magic_name__ : int = logging.get_logger(__name__) # pylint: disable=invalid-name class lowercase__ ( __SCREAMING_SNAKE_CASE ): """simple docstring""" def __init__( self , _A , _A=7_6_8 ): '''simple docstring''' super().__init__(_A ) UpperCamelCase : Tuple = proj_size UpperCamelCase : Optional[int] = CLIPVisionModel(_A ) UpperCamelCase : Union[str, Any] = PaintByExampleMapper(_A ) UpperCamelCase : int = nn.LayerNorm(config.hidden_size ) UpperCamelCase : str = nn.Linear(config.hidden_size , self.proj_size ) # uncondition for scaling UpperCamelCase : Optional[int] = nn.Parameter(torch.randn((1, 1, self.proj_size) ) ) def _a ( self , _A , _A=False ): '''simple docstring''' UpperCamelCase : List[str] = self.model(pixel_values=_A ) UpperCamelCase : Tuple = clip_output.pooler_output UpperCamelCase : List[Any] = self.mapper(latent_states[:, None] ) UpperCamelCase : List[Any] = self.final_layer_norm(_A ) UpperCamelCase : Union[str, Any] = self.proj_out(_A ) if return_uncond_vector: return latent_states, self.uncond_vector return latent_states class lowercase__ ( nn.Module ): """simple docstring""" def __init__( self , _A ): '''simple docstring''' super().__init__() UpperCamelCase : Any = (config.num_hidden_layers + 1) // 5 UpperCamelCase : Optional[Any] = config.hidden_size UpperCamelCase : int = 1 UpperCamelCase : Optional[int] = nn.ModuleList( [ BasicTransformerBlock(_A , _A , _A , activation_fn="""gelu""" , attention_bias=_A ) for _ in range(_A ) ] ) def _a ( self , _A ): '''simple docstring''' for block in self.blocks: UpperCamelCase : Union[str, Any] = block(_A ) return hidden_states
102
from __future__ import annotations from collections import deque class _lowerCamelCase : def __init__( self , lowerCAmelCase ) -> Optional[Any]: SCREAMING_SNAKE_CASE__: list[dict]= [] self.adlist.append( {'''value''': '''''', '''next_states''': [], '''fail_state''': 0, '''output''': []} ) for keyword in keywords: self.add_keyword(lowerCAmelCase ) self.set_fail_transitions() def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase ) -> int | None: for state in self.adlist[current_state]["next_states"]: if char == self.adlist[state]["value"]: return state return None def UpperCamelCase_ ( self , lowerCAmelCase ) -> None: SCREAMING_SNAKE_CASE__: str= 0 for character in keyword: SCREAMING_SNAKE_CASE__: Union[str, Any]= self.find_next_state(lowerCAmelCase , lowerCAmelCase ) if next_state is None: self.adlist.append( { '''value''': character, '''next_states''': [], '''fail_state''': 0, '''output''': [], } ) self.adlist[current_state]["next_states"].append(len(self.adlist ) - 1 ) SCREAMING_SNAKE_CASE__: Dict= len(self.adlist ) - 1 else: SCREAMING_SNAKE_CASE__: List[Any]= next_state self.adlist[current_state]["output"].append(lowerCAmelCase ) def UpperCamelCase_ ( self ) -> None: SCREAMING_SNAKE_CASE__: deque= deque() for node in self.adlist[0]["next_states"]: q.append(lowerCAmelCase ) SCREAMING_SNAKE_CASE__: Optional[int]= 0 while q: SCREAMING_SNAKE_CASE__: Union[str, Any]= q.popleft() for child in self.adlist[r]["next_states"]: q.append(lowerCAmelCase ) SCREAMING_SNAKE_CASE__: Optional[int]= self.adlist[r]['''fail_state'''] while ( self.find_next_state(lowerCAmelCase , self.adlist[child]['''value'''] ) is None and state != 0 ): SCREAMING_SNAKE_CASE__: Tuple= self.adlist[state]['''fail_state'''] SCREAMING_SNAKE_CASE__: Dict= self.find_next_state( lowerCAmelCase , self.adlist[child]['''value'''] ) if self.adlist[child]["fail_state"] is None: SCREAMING_SNAKE_CASE__: Union[str, Any]= 0 SCREAMING_SNAKE_CASE__: str= ( self.adlist[child]['''output'''] + 
self.adlist[self.adlist[child]['''fail_state''']]['''output'''] ) def UpperCamelCase_ ( self , lowerCAmelCase ) -> dict[str, list[int]]: SCREAMING_SNAKE_CASE__: dict= {} # returns a dict with keywords and list of its occurrences SCREAMING_SNAKE_CASE__: Optional[Any]= 0 for i in range(len(lowerCAmelCase ) ): while ( self.find_next_state(lowerCAmelCase , string[i] ) is None and current_state != 0 ): SCREAMING_SNAKE_CASE__: Optional[int]= self.adlist[current_state]['''fail_state'''] SCREAMING_SNAKE_CASE__: Optional[int]= self.find_next_state(lowerCAmelCase , string[i] ) if next_state is None: SCREAMING_SNAKE_CASE__: List[Any]= 0 else: SCREAMING_SNAKE_CASE__: Dict= next_state for key in self.adlist[current_state]["output"]: if key not in result: SCREAMING_SNAKE_CASE__: Optional[Any]= [] result[key].append(i - len(lowerCAmelCase ) + 1 ) return result if __name__ == "__main__": import doctest doctest.testmod()
64
0
"""simple docstring""" from functools import lru_cache def snake_case ( lowerCAmelCase_ ) -> set: _snake_case = 2 _snake_case = set() while i * i <= n: if n % i: i += 1 else: n //= i factors.add(lowerCAmelCase_ ) if n > 1: factors.add(lowerCAmelCase_ ) return factors @lru_cache def snake_case ( lowerCAmelCase_ ) -> int: return len(unique_prime_factors(lowerCAmelCase_ ) ) def snake_case ( lowerCAmelCase_ ) -> bool: return len(set(lowerCAmelCase_ ) ) in (0, 1) def snake_case ( lowerCAmelCase_ ) -> list: _snake_case = 2 while True: # Increment each value of a generated range _snake_case = [base + i for i in range(lowerCAmelCase_ )] # Run elements through out unique_prime_factors function # Append our target number to the end. _snake_case = [upf_len(lowerCAmelCase_ ) for x in group] checker.append(lowerCAmelCase_ ) # If all numbers in the list are equal, return the group variable. if equality(lowerCAmelCase_ ): return group # Increment our base variable by 1 base += 1 def snake_case ( lowerCAmelCase_ = 4 ) -> int: _snake_case = run(lowerCAmelCase_ ) return results[0] if len(lowerCAmelCase_ ) else None if __name__ == "__main__": print(solution())
103
import numpy as np


def A__(f, ya, xa, h, x_end):
    """Solve an ODE ``y' = f(x, y)`` with the classic 4th-order Runge–Kutta method.

    :param f: right-hand side, callable ``f(x, y) -> float``
    :param ya: initial value y(xa)
    :param xa: initial abscissa
    :param h: step size (assumed positive)
    :param x_end: final abscissa; integration covers [xa, x_end]
    :return: numpy array of the approximate solution at each step (y[0] == ya)
    """
    # NOTE(review): parameter order restored as (f, y0, x0, h, x_end) from the
    # body's usage; the mangled original had five identically-named parameters.
    n = int(np.ceil((x_end - xa) / h))
    y = np.zeros((n + 1,))
    y[0] = ya
    x = xa
    for k in range(n):
        # Four slope estimates: start, two midpoints, and end of the step.
        ka = f(x, y[k])
        kb = f(x + 0.5 * h, y[k] + 0.5 * h * ka)
        kc = f(x + 0.5 * h, y[k] + 0.5 * h * kb)
        kd = f(x + h, y[k] + h * kc)
        # Weighted average (Simpson-like 1-2-2-1 weights).
        y[k + 1] = y[k] + (1 / 6) * h * (ka + 2 * kb + 2 * kc + kd)
        x += h
    return y


if __name__ == "__main__":
    import doctest

    doctest.testmod()
64
0
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL UpperCamelCase = logging.get_logger(__name__) class UpperCamelCase__ ( _lowerCAmelCase ): """simple docstring""" A__ : Dict = ["pixel_values"] def __init__( self , SCREAMING_SNAKE_CASE__ = True , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = PILImageResampling.BICUBIC , SCREAMING_SNAKE_CASE__ = True , SCREAMING_SNAKE_CASE__ = 1 / 255 , SCREAMING_SNAKE_CASE__ = True , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = True , **SCREAMING_SNAKE_CASE__ , ) -> None: super().__init__(**SCREAMING_SNAKE_CASE__ ) A__ = size if size is not None else {"height": 384, "width": 384} A__ = get_size_dict(SCREAMING_SNAKE_CASE__ , default_to_square=SCREAMING_SNAKE_CASE__ ) A__ = do_resize A__ = size A__ = resample A__ = do_rescale A__ = rescale_factor A__ = do_normalize A__ = image_mean if image_mean is not None else OPENAI_CLIP_MEAN A__ = image_std if image_std is not None else OPENAI_CLIP_STD A__ = do_convert_rgb def snake_case__ ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = PILImageResampling.BICUBIC , SCREAMING_SNAKE_CASE__ = None , **SCREAMING_SNAKE_CASE__ , ) -> np.ndarray: A__ = get_size_dict(SCREAMING_SNAKE_CASE__ , default_to_square=SCREAMING_SNAKE_CASE__ ) if "height" not in size or "width" not in size: raise ValueError(f"""The `size` dictionary must contain the keys `height` and `width`. 
Got {size.keys()}""" ) A__ = (size["height"], size["width"]) return resize(SCREAMING_SNAKE_CASE__ , size=SCREAMING_SNAKE_CASE__ , resample=SCREAMING_SNAKE_CASE__ , data_format=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) def snake_case__ ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None , **SCREAMING_SNAKE_CASE__ , ) -> str: return rescale(SCREAMING_SNAKE_CASE__ , scale=SCREAMING_SNAKE_CASE__ , data_format=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) def snake_case__ ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None , **SCREAMING_SNAKE_CASE__ , ) -> np.ndarray: return normalize(SCREAMING_SNAKE_CASE__ , mean=SCREAMING_SNAKE_CASE__ , std=SCREAMING_SNAKE_CASE__ , data_format=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) def snake_case__ ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE__ , ) -> PIL.Image.Image: A__ = do_resize if do_resize is not None else self.do_resize A__ = resample if resample is not None else self.resample A__ = do_rescale if do_rescale is not None else self.do_rescale A__ = rescale_factor if rescale_factor is not None else self.rescale_factor A__ = do_normalize if do_normalize is not None else self.do_normalize A__ = image_mean if image_mean is not None else self.image_mean A__ = image_std if image_std is not None else self.image_std A__ = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb A__ = size if size is not None else self.size A__ = get_size_dict(SCREAMING_SNAKE_CASE__ , default_to_square=SCREAMING_SNAKE_CASE__ ) A__ = 
make_list_of_images(SCREAMING_SNAKE_CASE__ ) if not valid_images(SCREAMING_SNAKE_CASE__ ): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) if do_resize and size is None or resample is None: raise ValueError("Size and resample must be specified if do_resize is True." ) if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True." ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("Image mean and std must be specified if do_normalize is True." ) # PIL RGBA images are converted to RGB if do_convert_rgb: A__ = [convert_to_rgb(SCREAMING_SNAKE_CASE__ ) for image in images] # All transformations expect numpy arrays. A__ = [to_numpy_array(SCREAMING_SNAKE_CASE__ ) for image in images] if do_resize: A__ = [self.resize(image=SCREAMING_SNAKE_CASE__ , size=SCREAMING_SNAKE_CASE__ , resample=SCREAMING_SNAKE_CASE__ ) for image in images] if do_rescale: A__ = [self.rescale(image=SCREAMING_SNAKE_CASE__ , scale=SCREAMING_SNAKE_CASE__ ) for image in images] if do_normalize: A__ = [self.normalize(image=SCREAMING_SNAKE_CASE__ , mean=SCREAMING_SNAKE_CASE__ , std=SCREAMING_SNAKE_CASE__ ) for image in images] A__ = [to_channel_dimension_format(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for image in images] A__ = BatchFeature(data={"pixel_values": images} , tensor_type=SCREAMING_SNAKE_CASE__ ) return encoded_outputs
104
import unittest import torch from torch import nn from diffusers.models.activations import get_activation class _lowerCamelCase ( unittest.TestCase ): def UpperCamelCase_ ( self ) -> List[Any]: SCREAMING_SNAKE_CASE__: Tuple= get_activation('''swish''' ) self.assertIsInstance(lowerCAmelCase , nn.SiLU ) self.assertEqual(act(torch.tensor(-100 , dtype=torch.floataa ) ).item() , 0 ) self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 ) def UpperCamelCase_ ( self ) -> int: SCREAMING_SNAKE_CASE__: Optional[Any]= get_activation('''silu''' ) self.assertIsInstance(lowerCAmelCase , nn.SiLU ) self.assertEqual(act(torch.tensor(-100 , dtype=torch.floataa ) ).item() , 0 ) self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 ) def UpperCamelCase_ ( self ) -> Union[str, Any]: SCREAMING_SNAKE_CASE__: Optional[int]= get_activation('''mish''' ) self.assertIsInstance(lowerCAmelCase , nn.Mish ) self.assertEqual(act(torch.tensor(-200 , dtype=torch.floataa ) ).item() , 0 ) self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 ) def UpperCamelCase_ ( self ) -> int: SCREAMING_SNAKE_CASE__: Dict= get_activation('''gelu''' ) self.assertIsInstance(lowerCAmelCase , nn.GELU ) self.assertEqual(act(torch.tensor(-100 , dtype=torch.floataa ) ).item() , 0 ) self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
64
0
"""Quantum full adder built with Qiskit.

Adds two one-qubit inputs plus a carry-in and measures (sum, carry_out).
An input value of 2 puts the corresponding qubit into superposition via a
Hadamard gate.
"""
import math

import qiskit


def __UpperCAmelCase(
    input_1: int = 1, input_2: int = 1, carry_in: int = 1
) -> qiskit.result.counts.Counts:
    """Run the full-adder circuit on the Aer simulator and return the counts.

    Args:
        input_1: first addend qubit value (0, 1, or 2 for superposition).
        input_2: second addend qubit value (0, 1, or 2 for superposition).
        carry_in: carry-in qubit value (0, 1, or 2 for superposition).

    Raises:
        TypeError: if any input is a string instead of a number.
        ValueError: if any input is negative, non-integral, or greater than 2.
    """
    if (
        isinstance(input_1, str)
        or isinstance(input_2, str)
        or isinstance(carry_in, str)
    ):
        raise TypeError('inputs must be integers.')

    if (input_1 < 0) or (input_2 < 0) or (carry_in < 0):
        raise ValueError('inputs must be positive.')

    if (
        (math.floor(input_1) != input_1)
        or (math.floor(input_2) != input_2)
        or (math.floor(carry_in) != carry_in)
    ):
        raise ValueError('inputs must be exact integers.')

    if (input_1 > 2) or (input_2 > 2) or (carry_in > 2):
        raise ValueError('inputs must be less or equal to 2.')

    # build registers: 4 qubits (two inputs, carry-in, ancilla) + 2 classical bits
    quantum_register = qiskit.QuantumRegister(4, 'qr')
    classical_register = qiskit.ClassicalRegister(2, 'cr')

    # list the entries
    entry = [input_1, input_2, carry_in]

    quantum_circuit = qiskit.QuantumCircuit(quantum_register, classical_register)

    for i in range(0, 3):
        if entry[i] == 2:
            quantum_circuit.h(i)  # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i)  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i)  # for 0 entries

    # build the circuit
    quantum_circuit.ccx(0, 1, 3)  # ccx = toffoli gate
    quantum_circuit.cx(0, 1)
    quantum_circuit.ccx(1, 2, 3)
    quantum_circuit.cx(1, 2)
    quantum_circuit.cx(0, 1)

    quantum_circuit.measure([2, 3], classical_register)  # measure the last two qbits

    backend = qiskit.Aer.get_backend('aer_simulator')
    job = qiskit.execute(quantum_circuit, backend, shots=10_00)

    return job.result().get_counts(quantum_circuit)


if __name__ == "__main__":
    print(F"""Total sum count for state is: {__UpperCAmelCase(1, 1, 1)}""")
105
from __future__ import annotations

from collections.abc import Callable
from typing import Any, Generic, TypeVar

T = TypeVar("T")


class SegmentTree(Generic[T]):
    """Iterative (bottom-up) segment tree over an arbitrary associative function.

    ``st[N:]`` holds the leaves (a copy reference of ``arr``), ``st[1:N]`` holds
    the internal nodes, and ``st[0]`` is unused padding.
    """

    def __init__(self, arr: list[T], fnc: Callable[[T, T], T]) -> None:
        """Build the tree in O(n).

        Args:
            arr: the underlying values.
            fnc: an associative combining function, e.g. ``min``, ``max`` or
                ``lambda a, b: a + b``.
        """
        any_type: Any | T = None
        self.N: int = len(arr)
        self.st: list[T] = [any_type for _ in range(self.N)] + arr
        self.fn = fnc
        self.build()

    def build(self) -> None:
        """Fill every internal node from its two children, bottom-up."""
        for p in range(self.N - 1, 0, -1):
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def update(self, p: int, v: T) -> None:
        """Set ``arr[p] = v`` and recombine all ancestors in O(log n)."""
        p += self.N
        self.st[p] = v
        while p > 1:
            p = p // 2
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def query(self, l: int, r: int) -> T | None:  # noqa: E741
        """Return ``fn`` folded over the inclusive range ``arr[l..r]`` in O(log n)."""
        l, r = l + self.N, r + self.N
        res: T | None = None
        while l <= r:
            if l % 2 == 1:
                # l is a right child: take it and move past it.
                res = self.st[l] if res is None else self.fn(res, self.st[l])
            if r % 2 == 0:
                # r is a left child: take it and move past it.
                res = self.st[r] if res is None else self.fn(res, self.st[r])
            l, r = (l + 1) // 2, (r - 1) // 2
        return res


if __name__ == "__main__":
    from functools import reduce

    test_array = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]

    test_updates = {
        0: 7,
        1: 2,
        2: 6,
        3: -14,
        4: 5,
        5: 4,
        6: 7,
        7: -10,
        8: 9,
        9: 10,
        10: 12,
        11: 1,
    }

    min_segment_tree = SegmentTree(test_array, min)
    max_segment_tree = SegmentTree(test_array, max)
    sum_segment_tree = SegmentTree(test_array, lambda a, b: a + b)

    def test_all_segments() -> None:
        """Compare every (i, j) range query against a brute-force reduce."""
        for i in range(len(test_array)):
            for j in range(i, len(test_array)):
                min_range = reduce(min, test_array[i : j + 1])
                max_range = reduce(max, test_array[i : j + 1])
                sum_range = reduce(lambda a, b: a + b, test_array[i : j + 1])
                assert min_range == min_segment_tree.query(i, j)
                assert max_range == max_segment_tree.query(i, j)
                assert sum_range == sum_segment_tree.query(i, j)

    test_all_segments()

    for index, value in test_updates.items():
        test_array[index] = value
        min_segment_tree.update(index, value)
        max_segment_tree.update(index, value)
        sum_segment_tree.update(index, value)

    test_all_segments()
64
0
# Lazy import structure for the TrOCR model, following the standard
# transformers pattern: submodules are only imported on first attribute
# access (or eagerly under TYPE_CHECKING) to keep `import transformers` cheap.
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_speech_available,
    is_torch_available,
)

# Maps submodule name -> list of public names exported from it.
_import_structure = {
    'configuration_trocr': ['TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TrOCRConfig'],
    'processing_trocr': ['TrOCRProcessor'],
}

# The modeling module needs torch; register it only when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_trocr'] = [
        'TROCR_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TrOCRForCausalLM',
        'TrOCRPreTrainedModel',
    ]

if TYPE_CHECKING:
    # Eager imports for type checkers only.
    from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
    from .processing_trocr import TrOCRProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
106
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
#
# NOTE(review): the identifiers in this file appear machine-obfuscated
# (every local is `SCREAMING_SNAKE_CASE__`, every method `UpperCamelCase_`,
# parameters `lowerCAmelCase`, and e.g. `np.uinta` / `torch.nn.Convad` look
# like garbled `np.uint8` / `torch.nn.Conv2d`). As written, many references
# (unet, controlnet, vae, components, inputs, pipe, steps, scale, ...) are
# unresolved and the typing names used in annotations (Dict, Tuple, ...)
# are never imported — verify against diffusers'
# tests/pipelines/controlnet/test_controlnet_img2img.py before relying on it.
# Code below is byte-identical to the original; only comments were added.

import gc
import random
import tempfile
import unittest

import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    ControlNetModel,
    DDIMScheduler,
    StableDiffusionControlNetImgaImgPipeline,
    UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..pipeline_params import (
    IMAGE_TO_IMAGE_IMAGE_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
    PipelineKarrasSchedulerTesterMixin,
    PipelineLatentTesterMixin,
    PipelineTesterMixin,
)

# Make kernels deterministic so the fast tests are reproducible.
enable_full_determinism()


# Fast tests for the single-ControlNet img2img pipeline with tiny random models.
class _lowerCamelCase(UpperCamelCase_, UpperCamelCase_, UpperCamelCase_, unittest.TestCase):
    # NOTE(review): these five class attributes all share the name `__a`
    # (obfuscation damage); only the last assignment survives at runtime.
    __a = StableDiffusionControlNetImgaImgPipeline
    __a = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    __a = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    __a = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"control_image"})
    __a = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def UpperCamelCase_(self) -> str:
        # Presumably get_dummy_components(): builds tiny seeded UNet/ControlNet/
        # scheduler/VAE/CLIP components for fast CPU tests — TODO confirm.
        torch.manual_seed(0)
        SCREAMING_SNAKE_CASE__: int= UNetaDConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D'''),
            up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D'''),
            cross_attention_dim=32,
        )
        torch.manual_seed(0)
        SCREAMING_SNAKE_CASE__: str= ControlNetModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            in_channels=4,
            down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D'''),
            cross_attention_dim=32,
            conditioning_embedding_out_channels=(16, 32),
        )
        torch.manual_seed(0)
        SCREAMING_SNAKE_CASE__: str= DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule='''scaled_linear''',
            clip_sample=lowerCAmelCase,
            set_alpha_to_one=lowerCAmelCase,
        )
        torch.manual_seed(0)
        SCREAMING_SNAKE_CASE__: List[str]= AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''],
            up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''],
            latent_channels=4,
        )
        torch.manual_seed(0)
        SCREAMING_SNAKE_CASE__: List[Any]= CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        SCREAMING_SNAKE_CASE__: List[str]= CLIPTextModel(lowerCAmelCase)
        SCREAMING_SNAKE_CASE__: int= CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''')

        SCREAMING_SNAKE_CASE__: Union[str, Any]= {
            '''unet''': unet,
            '''controlnet''': controlnet,
            '''scheduler''': scheduler,
            '''vae''': vae,
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
            '''safety_checker''': None,
            '''feature_extractor''': None,
        }
        return components

    def UpperCamelCase_(self, lowerCAmelCase, lowerCAmelCase=0) -> Optional[Any]:
        # Presumably get_dummy_inputs(device, seed=0): seeded generator,
        # random control image + init image, and standard call kwargs.
        if str(lowerCAmelCase).startswith('''mps'''):
            SCREAMING_SNAKE_CASE__: Optional[int]= torch.manual_seed(lowerCAmelCase)
        else:
            SCREAMING_SNAKE_CASE__: Union[str, Any]= torch.Generator(device=lowerCAmelCase).manual_seed(lowerCAmelCase)

        SCREAMING_SNAKE_CASE__: int= 2
        SCREAMING_SNAKE_CASE__: Tuple= randn_tensor(
            (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
            generator=lowerCAmelCase,
            device=torch.device(lowerCAmelCase),
        )
        SCREAMING_SNAKE_CASE__: int= floats_tensor(control_image.shape, rng=random.Random(lowerCAmelCase)).to(lowerCAmelCase)
        SCREAMING_SNAKE_CASE__: Optional[int]= image.cpu().permute(0, 2, 3, 1)[0]
        SCREAMING_SNAKE_CASE__: str= Image.fromarray(np.uinta(lowerCAmelCase)).convert('''RGB''').resize((64, 64))
        SCREAMING_SNAKE_CASE__: Tuple= {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''guidance_scale''': 6.0,
            '''output_type''': '''numpy''',
            '''image''': image,
            '''control_image''': control_image,
        }
        return inputs

    def UpperCamelCase_(self) -> Tuple:
        # Attention-slicing should match the unsliced forward pass.
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)

    @unittest.skipIf(
        torch_device != '''cuda''' or not is_xformers_available(),
        reason='''XFormers attention is only available with CUDA and `xformers` installed''',
    )
    def UpperCamelCase_(self) -> Dict:
        # xFormers attention should match the default attention.
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)

    def UpperCamelCase_(self) -> str:
        # Batched inference should match single-sample inference.
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)


# Fast tests for the multi-ControlNet variant of the same pipeline.
class _lowerCamelCase(UpperCamelCase_, UpperCamelCase_, unittest.TestCase):
    __a = StableDiffusionControlNetImgaImgPipeline
    __a = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    __a = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    __a = frozenset([])  # TO_DO: add image_params once refactored VaeImageProcessor.preprocess

    def UpperCamelCase_(self) -> Dict:
        # Like the single-controlnet components, but wraps two ControlNets in a
        # MultiControlNetModel. NOTE(review): both ControlNets are bound to the
        # same garbled name `controlneta`, so the second overwrites the first.
        torch.manual_seed(0)
        SCREAMING_SNAKE_CASE__: int= UNetaDConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D'''),
            up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D'''),
            cross_attention_dim=32,
        )
        torch.manual_seed(0)

        def init_weights(lowerCAmelCase):
            # NOTE(review): `torch.nn.Convad` / bare `m` look like garbled
            # `torch.nn.Conv2d` and the lambda parameter — verify upstream.
            if isinstance(lowerCAmelCase, torch.nn.Convad):
                torch.nn.init.normal(m.weight)
                m.bias.data.fill_(1.0)

        SCREAMING_SNAKE_CASE__: Any= ControlNetModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            in_channels=4,
            down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D'''),
            cross_attention_dim=32,
            conditioning_embedding_out_channels=(16, 32),
        )
        controlneta.controlnet_down_blocks.apply(lowerCAmelCase)

        torch.manual_seed(0)
        SCREAMING_SNAKE_CASE__: Tuple= ControlNetModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            in_channels=4,
            down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D'''),
            cross_attention_dim=32,
            conditioning_embedding_out_channels=(16, 32),
        )
        controlneta.controlnet_down_blocks.apply(lowerCAmelCase)

        torch.manual_seed(0)
        SCREAMING_SNAKE_CASE__: Tuple= DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule='''scaled_linear''',
            clip_sample=lowerCAmelCase,
            set_alpha_to_one=lowerCAmelCase,
        )
        torch.manual_seed(0)
        SCREAMING_SNAKE_CASE__: Tuple= AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''],
            up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''],
            latent_channels=4,
        )
        torch.manual_seed(0)
        SCREAMING_SNAKE_CASE__: Optional[int]= CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        SCREAMING_SNAKE_CASE__: Any= CLIPTextModel(lowerCAmelCase)
        SCREAMING_SNAKE_CASE__: List[str]= CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''')

        SCREAMING_SNAKE_CASE__: Dict= MultiControlNetModel([controlneta, controlneta])

        SCREAMING_SNAKE_CASE__: int= {
            '''unet''': unet,
            '''controlnet''': controlnet,
            '''scheduler''': scheduler,
            '''vae''': vae,
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
            '''safety_checker''': None,
            '''feature_extractor''': None,
        }
        return components

    def UpperCamelCase_(self, lowerCAmelCase, lowerCAmelCase=0) -> List[Any]:
        # Dummy inputs for the multi-controlnet case: one control image per net.
        if str(lowerCAmelCase).startswith('''mps'''):
            SCREAMING_SNAKE_CASE__: str= torch.manual_seed(lowerCAmelCase)
        else:
            SCREAMING_SNAKE_CASE__: Optional[int]= torch.Generator(device=lowerCAmelCase).manual_seed(lowerCAmelCase)

        SCREAMING_SNAKE_CASE__: Any= 2
        SCREAMING_SNAKE_CASE__: Tuple= [
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
                generator=lowerCAmelCase,
                device=torch.device(lowerCAmelCase),
            ),
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
                generator=lowerCAmelCase,
                device=torch.device(lowerCAmelCase),
            ),
        ]
        SCREAMING_SNAKE_CASE__: Union[str, Any]= floats_tensor(control_image[0].shape, rng=random.Random(lowerCAmelCase)).to(lowerCAmelCase)
        SCREAMING_SNAKE_CASE__: Dict= image.cpu().permute(0, 2, 3, 1)[0]
        SCREAMING_SNAKE_CASE__: Union[str, Any]= Image.fromarray(np.uinta(lowerCAmelCase)).convert('''RGB''').resize((64, 64))
        SCREAMING_SNAKE_CASE__: int= {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''guidance_scale''': 6.0,
            '''output_type''': '''numpy''',
            '''image''': image,
            '''control_image''': control_image,
        }
        return inputs

    def UpperCamelCase_(self) -> List[Any]:
        # Control-guidance start/end switches must actually change the output:
        # runs the pipeline with four different guidance windows and asserts
        # every pair of outputs differs.
        SCREAMING_SNAKE_CASE__: List[Any]= self.get_dummy_components()
        SCREAMING_SNAKE_CASE__: str= self.pipeline_class(**lowerCAmelCase)
        pipe.to(lowerCAmelCase)

        SCREAMING_SNAKE_CASE__: List[Any]= 10.0
        SCREAMING_SNAKE_CASE__: Any= 4

        SCREAMING_SNAKE_CASE__: Optional[Any]= self.get_dummy_inputs(lowerCAmelCase)
        SCREAMING_SNAKE_CASE__: int= steps
        SCREAMING_SNAKE_CASE__: int= scale
        SCREAMING_SNAKE_CASE__: List[Any]= pipe(**lowerCAmelCase)[0]

        SCREAMING_SNAKE_CASE__: Tuple= self.get_dummy_inputs(lowerCAmelCase)
        SCREAMING_SNAKE_CASE__: Dict= steps
        SCREAMING_SNAKE_CASE__: List[Any]= scale
        SCREAMING_SNAKE_CASE__: int= pipe(**lowerCAmelCase, control_guidance_start=0.1, control_guidance_end=0.2)[0]

        SCREAMING_SNAKE_CASE__: Dict= self.get_dummy_inputs(lowerCAmelCase)
        SCREAMING_SNAKE_CASE__: List[str]= steps
        SCREAMING_SNAKE_CASE__: List[Any]= scale
        SCREAMING_SNAKE_CASE__: str= pipe(**lowerCAmelCase, control_guidance_start=[0.1, 0.3], control_guidance_end=[0.2, 0.7])[0]

        SCREAMING_SNAKE_CASE__: Optional[int]= self.get_dummy_inputs(lowerCAmelCase)
        SCREAMING_SNAKE_CASE__: int= steps
        SCREAMING_SNAKE_CASE__: int= scale
        SCREAMING_SNAKE_CASE__: Any= pipe(**lowerCAmelCase, control_guidance_start=0.4, control_guidance_end=[0.5, 0.8])[0]

        # make sure that all outputs are different
        assert np.sum(np.abs(output_a - output_a)) > 1e-3
        assert np.sum(np.abs(output_a - output_a)) > 1e-3
        assert np.sum(np.abs(output_a - output_a)) > 1e-3

    def UpperCamelCase_(self) -> int:
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)

    @unittest.skipIf(
        torch_device != '''cuda''' or not is_xformers_available(),
        reason='''XFormers attention is only available with CUDA and `xformers` installed''',
    )
    def UpperCamelCase_(self) -> Dict:
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)

    def UpperCamelCase_(self) -> Union[str, Any]:
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)

    def UpperCamelCase_(self) -> Optional[Any]:
        # Multi-ControlNet pipelines cannot be saved; the pipeline is expected
        # to raise NotImplementedError from save_pretrained.
        SCREAMING_SNAKE_CASE__: Any= self.get_dummy_components()
        SCREAMING_SNAKE_CASE__: Union[str, Any]= self.pipeline_class(**lowerCAmelCase)
        pipe.to(lowerCAmelCase)
        pipe.set_progress_bar_config(disable=lowerCAmelCase)
        with tempfile.TemporaryDirectory() as tmpdir:
            try:
                # save_pretrained is not implemented for Multi-ControlNet
                pipe.save_pretrained(lowerCAmelCase)
            except NotImplementedError:
                pass


# Slow integration test against real checkpoints; requires a CUDA GPU.
@slow
@require_torch_gpu
class _lowerCamelCase(unittest.TestCase):
    def UpperCamelCase_(self) -> Dict:
        # Presumably tearDown(): free GPU memory between tests — the garbled
        # name means unittest will not call it automatically; verify upstream.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def UpperCamelCase_(self) -> Tuple:
        # End-to-end canny-conditioned img2img against a stored reference image.
        SCREAMING_SNAKE_CASE__: Optional[int]= ControlNetModel.from_pretrained('''lllyasviel/sd-controlnet-canny''')
        SCREAMING_SNAKE_CASE__: Tuple= StableDiffusionControlNetImgaImgPipeline.from_pretrained(
            '''runwayml/stable-diffusion-v1-5''', safety_checker=lowerCAmelCase, controlnet=lowerCAmelCase
        )
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=lowerCAmelCase)

        SCREAMING_SNAKE_CASE__: Tuple= torch.Generator(device='''cpu''').manual_seed(0)
        SCREAMING_SNAKE_CASE__: List[Any]= '''evil space-punk bird'''
        SCREAMING_SNAKE_CASE__: List[str]= load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png'''
        ).resize((512, 512))
        SCREAMING_SNAKE_CASE__: List[Any]= load_image(
            '''https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png'''
        ).resize((512, 512))

        SCREAMING_SNAKE_CASE__: Optional[Any]= pipe(
            lowerCAmelCase,
            lowerCAmelCase,
            control_image=lowerCAmelCase,
            generator=lowerCAmelCase,
            output_type='''np''',
            num_inference_steps=50,
            strength=0.6,
        )
        SCREAMING_SNAKE_CASE__: Union[str, Any]= output.images[0]

        assert image.shape == (512, 512, 3)

        SCREAMING_SNAKE_CASE__: str= load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy'''
        )

        assert np.abs(expected_image - image).max() < 9e-2
64
0
'''simple docstring'''
import os
from pathlib import Path


def _SCREAMING_SNAKE_CASE():
    """JIT-compile and import the multi-scale deformable attention kernels.

    Builds the CPU/CUDA sources living in ``<repo>/kernels/deformable_detr``
    with torch's ``cpp_extension.load`` and returns the resulting
    ``MultiScaleDeformableAttention`` extension module.
    """
    # Imported lazily: torch's cpp_extension pulls in ninja/compiler machinery
    # that is only needed when the kernels are actually requested.
    from torch.utils.cpp_extension import load

    # The kernels directory sits three levels above this file.
    root = Path(__file__).resolve().parent.parent.parent / 'kernels' / 'deformable_detr'
    src_files = [
        root / filename
        for filename in [
            'vision.cpp',
            os.path.join('cpu', 'ms_deform_attn_cpu.cpp'),
            os.path.join('cuda', 'ms_deform_attn_cuda.cu'),
        ]
    ]

    load(
        'MultiScaleDeformableAttention',
        src_files,
        with_cuda=True,
        extra_include_paths=[str(root)],
        extra_cflags=['-DWITH_CUDA=1'],
        extra_cuda_cflags=[
            '-DCUDA_HAS_FP16=1',
            '-D__CUDA_NO_HALF_OPERATORS__',
            '-D__CUDA_NO_HALF_CONVERSIONS__',
            '-D__CUDA_NO_HALF2_OPERATORS__',
        ],
    )

    import MultiScaleDeformableAttention as MSDA

    return MSDA
107
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
#
# NOTE(review): identifiers in this file appear machine-obfuscated (classes
# `_lowerCamelCase`, methods `UpperCamelCase_`, locals `SCREAMING_SNAKE_CASE__`,
# field declarations collapsed to `__a = 42`, duplicated keyword parameters).
# Many references below (common, state, t, sample, model_output, ...) are
# unresolved as written — verify against diffusers'
# schedulers/scheduling_ddpm_flax.py before relying on this file.
# Code is byte-identical to the original; only comments were added.

from dataclasses import dataclass
from typing import Optional, Tuple, Union

import flax
import jax
import jax.numpy as jnp

from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
    CommonSchedulerState,
    FlaxKarrasDiffusionSchedulers,
    FlaxSchedulerMixin,
    FlaxSchedulerOutput,
    add_noise_common,
    get_velocity_common,
)


# Presumably DDPMSchedulerState: the immutable per-run state container.
@flax.struct.dataclass
class _lowerCamelCase:
    # NOTE(review): these were originally annotated fields (common,
    # init_noise_sigma, timesteps, num_inference_steps) — destroyed by
    # obfuscation into repeated `__a = 42`.
    __a = 42
    # setable values
    __a = 42
    __a = 42
    __a = None

    @classmethod
    def UpperCamelCase_(cls, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase) -> Optional[Any]:
        # Presumably `create(common, init_noise_sigma, timesteps)`.
        return cls(common=lowerCAmelCase, init_noise_sigma=lowerCAmelCase, timesteps=lowerCAmelCase)


# Presumably FlaxDDPMSchedulerOutput: wraps (prev_sample, state).
@dataclass
class _lowerCamelCase(UpperCamelCase_):
    __a = 42


# Presumably FlaxDDPMScheduler: the DDPM denoising scheduler for Flax/JAX.
class _lowerCamelCase(UpperCamelCase_, UpperCamelCase_):
    __a = [e.name for e in FlaxKarrasDiffusionSchedulers]

    __a = 42

    @property
    def UpperCamelCase_(self) -> List[Any]:
        # Presumably `has_state`: this scheduler carries explicit state objects.
        return True

    @register_to_config
    def __init__(
        self,
        lowerCAmelCase = 1000,
        lowerCAmelCase = 0.0001,
        lowerCAmelCase = 0.02,
        lowerCAmelCase = "linear",
        lowerCAmelCase = None,
        lowerCAmelCase = "fixed_small",
        lowerCAmelCase = True,
        lowerCAmelCase = "epsilon",
        lowerCAmelCase = jnp.floataa,
    ) -> Optional[int]:
        # Defaults suggest (num_train_timesteps, beta_start, beta_end,
        # beta_schedule, trained_betas, variance_type, clip_sample,
        # prediction_type, dtype) — TODO confirm against upstream.
        SCREAMING_SNAKE_CASE__: Optional[int]= dtype

    def UpperCamelCase_(self, lowerCAmelCase = None) -> DDPMSchedulerState:
        # Presumably `create_state(common=None)`.
        if common is None:
            SCREAMING_SNAKE_CASE__: Optional[Any]= CommonSchedulerState.create(self)

        # standard deviation of the initial noise distribution
        SCREAMING_SNAKE_CASE__: Dict= jnp.array(1.0, dtype=self.dtype)

        SCREAMING_SNAKE_CASE__: int= jnp.arange(0, self.config.num_train_timesteps).round()[::-1]

        return DDPMSchedulerState.create(
            common=lowerCAmelCase,
            init_noise_sigma=lowerCAmelCase,
            timesteps=lowerCAmelCase,
        )

    def UpperCamelCase_(self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase = None) -> jnp.ndarray:
        # Presumably `scale_model_input`: DDPM needs no input scaling.
        return sample

    def UpperCamelCase_(self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase = ()) -> DDPMSchedulerState:
        # Presumably `set_timesteps(state, num_inference_steps, shape)`.
        SCREAMING_SNAKE_CASE__: str= self.config.num_train_timesteps // num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # rounding to avoid issues when num_inference_step is power of 3
        SCREAMING_SNAKE_CASE__: str= (jnp.arange(0, lowerCAmelCase) * step_ratio).round()[::-1]

        return state.replace(
            num_inference_steps=lowerCAmelCase,
            timesteps=lowerCAmelCase,
        )

    def UpperCamelCase_(self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase=None, lowerCAmelCase=None) -> List[str]:
        # Presumably `_get_variance(state, t, predicted_variance, variance_type)`.
        SCREAMING_SNAKE_CASE__: Tuple= state.common.alphas_cumprod[t]
        SCREAMING_SNAKE_CASE__: int= jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))

        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        SCREAMING_SNAKE_CASE__: int= (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]

        if variance_type is None:
            SCREAMING_SNAKE_CASE__: Union[str, Any]= self.config.variance_type

        # hacks - were probably added for training stability
        if variance_type == "fixed_small":
            SCREAMING_SNAKE_CASE__: Dict= jnp.clip(lowerCAmelCase, a_min=1e-20)
        # for rl-diffuser https://arxiv.org/abs/2205.09991
        elif variance_type == "fixed_small_log":
            SCREAMING_SNAKE_CASE__: str= jnp.log(jnp.clip(lowerCAmelCase, a_min=1e-20))
        elif variance_type == "fixed_large":
            SCREAMING_SNAKE_CASE__: Union[str, Any]= state.common.betas[t]
        elif variance_type == "fixed_large_log":
            # Glide max_log
            SCREAMING_SNAKE_CASE__: Optional[Any]= jnp.log(state.common.betas[t])
        elif variance_type == "learned":
            return predicted_variance
        elif variance_type == "learned_range":
            SCREAMING_SNAKE_CASE__: List[Any]= variance
            SCREAMING_SNAKE_CASE__: Any= state.common.betas[t]
            SCREAMING_SNAKE_CASE__: List[Any]= (predicted_variance + 1) / 2
            SCREAMING_SNAKE_CASE__: Optional[Any]= frac * max_log + (1 - frac) * min_log

        return variance

    def UpperCamelCase_(
        self,
        lowerCAmelCase,
        lowerCAmelCase,
        lowerCAmelCase,
        lowerCAmelCase,
        lowerCAmelCase = None,
        lowerCAmelCase = True,
    ) -> Union[FlaxDDPMSchedulerOutput, Tuple]:
        # Presumably `step(state, model_output, timestep, sample, key, return_dict)`:
        # one reverse-diffusion step x_t -> x_{t-1}.
        SCREAMING_SNAKE_CASE__: Union[str, Any]= timestep

        if key is None:
            SCREAMING_SNAKE_CASE__: Optional[Any]= jax.random.PRNGKey(0)

        if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
            SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: Optional[int]= jnp.split(lowerCAmelCase, sample.shape[1], axis=1)
        else:
            SCREAMING_SNAKE_CASE__: Any= None

        # 1. compute alphas, betas
        SCREAMING_SNAKE_CASE__: List[Any]= state.common.alphas_cumprod[t]
        SCREAMING_SNAKE_CASE__: Optional[int]= jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))
        SCREAMING_SNAKE_CASE__: Optional[int]= 1 - alpha_prod_t
        SCREAMING_SNAKE_CASE__: str= 1 - alpha_prod_t_prev

        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            SCREAMING_SNAKE_CASE__: Dict= (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            SCREAMING_SNAKE_CASE__: str= model_output
        elif self.config.prediction_type == "v_prediction":
            SCREAMING_SNAKE_CASE__: Tuple= (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
        else:
            raise ValueError(
                f'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` '
                ''' for the FlaxDDPMScheduler.'''
            )

        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            SCREAMING_SNAKE_CASE__: Any= jnp.clip(lowerCAmelCase, -1, 1)

        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        SCREAMING_SNAKE_CASE__: int= (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
        SCREAMING_SNAKE_CASE__: Any= state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t

        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        SCREAMING_SNAKE_CASE__: Dict= pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

        # 6. Add noise
        def random_variance():
            SCREAMING_SNAKE_CASE__: int= jax.random.split(lowerCAmelCase, num=1)
            SCREAMING_SNAKE_CASE__: str= jax.random.normal(lowerCAmelCase, shape=model_output.shape, dtype=self.dtype)
            return (self._get_variance(lowerCAmelCase, lowerCAmelCase, predicted_variance=lowerCAmelCase) ** 0.5) * noise

        # Noise is only added for t > 0; the final step is deterministic.
        SCREAMING_SNAKE_CASE__: Union[str, Any]= jnp.where(t > 0, random_variance(), jnp.zeros(model_output.shape, dtype=self.dtype))

        SCREAMING_SNAKE_CASE__: Optional[int]= pred_prev_sample + variance

        if not return_dict:
            return (pred_prev_sample, state)

        return FlaxDDPMSchedulerOutput(prev_sample=lowerCAmelCase, state=lowerCAmelCase)

    def UpperCamelCase_(
        self,
        lowerCAmelCase,
        lowerCAmelCase,
        lowerCAmelCase,
        lowerCAmelCase,
    ) -> jnp.ndarray:
        # Presumably `add_noise(state, original_samples, noise, timesteps)`.
        return add_noise_common(state.common, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase)

    def UpperCamelCase_(
        self,
        lowerCAmelCase,
        lowerCAmelCase,
        lowerCAmelCase,
        lowerCAmelCase,
    ) -> jnp.ndarray:
        # Presumably `get_velocity(state, sample, noise, timesteps)`.
        return get_velocity_common(state.common, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase)

    def __len__(self) -> Tuple:
        # Length of the scheduler == number of training timesteps.
        return self.config.num_train_timesteps
64
0
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GPTSAN_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''tanreinama/GPTSAN-2.8B-spout_is_uniform''': (
        '''https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json'''
    ),
}


class SCREAMING_SNAKE_CASE__(PretrainedConfig):
    """Configuration for the GPTSAN-japanese model.

    Instantiating with the defaults reproduces the configuration of the
    ``tanreinama/GPTSAN-2.8B-spout_is_uniform`` checkpoint.  The model stacks
    ``num_switch_layers`` switch-transformer (mixture-of-experts) layers
    followed by ``num_ext_layers`` extra dense layers.
    """

    model_type = '''gptsan-japanese'''
    # `past_key_values` is produced at inference time, not configured.
    keys_to_ignore_at_inference = [
        '''past_key_values''',
    ]
    # Map the canonical transformers attribute names onto this config's fields.
    attribute_map = {
        '''hidden_size''': '''d_model''',
        '''num_attention_heads''': '''num_heads''',
        '''num_hidden_layers''': '''num_layers''',
    }

    def __init__(
        self,
        vocab_size=36000,
        max_position_embeddings=1280,
        d_model=1024,
        d_ff=8192,
        d_ext=4096,
        d_spout=128,
        num_switch_layers=10,
        num_ext_layers=0,
        num_heads=16,
        num_experts=16,
        expert_capacity=128,
        dropout_rate=0.0,
        layer_norm_epsilon=1e-5,
        router_bias=False,
        router_jitter_noise=0.0,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        output_hidden_states=False,
        output_attentions=False,
        initializer_factor=0.002,
        output_router_logits=False,
        use_cache=True,
        separator_token_id=35998,
        pad_token_id=35995,
        eos_token_id=35999,
        **kwargs,
    ):
        """Store all hyper-parameters and forward the special-token ids to
        :class:`PretrainedConfig`."""
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.d_ff = d_ff
        self.d_ext = d_ext
        self.d_spout = d_spout
        self.num_switch_layers = num_switch_layers
        self.num_ext_layers = num_ext_layers
        # Total depth exposed as `num_hidden_layers` via attribute_map.
        self.num_layers = num_switch_layers + num_ext_layers
        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.output_hidden_states = output_hidden_states
        self.output_attentions = output_attentions
        self.initializer_factor = initializer_factor
        self.output_router_logits = output_router_logits
        self.use_cache = use_cache

        super().__init__(
            separator_token_id=separator_token_id,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
108
def A__(upper_limit: int) -> list[int]:
    """Return the Catalan numbers C(0)..C(upper_limit) via dynamic programming.

    Uses the recurrence C(i) = sum_{j=0}^{i-1} C(j) * C(i - j - 1).

    Args:
        upper_limit: index of the last Catalan number to compute (inclusive).

    Raises:
        ValueError: if ``upper_limit`` is negative.
    """
    if upper_limit < 0:
        raise ValueError('''Limit for the Catalan sequence must be ≥ 0''')

    catalan_list = [0] * (upper_limit + 1)

    # Base case: C(0) = C(1) = 1
    catalan_list[0] = 1
    if upper_limit > 0:
        catalan_list[1] = 1

    # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
    for i in range(2, upper_limit + 1):
        for j in range(i):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]

    return catalan_list


if __name__ == "__main__":
    print('\n********* Catalan Numbers Using Dynamic Programming ************\n')
    print('\n*** Enter -1 at any time to quit ***')
    print('\nEnter the upper limit (≥ 0) for the Catalan number sequence: ', end='')
    try:
        while True:
            N = int(input().strip())
            if N < 0:
                print('\n********* Goodbye!! ************')
                break
            else:
                print(f'''The Catalan numbers from 0 through {N} are:''')
                print(A__(N))
                print('Try another upper limit for the sequence: ', end='')
    except (NameError, ValueError):
        print('\n********* Invalid input, goodbye! ************\n')

    # Run doctests only when executed as a script, not on import.
    import doctest

    doctest.testmod()
64
0
'''simple docstring''' from __future__ import annotations import matplotlib.pyplot as plt # type: ignore import numpy # initial triangle of Koch snowflake a = numpy.array([0, 0]) a = numpy.array([0.5, 0.866_0254]) a = numpy.array([1, 0]) a = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1] def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase ) -> list[numpy.ndarray]: '''simple docstring''' __SCREAMING_SNAKE_CASE = initial_vectors for _ in range(__UpperCAmelCase ): __SCREAMING_SNAKE_CASE = iteration_step(__UpperCAmelCase ) return vectors def __magic_name__ ( __UpperCAmelCase ) -> list[numpy.ndarray]: '''simple docstring''' __SCREAMING_SNAKE_CASE = [] for i, start_vector in enumerate(vectors[:-1] ): __SCREAMING_SNAKE_CASE = vectors[i + 1] new_vectors.append(__UpperCAmelCase ) __SCREAMING_SNAKE_CASE = end_vector - start_vector new_vectors.append(start_vector + difference_vector / 3 ) new_vectors.append( start_vector + difference_vector / 3 + rotate(difference_vector / 3 , 60 ) ) new_vectors.append(start_vector + difference_vector * 2 / 3 ) new_vectors.append(vectors[-1] ) return new_vectors def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase ) -> numpy.ndarray: '''simple docstring''' __SCREAMING_SNAKE_CASE = numpy.radians(__UpperCAmelCase ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = numpy.cos(__UpperCAmelCase ), numpy.sin(__UpperCAmelCase ) __SCREAMING_SNAKE_CASE = numpy.array(((c, -s), (s, c)) ) return numpy.dot(__UpperCAmelCase , __UpperCAmelCase ) def __magic_name__ ( __UpperCAmelCase ) -> None: '''simple docstring''' __SCREAMING_SNAKE_CASE = plt.gca() axes.set_aspect("""equal""" ) # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all # y-coordinates as inputs, which are constructed from the vector-list using # zip() __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = zip(*__UpperCAmelCase ) plt.plot(__UpperCAmelCase , __UpperCAmelCase ) plt.show() if __name__ == "__main__": import doctest doctest.testmod() a = iterate(INITIAL_VECTORS, 
5) plot(processed_vectors)
109
from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline else: from .pipeline_unclip import UnCLIPPipeline from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline from .text_proj import UnCLIPTextProjModel
64
0
"""Fast (tokenizers-backed) tokenizer class for RoBERTa."""
import json
from typing import List, Optional, Tuple

from tokenizers import pre_tokenizers, processors

from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/vocab.json',
        'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/vocab.json',
        'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json',
        'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/vocab.json',
        'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json',
        'roberta-large-openai-detector': (
            'https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json'
        ),
    },
    'merges_file': {
        'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/merges.txt',
        'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/merges.txt',
        'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt',
        'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/merges.txt',
        'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt',
        'roberta-large-openai-detector': (
            'https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt'
        ),
    },
    'tokenizer_file': {
        'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/tokenizer.json',
        'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/tokenizer.json',
        'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json',
        'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json',
        'roberta-base-openai-detector': (
            'https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json'
        ),
        'roberta-large-openai-detector': (
            'https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json'
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'roberta-base': 512,
    'roberta-large': 512,
    'roberta-large-mnli': 512,
    'distilroberta-base': 512,
    'roberta-base-openai-detector': 512,
    'roberta-large-openai-detector': 512,
}


class RobertaTokenizerFast(PreTrainedTokenizerFast):
    """Fast RoBERTa tokenizer backed by HuggingFace `tokenizers`.

    Mirrors the slow `RobertaTokenizer` interface; on construction it patches
    the backend pre-tokenizer and post-processor so that `add_prefix_space`
    and `trim_offsets` match the requested values.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = RobertaTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )
        # Rebuild the backend pre-tokenizer only if its add_prefix_space flag
        # disagrees with what the caller asked for.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get('add_prefix_space', add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop('type'))
            pre_tok_state['add_prefix_space'] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        tokenizer_component = 'post_processor'
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state['sep'] = tuple(state['sep'])
            if "cls" in state:
                state['cls'] = tuple(state['cls'])

            changes_to_apply = False
            if state.get('add_prefix_space', add_prefix_space) != add_prefix_space:
                state['add_prefix_space'] = add_prefix_space
                changes_to_apply = True
            if state.get('trim_offsets', trim_offsets) != trim_offsets:
                state['trim_offsets'] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop('type'))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    def mask_token(self) -> str:
        """The mask token, or None (with an error log) if it was never set."""
        if self._mask_token is None:
            if self.verbose:
                logger.error('Using mask_token, but it is not set yet.')
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. includes the space before it
        # (lstrip=True), hence wrapping plain strings in an AddedToken.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get('is_split_into_words', False)
        assert self.add_prefix_space or not is_split_into_words, (
            f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get('is_split_into_words', False)
        assert self.add_prefix_space or not is_split_into_words, (
            f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Save the backend model files and return the written file paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_a_pair=None):
        """Add RoBERTa special tokens: <s> A </s> (</s> B </s> for a pair)."""
        output = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
        if token_ids_a_pair is None:
            return output
        return output + [self.eos_token_id] + token_ids_a_pair + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_a: List[int], token_ids_a_pair: Optional[List[int]] = None
    ) -> List[int]:
        """RoBERTa does not use token type ids: return a list of zeros."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_a_pair is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a_pair + sep) * [0]


# Backward-compatible alias for the previously mangled class name.
a = RobertaTokenizerFast
110
"""Convert a fairseq/metaseq OPT checkpoint into the HF `OPTModel` format."""
import argparse
from pathlib import Path

import torch

from transformers import OPTConfig, OPTModel
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def load_checkpoint(checkpoint_path):
    """Load a fairseq state dict and rename/split its weights for HF OPT.

    Returns the adapted state dict.
    """
    sd = torch.load(checkpoint_path, map_location='cpu')
    if "model" in sd.keys():
        # Reuse the already-loaded dict instead of reading the file twice.
        sd = sd["model"]

    # pop unnecessary weights
    keys_to_delete = [
        'decoder.version',
        'decoder.output_projection.weight',
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(key)

    keys_to_rename = {
        'decoder.project_in_dim.weight': 'decoder.project_in.weight',
        'decoder.project_out_dim.weight': 'decoder.project_out.weight',
        'decoder.layer_norm.weight': 'decoder.final_layer_norm.weight',
        'decoder.layer_norm.bias': 'decoder.final_layer_norm.bias',
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            sd[new_key] = sd.pop(old_key)

    keys = list(sd.keys())
    for key in keys:
        if ".qkv_proj." in key:
            value = sd[key]
            # We split QKV in separate Q,K,V
            q_name = key.replace('.qkv_proj.', '.q_proj.')
            k_name = key.replace('.qkv_proj.', '.k_proj.')
            v_name = key.replace('.qkv_proj.', '.v_proj.')

            depth = value.shape[0]
            assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            k, v, q = torch.split(value, depth // 3, dim=0)

            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v
            del sd[key]

    return sd


@torch.no_grad()
def convert_opt_checkpoint(checkpoint_path, pytorch_dump_folder_path, config=None):
    """Load, adapt and save the checkpoint as a half-precision HF OPTModel."""
    state_dict = load_checkpoint(checkpoint_path)

    if config is not None:
        config = OPTConfig.from_pretrained(config)
    else:
        config = OPTConfig()

    model = OPTModel(config).half().eval()
    model.load_state_dict(state_dict)

    # Check results
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--fairseq_path',
        type=str,
        help=(
            'path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:'
            ' https://huggingface.co/models?other=opt_metasq'
        ),
    )
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument('--hf_config', default=None, type=str, help='Define HF config.')
    args = parser.parse_args()
    convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
64
0
# Tests for the AutoFeatureExtractor auto-class: shortcut names, local configs,
# error messages, trusted remote code, and custom registration.
# NOTE(review): identifiers in this file appear machine-mangled — all test
# methods share the name `__magic_name__` (later defs shadow earlier ones) and
# several locals (`config_dict`, `model_config`, `feature_extractor`, ...) are
# read but assigned under mangled names. Preserved byte-for-byte here; only
# comments were added.
import json
import sys
import tempfile
import unittest
from pathlib import Path

import transformers
from transformers import (
    CONFIG_MAPPING,
    FEATURE_EXTRACTOR_MAPPING,
    AutoConfig,
    AutoFeatureExtractor,
    WavaVecaConfig,
    WavaVecaFeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir

# Make the shared test utilities (custom config/feature-extractor modules) importable.
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))

from test_module.custom_configuration import CustomConfig  # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor  # noqa E402


# Fixture paths used throughout the tests below.
__magic_name__: int = get_tests_dir("fixtures")
__magic_name__: Dict = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
__magic_name__: Union[str, Any] = get_tests_dir("fixtures/dummy-config.json")


class snake_case__ ( unittest.TestCase ):
    # setUp: disable the remote-code confirmation timeout for the tests.
    def __magic_name__ ( self ) -> Tuple:
        __magic_name__ : Dict = 0

    # Loading by model-hub shortcut name.
    def __magic_name__ ( self ) -> Optional[int]:
        __magic_name__ : Optional[int] = AutoFeatureExtractor.from_pretrained("""facebook/wav2vec2-base-960h""" )
        self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )

    # Loading from a local directory/config path.
    def __magic_name__ ( self ) -> Optional[Any]:
        __magic_name__ : Tuple = AutoFeatureExtractor.from_pretrained(lowerCAmelCase__ )
        self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )

    # config.json alone (without feature_extractor_type) must be enough locally.
    def __magic_name__ ( self ) -> str:
        with tempfile.TemporaryDirectory() as tmpdirname:
            __magic_name__ : List[Any] = WavaVecaConfig()

            # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
            __magic_name__ : Any = AutoFeatureExtractor.from_pretrained(lowerCAmelCase__ ).to_dict()

            config_dict.pop("""feature_extractor_type""" )
            __magic_name__ : Dict = WavaVecaFeatureExtractor(**lowerCAmelCase__ )

            # save in new folder
            model_config.save_pretrained(lowerCAmelCase__ )
            config.save_pretrained(lowerCAmelCase__ )

            __magic_name__ : int = AutoFeatureExtractor.from_pretrained(lowerCAmelCase__ )

            # make sure private variable is not incorrectly saved
            __magic_name__ : Optional[int] = json.loads(config.to_json_string() )
            self.assertTrue("""_processor_class""" not in dict_as_saved )

        self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )

    # Loading from an explicit preprocessor config file.
    def __magic_name__ ( self ) -> str:
        __magic_name__ : Optional[int] = AutoFeatureExtractor.from_pretrained(lowerCAmelCase__ )
        self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )

    # Invalid model identifier -> clear error message.
    def __magic_name__ ( self ) -> Dict:
        with self.assertRaisesRegex(
            lowerCAmelCase__ , """bert-base is not a local folder and is not a valid model identifier""" ):
            __magic_name__ : str = AutoFeatureExtractor.from_pretrained("""bert-base""" )

    # Invalid revision -> clear error message.
    def __magic_name__ ( self ) -> Any:
        with self.assertRaisesRegex(
            lowerCAmelCase__ , R"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
            __magic_name__ : Any = AutoFeatureExtractor.from_pretrained(lowerCAmelCase__ , revision="""aaaaaa""" )

    # Repo without a preprocessor_config.json -> clear error message.
    def __magic_name__ ( self ) -> int:
        with self.assertRaisesRegex(
            lowerCAmelCase__ ,
            """hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.""" , ):
            __magic_name__ : Tuple = AutoFeatureExtractor.from_pretrained("""hf-internal-testing/config-no-model""" )

    # trust_remote_code handling for dynamically-defined feature extractors.
    def __magic_name__ ( self ) -> List[Any]:
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(lowerCAmelCase__ ):
            __magic_name__ : Optional[Any] = AutoFeatureExtractor.from_pretrained(
                """hf-internal-testing/test_dynamic_feature_extractor""" )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(lowerCAmelCase__ ):
            __magic_name__ : Optional[int] = AutoFeatureExtractor.from_pretrained(
                """hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=lowerCAmelCase__ )

        __magic_name__ : Any = AutoFeatureExtractor.from_pretrained(
            """hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=lowerCAmelCase__ )
        self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )

        # Test feature extractor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(lowerCAmelCase__ )
            __magic_name__ : List[Any] = AutoFeatureExtractor.from_pretrained(lowerCAmelCase__ , trust_remote_code=lowerCAmelCase__ )
        self.assertEqual(reloaded_feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )

    # Registering a custom config/feature-extractor pair with the auto-API.
    def __magic_name__ ( self ) -> Any:
        try:
            AutoConfig.register("""custom""" , lowerCAmelCase__ )
            AutoFeatureExtractor.register(lowerCAmelCase__ , lowerCAmelCase__ )
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(lowerCAmelCase__ ):
                AutoFeatureExtractor.register(lowerCAmelCase__ , lowerCAmelCase__ )

            # Now that the config is registered, it can be used as any other config with the auto-API
            __magic_name__ : Optional[int] = CustomFeatureExtractor.from_pretrained(lowerCAmelCase__ )

            with tempfile.TemporaryDirectory() as tmp_dir:
                feature_extractor.save_pretrained(lowerCAmelCase__ )
                __magic_name__ : Any = AutoFeatureExtractor.from_pretrained(lowerCAmelCase__ )
                self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
        finally:
            # Clean up the registries so other tests are unaffected.
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]

    # Local class vs. Hub code precedence depending on trust_remote_code.
    def __magic_name__ ( self ) -> Optional[int]:
        class snake_case__ ( UpperCamelCase_ ):
            # Marker distinguishing the locally-registered class from the Hub one.
            lowercase__ : List[Any] = True

        try:
            AutoConfig.register("""custom""" , lowerCAmelCase__ )
            AutoFeatureExtractor.register(lowerCAmelCase__ , lowerCAmelCase__ )
            # If remote code is not set, the default is to use local
            __magic_name__ : Union[str, Any] = AutoFeatureExtractor.from_pretrained(
                """hf-internal-testing/test_dynamic_feature_extractor""" )
            self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
            self.assertTrue(feature_extractor.is_local )

            # If remote code is disabled, we load the local one.
            __magic_name__ : int = AutoFeatureExtractor.from_pretrained(
                """hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=lowerCAmelCase__ )
            self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
            self.assertTrue(feature_extractor.is_local )

            # If remote is enabled, we load from the Hub
            __magic_name__ : Any = AutoFeatureExtractor.from_pretrained(
                """hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=lowerCAmelCase__ )
            self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
            self.assertTrue(not hasattr(lowerCAmelCase__ , """is_local""" ) )
        finally:
            # Clean up the registries so other tests are unaffected.
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
324
def A__ ( snake_case_ : float , snake_case_ : float ): if density <= 0: raise ValueError('''Impossible fluid density''' ) if bulk_modulus <= 0: raise ValueError('''Impossible bulk modulus''' ) return (bulk_modulus / density) ** 0.5 if __name__ == "__main__": import doctest doctest.testmod()
64
0
'''simple docstring''' from __future__ import annotations from dataclasses import dataclass @dataclass class UpperCAmelCase_ : """simple docstring""" UpperCamelCase_ = 42 UpperCamelCase_ = None UpperCamelCase_ = None def lowercase_ ( __A : TreeNode | None ) -> int: """simple docstring""" def is_valid_tree(__A : TreeNode | None ) -> bool: if node is None: return True if not isinstance(snake_case_ , snake_case_ ): return False try: float(node.data ) except (TypeError, ValueError): return False return is_valid_tree(node.left ) and is_valid_tree(node.right ) if not is_valid_tree(snake_case_ ): raise ValueError( '''Each node should be type of TreeNode and data should be float.''' ) def is_binary_search_tree_recursive_check( __A : TreeNode | None , __A : float , __A : float ) -> bool: if node is None: return True return ( left_bound < node.data < right_bound and is_binary_search_tree_recursive_check(node.left , snake_case_ , node.data ) and is_binary_search_tree_recursive_check( node.right , node.data , snake_case_ ) ) return is_binary_search_tree_recursive_check(snake_case_ , -float('''inf''' ) , float('''inf''' ) ) if __name__ == "__main__": import doctest doctest.testmod()
94
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowercase_ : Any = { 'configuration_blip_2': [ 'BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Blip2Config', 'Blip2QFormerConfig', 'Blip2VisionConfig', ], 'processing_blip_2': ['Blip2Processor'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ : int = [ 'BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST', 'Blip2Model', 'Blip2QFormerModel', 'Blip2PreTrainedModel', 'Blip2ForConditionalGeneration', 'Blip2VisionModel', ] if TYPE_CHECKING: from .configuration_blip_a import ( BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipaConfig, BlipaQFormerConfig, BlipaVisionConfig, ) from .processing_blip_a import BlipaProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_blip_a import ( BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST, BlipaForConditionalGeneration, BlipaModel, BlipaPreTrainedModel, BlipaQFormerModel, BlipaVisionModel, ) else: import sys lowercase_ : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
64
0
"""simple docstring""" def SCREAMING_SNAKE_CASE_ ( snake_case : list[int] , snake_case : list[int] , snake_case : int )-> str: return not any( neighbour == 1 and colored_vertices[i] == color for i, neighbour in enumerate(snake_case_ ) ) def SCREAMING_SNAKE_CASE_ ( snake_case : list[list[int]] , snake_case : int , snake_case : list[int] , snake_case : int )-> Any: # Base Case if index == len(snake_case_ ): return True # Recursive Step for i in range(snake_case_ ): if valid_coloring(graph[index] , snake_case_ , snake_case_ ): # Color current vertex _lowerCamelCase = i # Validate coloring if util_color(snake_case_ , snake_case_ , snake_case_ , index + 1 ): return True # Backtrack _lowerCamelCase = -1 return False def SCREAMING_SNAKE_CASE_ ( snake_case : list[list[int]] , snake_case : int )-> Tuple: _lowerCamelCase = [-1] * len(snake_case_ ) if util_color(snake_case_ , snake_case_ , snake_case_ , 0 ): return colored_vertices return []
650
import copy from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import ClassLabel, Features, Value from .base import TaskTemplate @dataclass(frozen=UpperCamelCase_ ) class _lowerCamelCase ( UpperCamelCase_ ): # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization __a = field(default="text-classification" , metadata={"include_in_asdict_even_if_is_default": True} ) __a = Features({"text": Value("string" )} ) __a = Features({"labels": ClassLabel} ) __a = "text" __a = "labels" def UpperCamelCase_ ( self , lowerCAmelCase ) -> Tuple: if self.label_column not in features: raise ValueError(f'Column {self.label_column} is not present in features.' ) if not isinstance(features[self.label_column] , lowerCAmelCase ): raise ValueError(f'Column {self.label_column} is not a ClassLabel.' ) SCREAMING_SNAKE_CASE__: Union[str, Any]= copy.deepcopy(self ) SCREAMING_SNAKE_CASE__: Tuple= self.label_schema.copy() SCREAMING_SNAKE_CASE__: Union[str, Any]= features[self.label_column] SCREAMING_SNAKE_CASE__: List[str]= label_schema return task_template @property def UpperCamelCase_ ( self ) -> Dict[str, str]: return { self.text_column: "text", self.label_column: "labels", }
64
0
"""Lazy import structure for DeBERTa, with optional tokenizers/torch/TF parts."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# Fix: the import structure was bound to a throwaway name and the optional
# symbol lists were never merged into it, so `_LazyModule(..., _import_structure,
# ...)` raised NameError at import time.
_import_structure = {
    'configuration_deberta': ['DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DebertaConfig', 'DebertaOnnxConfig'],
    'tokenization_deberta': ['DebertaTokenizer'],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['tokenization_deberta_fast'] = ['DebertaTokenizerFast']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_deberta'] = [
        'DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
        'DebertaForMaskedLM',
        'DebertaForQuestionAnswering',
        'DebertaForSequenceClassification',
        'DebertaForTokenClassification',
        'DebertaModel',
        'DebertaPreTrainedModel',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_tf_deberta'] = [
        'TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFDebertaForMaskedLM',
        'TFDebertaForQuestionAnswering',
        'TFDebertaForSequenceClassification',
        'TFDebertaForTokenClassification',
        'TFDebertaModel',
        'TFDebertaPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
    from .tokenization_deberta import DebertaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_deberta_fast import DebertaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_deberta import (
            DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            DebertaForMaskedLM,
            DebertaForQuestionAnswering,
            DebertaForSequenceClassification,
            DebertaForTokenClassification,
            DebertaModel,
            DebertaPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_deberta import (
            TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFDebertaForMaskedLM,
            TFDebertaForQuestionAnswering,
            TFDebertaForSequenceClassification,
            TFDebertaForTokenClassification,
            TFDebertaModel,
            TFDebertaPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so submodules import on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
8
import inspect import unittest class _lowerCamelCase ( unittest.TestCase ): def UpperCamelCase_ ( self ) -> Any: try: import diffusers # noqa: F401 except ImportError: assert False def UpperCamelCase_ ( self ) -> List[str]: import diffusers from diffusers.dependency_versions_table import deps SCREAMING_SNAKE_CASE__: Tuple= inspect.getmembers(lowerCAmelCase , inspect.isclass ) for cls_name, cls_module in all_classes: if "dummy_" in cls_module.__module__: for backend in cls_module._backends: if backend == "k_diffusion": SCREAMING_SNAKE_CASE__: Optional[int]= '''k-diffusion''' elif backend == "invisible_watermark": SCREAMING_SNAKE_CASE__: int= '''invisible-watermark''' assert backend in deps, f'{backend} is not in the deps table!'
64
0
"""Readers/writers between `Dataset` objects and JSON / JSON-Lines files."""
import multiprocessing
import os
from typing import BinaryIO, Optional, Union

import fsspec

from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader


class JsonDatasetReader(AbstractDatasetReader):
    """Read JSON/JSON-Lines files into a (possibly streaming) `Dataset`."""

    def __init__(
        self,
        path_or_paths,
        split=None,
        features=None,
        cache_dir=None,
        keep_in_memory=False,
        streaming=False,
        field=None,
        num_proc=None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        # Fix: attribute/local bindings were mangled into one placeholder name,
        # leaving `self.field`/`self.builder` unset.
        self.field = field
        # Normalize plain paths to a {split: paths} mapping.
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Json(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            field=field,
            **kwargs,
        )

    def read(self):
        """Materialize (or stream) the dataset described by the builder."""
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset


class JsonDatasetWriter:
    """Write a `Dataset` to JSON, batch by batch, optionally in parallel."""

    def __init__(self, dataset, path_or_buf, batch_size=None, num_proc=None, **to_json_kwargs):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"""num_proc {num_proc} must be an integer > 0.""")
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.encoding = 'utf-8'
        self.to_json_kwargs = to_json_kwargs

    def write(self):
        """Write the whole dataset; return the number of bytes written."""
        # `path_or_buf` must not leak into pandas' to_json kwargs.
        _ = self.to_json_kwargs.pop('path_or_buf', None)
        orient = self.to_json_kwargs.pop('orient', 'records')
        # JSON-Lines is the natural default for the 'records' orient only.
        lines = self.to_json_kwargs.pop('lines', True if orient == 'records' else False)
        index = self.to_json_kwargs.pop('index', False if orient in ['split', 'table'] else True)
        compression = self.to_json_kwargs.pop('compression', None)

        if compression not in [None, "infer", "gzip", "bz2", "xz"]:
            raise NotImplementedError(f"""`datasets` currently does not support {compression} compression""")

        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with fsspec.open(self.path_or_buf, 'wb', compression=compression) as buffer:
                written = self._write(file_obj=buffer, orient=orient, lines=lines, index=index, **self.to_json_kwargs)
        else:
            if compression:
                raise NotImplementedError(
                    f"""The compression parameter is not supported when writing to a buffer, but compression={compression}"""
                    ''' was passed. Please provide a local path instead.'''
                )
            written = self._write(
                file_obj=self.path_or_buf, orient=orient, lines=lines, index=index, **self.to_json_kwargs
            )
        return written

    def _batch_json(self, args):
        """Serialize one batch (given as an args tuple, for Pool.imap) to bytes."""
        # Fix: `args` was previously never unpacked, so all names below were unbound.
        offset, orient, lines, index, to_json_kwargs = args

        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        json_str = batch.to_pandas().to_json(
            path_or_buf=None, orient=orient, lines=lines, index=index, **to_json_kwargs
        )
        if not json_str.endswith('\n'):
            json_str += "\n"
        return json_str.encode(self.encoding)

    def _write(self, file_obj, orient, lines, index, **to_json_kwargs):
        """Write batches to `file_obj` sequentially or via a process pool."""
        written = 0

        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit='ba',
                disable=not logging.is_progress_bar_enabled(),
                desc='Creating json from Arrow format',
            ):
                json_bytes = self._batch_json((offset, orient, lines, index, to_json_kwargs))
                written += file_obj.write(json_bytes)
        else:
            # Fix: the tuple assignment here had been collapsed onto a single
            # mangled name, leaving `num_rows`/`batch_size` unbound.
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for json_bytes in logging.tqdm(
                    pool.imap(
                        self._batch_json,
                        [(offset, orient, lines, index, to_json_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit='ba',
                    disable=not logging.is_progress_bar_enabled(),
                    desc='Creating json from Arrow format',
                ):
                    written += file_obj.write(json_bytes)

        return written
615
import json
import os
import subprocess
import unittest
from ast import literal_eval

import pytest
from parameterized import parameterized, parameterized_class

from . import is_sagemaker_available


if is_sagemaker_available():
    from sagemaker import Session, TrainingJobAnalytics
    from sagemaker.huggingface import HuggingFace


@pytest.mark.skipif(
    literal_eval(os.getenv("TEST_SAGEMAKER", "False")) is not True,
    reason="Skipping test because should only be run when releasing minor transformers version",
)
@pytest.mark.usefixtures("sm_env")
@parameterized_class(
    [
        {
            "framework": "pytorch",
            "script": "run_glue_model_parallelism.py",
            "model_name_or_path": "roberta-large",
            "instance_type": "ml.p3dn.24xlarge",
            "results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2},
        },
        {
            "framework": "pytorch",
            "script": "run_glue.py",
            "model_name_or_path": "roberta-large",
            "instance_type": "ml.p3dn.24xlarge",
            "results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2},
        },
    ]
)
class _lowerCamelCase(unittest.TestCase):
    """SageMaker integration test for smdistributed model-parallel GLUE fine-tuning.

    One parameterized class instance is created per entry of the table above;
    ``self.framework`` / ``self.script`` / ``self.results`` etc. come from that
    table and ``self.env`` is injected by the ``sm_env`` pytest fixture.

    NOTE(review): in the incoming code all four methods were named
    ``UpperCamelCase_`` (so only the last definition survived) and locals were
    obfuscated; names below are restored from their read sites.
    """

    def setUp(self):
        """Copy the training script into the test workspace before each test."""
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                # NOTE(review): the incoming code passed an undefined name here;
                # True (fail loudly if the copy fails) is the evident intent.
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count):
        """Build a HuggingFace estimator configured for smdistributed Model Parallel.

        Called from the test body as ``self.create_estimator(instance_count)``.
        """
        # configuration for running training on smdistributed Model Parallel
        mpi_options = {
            "enabled": True,
            "processes_per_host": 8,
        }
        smp_options = {
            "enabled": True,
            "parameters": {
                "microbatches": 4,
                "placement_strategy": "spread",
                "pipeline": "interleaved",
                "optimize": "speed",
                "partitions": 4,
                "ddp": True,
            },
        }
        distribution = {"smdistributed": {"modelparallel": smp_options}, "mpi": mpi_options}

        name_extension = "trainer" if self.script == "run_glue.py" else "smtrainer"
        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f"{self.env.base_job_name}-{instance_count}-smp-{name_extension}",
            instance_count=instance_count,
            instance_type=self.instance_type,
            # NOTE(review): reconstructed; the incoming code passed an undefined name.
            debugger_hook_config=False,
            hyperparameters={
                **self.env.hyperparameters,
                "model_name_or_path": self.model_name_or_path,
                "max_steps": 500,
            },
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        """Export the CloudWatch metrics of a finished training job to CSV."""
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    @parameterized.expand([(1,)])
    def test_script(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump(
                {"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss},
                outfile,
            )
64
0
# Public API of the download sub-package.
# NOTE(review): the incoming code bound this list to `__a` with a wrong `str`
# annotation; the names listed exactly match the imports below, so this was
# evidently `__all__`.
__all__ = [
    "DownloadConfig",
    "DownloadManager",
    "DownloadMode",
    "StreamingDownloadManager",
]

from .download_config import DownloadConfig
from .download_manager import DownloadManager, DownloadMode
from .streaming_download_manager import StreamingDownloadManager
86
import unittest

import numpy as np

from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
    is_onnx_available,
    load_image,
    load_numpy,
    nightly,
    require_onnxruntime,
    require_torch_gpu,
)


if is_onnx_available():
    import onnxruntime as ort


@nightly
@require_onnxruntime
@require_torch_gpu
class _lowerCamelCase(unittest.TestCase):
    """Nightly GPU test for the legacy ONNX Stable Diffusion inpainting pipeline.

    NOTE(review): in the incoming code all three methods were named
    ``UpperCamelCase_`` and several locals/arguments were replaced with
    undefined names; identifiers below are restored from their read sites.
    """

    @property
    def gpu_provider(self):
        """ONNX Runtime CUDA execution-provider tuple used by the pipeline."""
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        """ONNX Runtime session options for the GPU run."""
        options = ort.SessionOptions()
        # NOTE(review): the incoming code assigned False to an obfuscated name;
        # disabling the memory-pattern optimization is the evident intent —
        # confirm against upstream.
        options.enable_mem_pattern = False
        return options

    def test_stable_diffusion_inpaint_legacy(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy"
        )

        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=15,
            generator=generator,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1e-2
64
0
import subprocess
import sys

from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch


class A__(TestCasePlus):
    """Tests for TRANSFORMERS_OFFLINE / no-network behaviour.

    Each test launches a child interpreter, because TRANSFORMERS_OFFLINE is
    only read when `transformers` is first imported — too late to flip inside
    pytest.

    NOTE(review): the incoming class inherited from the undefined name
    ``UpperCamelCase_``; ``TestCasePlus`` (imported above, and the provider of
    ``self.get_env()`` used below) is the evident base. All five methods were
    named ``_UpperCamelCase`` (shadowing each other); descriptive ``test_*``
    names are restored from each body's intent.
    """

    @require_torch
    def test_offline_mode(self):
        # this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
        # `transformers` is loaded, and it's too late for inside pytest - so we are changing it
        # while running an external program

        # python one-liner segments

        # this must be loaded before socket.socket is monkey-patched
        load = """
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
"""

        run = """
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
"""

        mock = """
import socket
def offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn't access internet")
socket.socket = offline_socket
"""

        # Force fetching the files so that we can use the cache
        mname = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task="fill-mask", model=mname)

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run, mock])]

        # should succeed
        env = self.get_env()
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

    @require_torch
    def test_offline_mode_no_internet(self):
        # python one-liner segments
        # this must be loaded before socket.socket is monkey-patched
        load = """
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
"""

        run = """
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
"""

        mock = """
import socket
def offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")
socket.socket = offline_socket
"""

        # Force fetching the files so that we can use the cache
        mname = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task="fill-mask", model=mname)

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run, mock])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

    @require_torch
    def test_offline_mode_sharded_checkpoint(self):
        # this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
        # `transformers` is loaded, and it's too late for inside pytest - so we are changing it
        # while running an external program

        # python one-liner segments

        # this must be loaded before socket.socket is monkey-patched
        load = """
from transformers import BertConfig, BertModel, BertTokenizer
"""

        run = """
mname = "hf-internal-testing/tiny-random-bert-sharded"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
print("success")
"""

        mock = """
import socket
def offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")
socket.socket = offline_socket
"""

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

        # next emulate no network
        cmd = [sys.executable, "-c", "\n".join([load, mock, run])]

        # Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
        # env["TRANSFORMERS_OFFLINE"] = "0"
        # result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        # self.assertEqual(result.returncode, 1, result.stderr)

        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

    @require_torch
    def test_offline_mode_pipeline_exception(self):
        load = """
from transformers import pipeline
"""
        run = """
mname = "hf-internal-testing/tiny-random-bert"
pipe = pipeline(model=mname)
"""
        mock = """
import socket
def offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")
socket.socket = offline_socket
"""
        env = self.get_env()
        env["TRANSFORMERS_OFFLINE"] = "1"
        cmd = [sys.executable, "-c", "\n".join([load, mock, run])]
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 1, result.stderr)
        self.assertIn(
            "You cannot infer task automatically within `pipeline` when using offline mode",
            result.stderr.decode().replace("\n", ""),
        )

    @require_torch
    def test_offline_model_dynamic_model(self):
        load = """
from transformers import AutoModel
"""
        run = """
mname = "hf-internal-testing/test_dynamic_model"
AutoModel.from_pretrained(mname, trust_remote_code=True)
print("success")
"""

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
37
from dataclasses import dataclass, field
from typing import Tuple

from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments


if is_torch_available():
    import torch

if is_torch_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm


logger = logging.get_logger(__name__)


@dataclass
class _lowerCamelCase(BenchmarkArguments):
    """Benchmark arguments specific to the PyTorch backend.

    NOTE(review): the incoming class inherited from the undefined name
    ``UpperCamelCase_``; ``BenchmarkArguments`` (imported above) is the evident
    base. Class attributes bound to ``__a`` are restored from their read
    sites (``self.deprecated_args``, ``self.torchscript``, ...).
    """

    # Legacy "--no_*" flags accepted for backward compatibility and translated
    # into their positive counterparts in __init__.
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        """Translate deprecated "no_*" kwargs, pop backend-specific options,
        and forward the rest to BenchmarkArguments.
        """
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]  # strip the "no_" prefix
                setattr(self, positive_arg, not kwargs.pop(deprecated_arg))
                logger.warning(
                    f"{deprecated_arg} is depreciated. Please use --no_{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )

        self.torchscript = kwargs.pop("torchscript", self.torchscript)
        self.torch_xla_tpu_print_metrics = kwargs.pop("torch_xla_tpu_print_metrics", self.torch_xla_tpu_print_metrics)
        self.fp16_opt_level = kwargs.pop("fp16_opt_level", self.fp16_opt_level)
        super().__init__(**kwargs)

    torchscript: bool = field(default=False, metadata={"help": "Trace the models using torchscript"})
    torch_xla_tpu_print_metrics: bool = field(default=False, metadata={"help": "Print Xla/PyTorch tpu metrics"})
    fp16_opt_level: str = field(
        default="O1",
        metadata={
            "help": (
                "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
                "See details at https://nvidia.github.io/apex/amp.html"
            )
        },
    )

    @cached_property
    def _setup_devices(self) -> Tuple["torch.device", int]:
        """Pick the benchmark device and GPU count once; cached thereafter."""
        requires_backends(self, ["torch"])
        logger.info("PyTorch: setting up devices")
        if not self.cuda:
            device = torch.device("cpu")
            n_gpu = 0
        elif is_torch_tpu_available():
            device = xm.xla_device()
            n_gpu = 0
        else:
            device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
            n_gpu = torch.cuda.device_count()
        return device, n_gpu

    @property
    def is_tpu(self):
        return is_torch_tpu_available() and self.tpu

    @property
    def device_idx(self) -> int:
        requires_backends(self, ["torch"])
        # TODO(PVP): currently only single GPU is supported
        return torch.cuda.current_device()

    @property
    def device(self) -> "torch.device":
        requires_backends(self, ["torch"])
        return self._setup_devices[0]

    @property
    def n_gpu(self) -> int:
        requires_backends(self, ["torch"])
        return self._setup_devices[1]

    @property
    def is_gpu(self):
        return self.n_gpu > 0
64
0
from pathlib import Path

# NOTE(review): the incoming code imported the non-existent module "cva"; the
# API used (imread, cvtColor, getAffineTransform, warpAffine) is OpenCV's cv2.
import cv2
import numpy as np
from matplotlib import pyplot as plt


def get_rotation(img: np.ndarray, pt1: np.ndarray, pt2: np.ndarray, rows: int, cols: int) -> np.ndarray:
    """Affine-warp ``img`` by the transform mapping the 3 points ``pt1`` onto ``pt2``.

    Returns the warped image sized (cols, rows). (Restored: the def was named
    ``__lowerCamelCase`` but every call site uses ``get_rotation``; the ``-> str``
    annotation was wrong — ``warpAffine`` returns an ndarray.)
    """
    matrix = cv2.getAffineTransform(pt1, pt2)
    return cv2.warpAffine(img, matrix, (rows, cols))


if __name__ == "__main__":
    # read original image
    image = cv2.imread(
        str(Path(__file__).resolve().parent.parent / "image_data" / "lena.jpg")
    )
    # turn image in gray scale value
    gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # get image shape
    img_rows, img_cols = gray_img.shape

    # set different points to rotate image
    # (np.floataa was invalid — getAffineTransform expects float32)
    pts1 = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
    pts2 = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
    pts3 = np.array([[50, 50], [150, 50], [120, 200]], np.float32)
    pts4 = np.array([[10, 100], [80, 50], [180, 250]], np.float32)

    # add all rotated images in a list
    # NOTE(review): the point-set pairings were collapsed to a single name in
    # the incoming code; the pairings below are assumed — confirm upstream.
    images = [
        gray_img,
        get_rotation(gray_img, pts1, pts2, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts3, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts4, img_rows, img_cols),
    ]

    # plot different image rotations
    fig = plt.figure(1)
    titles = ["Original", "Rotation 1", "Rotation 2", "Rotation 3"]
    for i, image in enumerate(images):
        plt.subplot(2, 2, i + 1), plt.imshow(image, "gray")
        plt.title(titles[i])
        plt.axis("off")
    plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
    plt.show()
176
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import PoolFormerImageProcessor


class PoolFormerImageProcessingTester(unittest.TestCase):
    """Holds the hyper-parameters used to build a PoolFormerImageProcessor in tests.

    NOTE(review): name restored from its instantiation in ``setUp`` below —
    in the incoming code both classes here were named ``_lowerCamelCase``, so
    the second definition silently shadowed the first.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize_and_center_crop=True,
        size=None,
        crop_pct=0.9,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"shortest_edge": 30}
        crop_size = crop_size if crop_size is not None else {"height": 30, "width": 30}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize_and_center_crop = do_resize_and_center_crop
        self.size = size
        self.crop_pct = crop_pct
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        """Return the kwargs dict used to instantiate the processor under test."""
        return {
            "size": self.size,
            "do_resize_and_center_crop": self.do_resize_and_center_crop,
            "crop_pct": self.crop_pct,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }


@require_torch
@require_vision
class PoolFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Standard image-processor test-suite for PoolFormerImageProcessor.

    NOTE(review): the mixin base was the undefined name ``UpperCamelCase_``;
    ``ImageProcessingSavingTestMixin`` (imported above) is the evident base.
    Method names and locals are restored from their read sites.
    """

    image_processing_class = PoolFormerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = PoolFormerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize_and_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "crop_pct"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 30})
        self.assertEqual(image_processor.crop_size, {"height": 30, "width": 30})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
64
0
"""simple docstring""" # coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # this script dumps information about the environment import os import platform import sys lowerCAmelCase: Optional[int] ='3' print("Python version:", sys.version) print("OS platform:", platform.platform()) print("OS architecture:", platform.machine()) try: import torch print("Torch version:", torch.__version__) print("Cuda available:", torch.cuda.is_available()) print("Cuda version:", torch.version.cuda) print("CuDNN version:", torch.backends.cudnn.version()) print("Number of GPUs available:", torch.cuda.device_count()) except ImportError: print("Torch version:", None) try: import transformers print("transformers version:", transformers.__version__) except ImportError: print("transformers version:", None)
607
import os
import random
import sys

from . import cryptomath_module as cryptomath
from . import rabin_miller


# Smallest candidate considered for a primitive root.
# NOTE(review): never read below (3 is hard-coded in the randrange calls) —
# confirm intent against upstream.
MIN_PRIMITIVE_ROOT = 3


# NOTE(review): in the incoming code all four functions were named ``A__`` and
# both parameters of two of them were named ``snake_case_`` (a SyntaxError);
# names are restored from the call sites and f-string reads.
def primitive_root(p: int) -> int:
    """Return a random primitive-root candidate modulo the prime ``p``.

    Rejects g with g**2 == 1 (mod p) and g with pow(g, p, p) == 1.
    """
    print("Generating primitive root of p")
    while True:
        g = random.randrange(3, p)
        if pow(g, 2, p) == 1:
            continue
        if pow(g, p, p) == 1:
            continue
        return g


def generate_key(key_size: int):
    """Generate an ElGamal key pair of ``key_size`` bits.

    Returns (public_key, private_key) where public_key = (key_size, e_1, e_2, p)
    and private_key = (key_size, d).
    """
    print("Generating prime p...")
    p = rabin_miller.generate_large_prime(key_size)  # select large prime number.
    e_1 = primitive_root(p)  # one primitive root on modulo p.
    d = random.randrange(3, p)  # private_key -> have to be greater than 2 for safety.
    e_2 = cryptomath.find_mod_inverse(pow(e_1, d, p), p)

    public_key = (key_size, e_1, e_2, p)
    private_key = (key_size, d)
    return public_key, private_key


def make_key_files(name: str, key_size: int) -> None:
    """Write ``<name>_pubkey.txt`` / ``<name>_privkey.txt``; abort if either exists."""
    if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"):
        print("\nWARNING:")
        print(
            f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            "Use a different name or delete these files and re-run this program."
        )
        sys.exit()

    public_key, private_key = generate_key(key_size)
    print(f"\nWriting public key to file {name}_pubkey.txt...")
    with open(f"{name}_pubkey.txt", "w") as fo:
        fo.write(f"{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}")
    print(f"Writing private key to file {name}_privkey.txt...")
    with open(f"{name}_privkey.txt", "w") as fo:
        fo.write(f"{private_key[0]},{private_key[1]}")


def main() -> None:
    print("Making key files...")
    make_key_files("elgamal", 2_048)
    print("Key files generation successful")


if __name__ == "__main__":
    main()
64
0
"""Unit tests for the greedy knapsack implementation."""
import unittest

from knapsack import greedy_knapsack as kp


class __SCREAMING_SNAKE_CASE(unittest.TestCase):
    """NOTE(review): in the incoming code every method was named
    ``__UpperCamelCase`` (so they shadowed each other and, lacking the
    ``test_`` prefix, nothing ran), and ``assertRaisesRegex`` received an
    undefined name instead of an exception class. Names/exception classes
    below are restored to match greedy_knapsack's raise sites — confirm
    the ValueError/IndexError split against the implementation.
    """

    def test_sorted(self):
        """calc_profit(profit, weight, max_weight) returns the expected value."""
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        max_weight = 100
        self.assertEqual(kp.calc_profit(profit, weight, max_weight), 210)

    def test_negative_max_weight(self):
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_negative_weight_value(self):
        self.assertRaisesRegex(ValueError, "Weight can not be negative.")

    def test_negative_profit_value(self):
        self.assertRaisesRegex(ValueError, "Profit can not be negative.")

    def test_null_max_weight(self):
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_unequal_list_length(self):
        self.assertRaisesRegex(
            IndexError, "The length of profit and weight must be same."
        )


if __name__ == "__main__":
    unittest.main()
448
from math import factorial


def combinations(n: int, k: int) -> int:
    """Return the binomial coefficient C(n, k) = n! / (k! * (n - k)!).

    Restored: the def was named ``A__`` (but called as ``combinations`` below)
    and both parameters shared the name ``snake_case_`` — a SyntaxError.

    Raises:
        ValueError: if k > n or k < 0 (a negative factorial would be required).
    """
    # If either of the conditions are true, the function is being asked
    # to calculate a factorial of a negative number, which is not possible
    if n < k or k < 0:
        raise ValueError("Please enter positive integers for n and k where n >= k")
    return factorial(n) // (factorial(k) * factorial(n - k))


if __name__ == "__main__":
    print(
        "The number of five-card hands possible from a standard",
        f"fifty-two card deck is: {combinations(5_2, 5)}\n",
    )
    print(
        "If a class of 40 students must be arranged into groups of",
        f"4 for group projects, there are {combinations(4_0, 4)} ways",
        "to arrange them.\n",
    )
    print(
        "If 10 teams are competing in a Formula One race, there",
        f"are {combinations(1_0, 3)} ways that first, second and",
        "third place can be awarded.",
    )
64
0